From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
---
kernel/drivers/spi/spi.c | 934 +++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 734 insertions(+), 200 deletions(-)
diff --git a/kernel/drivers/spi/spi.c b/kernel/drivers/spi/spi.c
index fefb700..b1a638d 100644
--- a/kernel/drivers/spi/spi.c
+++ b/kernel/drivers/spi/spi.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SPI init/core code
- *
- * Copyright (C) 2005 David Brownell
- * Copyright (C) 2008 Secret Lab Technologies Ltd.
- */
+// SPI init/core code
+//
+// Copyright (C) 2005 David Brownell
+// Copyright (C) 2008 Secret Lab Technologies Ltd.
#include <linux/kernel.h>
#include <linux/device.h>
@@ -21,6 +19,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
@@ -37,6 +36,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
@@ -45,10 +46,6 @@
static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
-
- /* spi controllers may cleanup for released devices */
- if (spi->controller->cleanup)
- spi->controller->cleanup(spi);
spi_controller_put(spi->controller);
kfree(spi->driver_override);
@@ -91,7 +88,7 @@
if (len) {
spi->driver_override = driver_override;
} else {
- /* Emptry string, disable driver override */
+ /* Empty string, disable driver override */
spi->driver_override = NULL;
kfree(driver_override);
}
@@ -469,7 +466,7 @@
static LIST_HEAD(spi_controller_list);
/*
- * Used to protect add/del opertion for board_info list and
+ * Used to protect add/del operation for board_info list and
* spi_controller list, and their matching process
* also used to protect object of type struct idr
*/
@@ -516,6 +513,7 @@
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
+ spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -546,6 +544,12 @@
spi->chip_select == new_spi->chip_select)
return -EBUSY;
return 0;
+}
+
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
}
/**
@@ -593,7 +597,10 @@
goto done;
}
- if (ctlr->cs_gpios)
+ /* Descriptors take precedence */
+ if (ctlr->cs_gpiods)
+ spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+ else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
@@ -609,11 +616,13 @@
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0)
+ if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
- else
+ spi_cleanup(spi);
+ } else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
done:
mutex_unlock(&spi_add_lock);
@@ -706,7 +715,9 @@
}
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
- device_unregister(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -782,21 +793,68 @@
/*-------------------------------------------------------------------------*/
-static void spi_set_cs(struct spi_device *spi, bool enable)
+static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
+ bool enable1 = enable;
+
+ /*
+ * Avoid calling into the driver (or doing delays) if the chip select
+ * isn't actually changing from the last time this was called.
+ */
+ if (!force && (spi->controller->last_cs_enable == enable) &&
+ (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ return;
+
+ spi->controller->last_cs_enable = enable;
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
+ if (!spi->controller->set_cs_timing) {
+ if (enable1)
+ spi_delay_exec(&spi->controller->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->controller->cs_hold, NULL);
+ }
+
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* Honour the SPI_NO_CS flag */
- if (!(spi->mode & SPI_NO_CS))
- gpio_set_value(spi->cs_gpio, !enable);
+ if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (spi->cs_gpiod) {
+ /*
+ * Historically ACPI has had no means of expressing the GPIO
+ * polarity, so the SPISerialBus() resource defines it on a
+ * per-chip basis. In order to avoid a chain of negations, the
+ * GPIO polarity is considered to be Active High. Even for the
+ * cases when _DSD() is involved (in the updated versions of
+ * ACPI) the GPIO CS polarity must be defined Active High to
+ * avoid ambiguity. That's why we use enable, which takes
+ * SPI_CS_HIGH into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
+ } else {
+ /*
+ * invert the enable line, as active low is
+ * default for SPI.
+ */
+ gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
+ }
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
+ }
+
+ if (!spi->controller->set_cs_timing) {
+ if (!enable1)
+ spi_delay_exec(&spi->controller->cs_inactive, NULL);
}
}
@@ -823,10 +881,10 @@
int i, ret;
if (vmalloced_buf || kmap_buf) {
- desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -888,6 +946,8 @@
if (sgt->orig_nents) {
dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
}
}
@@ -965,6 +1025,8 @@
spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
+ ctlr->cur_msg_mapped = false;
+
return 0;
}
#else /* !CONFIG_HAS_DMA */
@@ -1006,7 +1068,8 @@
void *tmp;
unsigned int max_tx, max_rx;
- if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
+ if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
+ && !(msg->spi->mode & SPI_3WIRE)) {
max_tx = 0;
max_rx = 0;
@@ -1058,7 +1121,8 @@
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
- unsigned long long ms = 1;
+ u32 speed_hz = xfer->speed_hz;
+ unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
@@ -1066,8 +1130,11 @@
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@@ -1088,6 +1155,99 @@
return 0;
}
+static void _spi_transfer_delay_ns(u32 ns)
+{
+ if (!ns)
+ return;
+ if (ns <= 1000) {
+ ndelay(ns);
+ } else {
+ u32 us = DIV_ROUND_UP(ns, 1000);
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
+}
+
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
+ u32 hz;
+
+ if (!delay)
+ return 0;
+
+ switch (unit) {
+ case SPI_DELAY_UNIT_USECS:
+ delay *= 1000;
+ break;
+ case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
+ break;
+ case SPI_DELAY_UNIT_SCK:
+ /* clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
+ /* if there is no effective speed known, then approximate
+ * by underestimating with half the requested Hz
+ */
+ hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
+ delay *= DIV_ROUND_UP(1000000000, hz);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ might_sleep();
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
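
As a minimal sketch (not part of this patch) of how the new delay helpers compose, a hypothetical caller might look like:

	#include <linux/spi/spi.h>

	/* Hypothetical helper: wait three SCK cycles after a transfer. When
	 * no effective rate was reported, spi_delay_to_ns() above falls back
	 * to half the requested speed_hz.
	 */
	static int example_wait_three_sck(struct spi_transfer *xfer)
	{
		struct spi_delay d = {
			.value = 3,
			.unit = SPI_DELAY_UNIT_SCK,
		};

		return spi_delay_exec(&d, xfer); /* -EINVAL if no rate is known */
	}

At a 1 MHz effective clock one cycle is DIV_ROUND_UP(1000000000, 1000000) = 1000 ns, so this ends up busy-waiting about 3 us via _spi_transfer_delay_ns().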
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(10000);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
+ dev_err_once(&msg->spi->dev,
+ "Use of unsupported delay unit %i, using default of 10us\n",
+ unit);
+ _spi_transfer_delay_ns(10000);
+ }
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1104,7 +1264,7 @@
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
- spi_set_cs(msg->spi, true);
+ spi_set_cs(msg->spi, true, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
@@ -1115,11 +1275,25 @@
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
- if (xfer->tx_buf || xfer->rx_buf) {
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
+ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
reinit_completion(&ctlr->xfer_completion);
+fallback_pio:
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ if (ctlr->cur_msg_mapped &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ __spi_unmap_msg(ctlr, msg);
+ ctlr->fallback = true;
+ xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+ goto fallback_pio;
+ }
+
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
@@ -1141,28 +1315,26 @@
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs) {
- u16 us = xfer->delay_usecs;
-
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
- }
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
- spi_set_cs(msg->spi, false);
- udelay(10);
- spi_set_cs(msg->spi, true);
+ spi_set_cs(msg->spi, false, false);
+ _spi_transfer_cs_change_delay(msg, xfer);
+ spi_set_cs(msg->spi, true, false);
}
}
@@ -1171,7 +1343,7 @@
out:
if (ret != 0 || !keep_cs)
- spi_set_cs(msg->spi, false);
+ spi_set_cs(msg->spi, false, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
@@ -1198,6 +1370,14 @@
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+}
+
/**
* __spi_pump_messages - function which processes spi message queue
* @ctlr: controller to process queue for
@@ -1213,8 +1393,10 @@
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
- unsigned long flags;
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
bool was_busy = false;
+ unsigned long flags;
int ret;
/* Lock queue */
@@ -1228,7 +1410,7 @@
/* If another context is idling the device then defer */
if (ctlr->idling) {
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1240,10 +1422,17 @@
return;
}
- /* Only do teardown in the thread */
+ /* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
- &ctlr->pump_messages);
+ if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+ !ctlr->unprepare_transfer_hardware) {
+ spi_idle_runtime_pm(ctlr);
+ ctlr->busy = false;
+ trace_spi_controller_idle(ctlr);
+ } else {
+ kthread_queue_work(ctlr->kworker,
+ &ctlr->pump_messages);
+ }
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1260,10 +1449,7 @@
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
- if (ctlr->auto_runtime_pm) {
- pm_runtime_mark_last_busy(ctlr->dev.parent);
- pm_runtime_put_autosuspend(ctlr->dev.parent);
- }
+ spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
@@ -1273,10 +1459,10 @@
}
/* Extract head of queue */
- ctlr->cur_msg =
- list_first_entry(&ctlr->queue, struct spi_message, queue);
+ msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+ ctlr->cur_msg = msg;
- list_del_init(&ctlr->cur_msg->queue);
+ list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else
@@ -1303,37 +1489,49 @@
ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
dev_err(&ctlr->dev,
- "failed to prepare transfer hardware\n");
+ "failed to prepare transfer hardware: %d\n",
+ ret);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
mutex_unlock(&ctlr->io_mutex);
return;
}
}
- trace_spi_message_start(ctlr->cur_msg);
+ trace_spi_message_start(msg);
if (ctlr->prepare_message) {
- ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
+ ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
ret);
- ctlr->cur_msg->status = ret;
+ msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
ctlr->cur_msg_prepared = true;
}
- ret = spi_map_msg(ctlr, ctlr->cur_msg);
+ ret = spi_map_msg(ctlr, msg);
if (ret) {
- ctlr->cur_msg->status = ret;
+ msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
- ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
+ ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
@@ -1360,20 +1558,124 @@
__spi_pump_messages(ctlr, true);
}
+/**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. The frequency with which this function
+ * must be called (once per word, once for the whole
+ * transfer, once per batch of words etc) is arbitrary
+ * as long as the @tx buffer offset is greater than or
+ * equal to the requested byte at the time of the
+ * call. The timestamp is only taken once, at the
+ * first such call. It is assumed that the driver
+ * advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, must be followed up with
+ * spi_take_timestamp_post or otherwise the system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress > xfer->ptp_sts_word_pre)
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = progress;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds
+ * or is equal to the requested word will be
+ * timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress < xfer->ptp_sts_word_post)
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = progress;
+
+ xfer->timestamped = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
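
As a hedged sketch of the calling pattern these kernel-docs describe (hypothetical PIO driver, assuming 8-bit words so the word count equals the byte offset; example_push_fifo() stands in for a real FIFO register write):

	#include <linux/spi/spi.h>

	static void example_push_fifo(u8 byte)
	{
		/* stand-in for writing the hardware TX FIFO */
	}

	static void example_pio_transfer(struct spi_controller *ctlr,
					 struct spi_transfer *xfer)
	{
		const u8 *tx = xfer->tx_buf;
		size_t i;

		for (i = 0; i < xfer->len; i++) {
			/* becomes a no-op once i passes xfer->ptp_sts_word_pre */
			spi_take_timestamp_pre(ctlr, xfer, i, false);
			example_push_fifo(tx[i]);
			/* latches on the first call with i >= ptp_sts_word_post */
			spi_take_timestamp_post(ctlr, xfer, i, false);
		}
	}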
+
+/**
+ * spi_set_thread_rt - set the controller to pump at realtime priority
+ * @ctlr: controller to boost priority of
+ *
+ * This can be called because the controller requested realtime priority
+ * (by setting the ->rt value before calling spi_register_controller()) or
+ * because a device on the bus said that its transfers needed realtime
+ * priority.
+ *
+ * NOTE: at the moment if any device on a bus says it needs realtime then
+ * the thread will be at realtime priority for all transfers on that
+ * controller. If this eventually becomes a problem we may see if we can
+ * find a way to boost the priority only temporarily during relevant
+ * transfers.
+ */
+static void spi_set_thread_rt(struct spi_controller *ctlr)
+{
+ dev_info(&ctlr->dev,
+ "will run message pump with realtime priority\n");
+ sched_set_fifo(ctlr->kworker->task);
+}
+
static int spi_init_queue(struct spi_controller *ctlr)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-
ctlr->running = false;
ctlr->busy = false;
- kthread_init_worker(&ctlr->kworker);
- ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
- "%s", dev_name(&ctlr->dev));
- if (IS_ERR(ctlr->kworker_task)) {
- dev_err(&ctlr->dev, "failed to create message pump task\n");
- return PTR_ERR(ctlr->kworker_task);
+ ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
}
+
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
@@ -1383,11 +1685,8 @@
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
- if (ctlr->rt) {
- dev_info(&ctlr->dev,
- "will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m);
- }
+ if (ctlr->rt)
+ spi_set_thread_rt(ctlr);
return 0;
}
@@ -1426,6 +1725,7 @@
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
unsigned long flags;
int ret;
@@ -1433,6 +1733,17 @@
spin_lock_irqsave(&ctlr->queue_lock, flags);
mesg = ctlr->cur_msg;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
@@ -1454,7 +1765,8 @@
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->cur_msg = NULL;
ctlr->cur_msg_prepared = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ ctlr->fallback = false;
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1480,7 +1792,7 @@
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
@@ -1536,8 +1848,7 @@
return ret;
}
- kthread_flush_worker(&ctlr->kworker);
- kthread_stop(ctlr->kworker_task);
+ kthread_destroy_worker(ctlr->kworker);
return 0;
}
@@ -1560,7 +1871,7 @@
list_add_tail(&msg->queue, &ctlr->queue);
if (!ctlr->busy && need_pump)
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
@@ -1637,12 +1948,12 @@
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
- if (of_property_read_bool(nc, "spi-cs-high"))
- spi->mode |= SPI_CS_HIGH;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
+ if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
@@ -1654,6 +1965,9 @@
break;
case 4:
spi->mode |= SPI_TX_QUAD;
+ break;
+ case 8:
+ spi->mode |= SPI_TX_OCTAL;
break;
default:
dev_warn(&ctlr->dev,
@@ -1673,6 +1987,9 @@
case 4:
spi->mode |= SPI_RX_QUAD;
break;
+ case 8:
+ spi->mode |= SPI_RX_OCTAL;
+ break;
default:
dev_warn(&ctlr->dev,
"spi-rx-bus-width %d not supported\n",
@@ -1682,7 +1999,7 @@
}
if (spi_controller_is_slave(ctlr)) {
- if (strcmp(nc->name, "slave")) {
+ if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
return -EINVAL;
@@ -1700,13 +2017,8 @@
spi->chip_select = value;
/* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&ctlr->dev,
- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
- return rc;
- }
- spi->max_speed_hz = value;
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
return 0;
}
@@ -1789,9 +2101,18 @@
#endif
#ifdef CONFIG_ACPI
-static void acpi_spi_parse_apple_properties(struct spi_device *spi)
+struct acpi_spi_lookup {
+ struct spi_controller *ctlr;
+ u32 max_speed_hz;
+ u32 mode;
+ int irq;
+ u8 bits_per_word;
+ u8 chip_select;
+};
+
+static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
+ struct acpi_spi_lookup *lookup)
{
- struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
const union acpi_object *obj;
if (!x86_apple_machine)
@@ -1799,35 +2120,46 @@
if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length >= 4)
- spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
+ lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8)
- spi->bits_per_word = *(u64 *)obj->buffer.pointer;
+ lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_LSB_FIRST;
+ lookup->mode |= SPI_LSB_FIRST;
if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
}
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
- struct spi_device *spi = data;
- struct spi_controller *ctlr = spi->controller;
+ struct acpi_spi_lookup *lookup = data;
+ struct spi_controller *ctlr = lookup->ctlr;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
+ acpi_handle parent_handle;
+ acpi_status status;
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+
+ status = acpi_get_handle(NULL,
+ sb->resource_source.string_ptr,
+ &parent_handle);
+
+ if (ACPI_FAILURE(status) ||
+ ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ return -ENODEV;
+
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
@@ -1840,25 +2172,26 @@
sb->device_selection);
if (cs < 0)
return cs;
- spi->chip_select = cs;
+ lookup->chip_select = cs;
} else {
- spi->chip_select = sb->device_selection;
+ lookup->chip_select = sb->device_selection;
}
- spi->max_speed_hz = sb->connection_speed;
+ lookup->max_speed_hz = sb->connection_speed;
+ lookup->bits_per_word = sb->data_bit_length;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
if (sb->clock_polarity == ACPI_SPI_START_HIGH)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
- spi->mode |= SPI_CS_HIGH;
+ lookup->mode |= SPI_CS_HIGH;
}
- } else if (spi->irq < 0) {
+ } else if (lookup->irq < 0) {
struct resource r;
if (acpi_dev_resource_interrupt(ares, 0, &r))
- spi->irq = r.start;
+ lookup->irq = r.start;
}
/* Always tell the ACPI core to skip this resource */
@@ -1868,12 +2201,36 @@
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct acpi_device *adev)
{
+ acpi_handle parent_handle = NULL;
struct list_head resource_list;
+ struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
if (acpi_bus_get_status(adev) || !adev->status.present ||
acpi_device_enumerated(adev))
+ return AE_OK;
+
+ lookup.ctlr = ctlr;
+ lookup.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ /* found SPI in _CRS but it points to another controller */
+ return AE_OK;
+
+ if (!lookup.max_speed_hz &&
+ !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
+ ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
+ /* Apple does not use _CRS but nested devices for SPI slaves */
+ acpi_spi_parse_apple_properties(adev, &lookup);
+ }
+
+ if (!lookup.max_speed_hz)
return AE_OK;
spi = spi_alloc_device(ctlr);
@@ -1883,20 +2240,13 @@
return AE_NO_MEMORY;
}
+
ACPI_COMPANION_SET(&spi->dev, adev);
- spi->irq = -1;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_spi_add_resource, spi);
- acpi_dev_free_resource_list(&resource_list);
-
- acpi_spi_parse_apple_properties(spi);
-
- if (ret < 0 || !spi->max_speed_hz) {
- spi_dev_put(spi);
- return AE_OK;
- }
+ spi->max_speed_hz = lookup.max_speed_hz;
+ spi->mode |= lookup.mode;
+ spi->irq = lookup.irq;
+ spi->bits_per_word = lookup.bits_per_word;
+ spi->chip_select = lookup.chip_select;
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
@@ -1929,6 +2279,8 @@
return acpi_register_spi_device(ctlr, adev);
}
+#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
+
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
acpi_status status;
@@ -1938,7 +2290,8 @@
if (!handle)
return;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
@@ -1984,8 +2337,8 @@
return 1;
}
-static ssize_t spi_slave_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
@@ -1996,9 +2349,8 @@
child ? to_spi_device(child)->modalias : NULL);
}
-static ssize_t spi_slave_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
@@ -2036,7 +2388,7 @@
return count;
}
-static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+static DEVICE_ATTR_RW(slave);
static struct attribute *spi_slave_attrs[] = {
&dev_attr_slave.attr,
@@ -2067,8 +2419,10 @@
* __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
- * memory is in the driver_data field of the returned device,
- * accessible with spi_controller_get_devdata().
+ * memory is in the driver_data field of the returned device, accessible
+ * with spi_controller_get_devdata(); the memory is cacheline aligned;
+ * drivers granting DMA access to portions of their private data need to
+ * round up @size using ALIGN(size, dma_get_cache_alignment()).
* @slave: flag indicating whether to allocate an SPI master (false) or SPI
* slave (true) controller
* Context: can sleep
@@ -2090,11 +2444,12 @@
unsigned int size, bool slave)
{
struct spi_controller *ctlr;
+ size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
if (!dev)
return NULL;
- ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+ ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;
@@ -2108,7 +2463,7 @@
ctlr->dev.class = &spi_master_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
- spi_controller_set_devdata(ctlr, &ctlr[1]);
+ spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
return ctlr;
}
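
The ALIGN() contract in the kernel-doc above can be shown with a hypothetical probe (foo_priv and foo_probe are invented names, not from this patch):

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct foo_priv {
		u8 bounce[64];	/* DMA'd to/from, must stay cacheline safe */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct spi_controller *ctlr;

		/* Round @size up so the private area ends on a cache line */
		ctlr = spi_alloc_master(&pdev->dev,
					ALIGN(sizeof(struct foo_priv),
					      dma_get_cache_alignment()));
		if (!ctlr)
			return -ENOMEM;

		platform_set_drvdata(pdev, ctlr);
		return 0;
	}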
@@ -2158,7 +2513,7 @@
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
#ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
int nb, i, *cs;
struct device_node *np = ctlr->dev.of_node;
@@ -2191,11 +2546,85 @@
return 0;
}
#else
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
return 0;
}
#endif
+
+/**
+ * spi_get_gpio_descs() - grab chip select GPIOs for the master
+ * @ctlr: The SPI master to grab GPIO descriptors for
+ */
+static int spi_get_gpio_descs(struct spi_controller *ctlr)
+{
+ int nb, i;
+ struct gpio_desc **cs;
+ struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
+
+ nb = gpiod_count(dev, "cs");
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
+
+ /* No GPIOs at all is fine, else return the error */
+ if (nb == 0 || nb == -ENOENT)
+ return 0;
+ else if (nb < 0)
+ return nb;
+
+ cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ ctlr->cs_gpiods = cs;
+
+ for (i = 0; i < nb; i++) {
+ /*
+ * Most chipselects are active low; the inverted
+ * semantics are handled by special quirks in gpiolib,
+ * so initializing them to GPIOD_OUT_LOW here means
+ * "unasserted"; in most cases this will drive the physical
+ * line high.
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs[i]))
+ return PTR_ERR(cs[i]);
+
+ if (cs[i]) {
+ /*
+ * If we find a CS GPIO, name it after the device and
+ * chip select line.
+ */
+ char *gpioname;
+
+ gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
+ dev_name(dev), i);
+ if (!gpioname)
+ return -ENOMEM;
+ gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
+ }
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
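
To make the mask bookkeeping concrete, a worked example with illustrative values (not from the patch): suppose descriptors are found at chip selects 0 and 2 and only CS1 is left as a native line:

	#include <linux/bitops.h>

	static unsigned int example_unused_native_cs(void)
	{
		unsigned long native_cs_mask = BIT(1);	/* only CS1 has no GPIO */

		return ffs(~native_cs_mask) - 1;	/* == 0 */
	}

CS0 is thus the first select value not claimed by a native line; the host can safely program it into the hardware while the GPIO descriptors perform the actual selection.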
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
@@ -2244,7 +2673,7 @@
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
- int status = -ENODEV;
+ int status;
int id, first_dynamic;
if (!dev)
@@ -2258,17 +2687,6 @@
if (status)
return status;
- if (!spi_controller_is_slave(ctlr)) {
- status = of_spi_register_master(ctlr);
- if (status)
- return status;
- }
-
- /* even if it's just one always-selected device, there must
- * be at least one chipselect
- */
- if (ctlr->num_chipselect == 0)
- return -EINVAL;
if (ctlr->bus_num >= 0) {
/* devices with a fixed bus num must check-in with the num */
mutex_lock(&board_lock);
@@ -2320,14 +2738,37 @@
* registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
- status = device_add(&ctlr->dev);
- if (status < 0) {
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+
+ if (!spi_controller_is_slave(ctlr)) {
+ if (ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ goto free_bus_id;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ } else {
+ /* Legacy code path for GPIOs from DT */
+ status = of_spi_get_gpio_numbers(ctlr);
+ if (status)
+ goto free_bus_id;
+ }
}
+
+ /*
+ * Even if it's just one always-selected device, there must
+ * be at least one chipselect.
+ */
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
+ }
+
+ status = device_add(&ctlr->dev);
+ if (status < 0)
+ goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
@@ -2343,11 +2784,7 @@
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+ goto free_bus_id;
}
}
/* add statistics */
@@ -2362,7 +2799,12 @@
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
-done:
+ return status;
+
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_master_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
@@ -2612,12 +3054,9 @@
*/
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
- struct spi_res *res;
+ struct spi_res *res, *tmp;
- while (!list_empty(&message->resources)) {
- res = list_last_entry(&message->resources,
- struct spi_res, entry);
-
+ list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
if (res->release)
res->release(ctlr, message, res->data);
@@ -2681,8 +3120,7 @@
/* allocate the structure using spi_res */
rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
- insert * sizeof(struct spi_transfer)
- + sizeof(struct spi_replaced_transfers)
+ struct_size(rxfer, inserted_transfers, insert)
+ extradatasize,
gfp);
if (!rxfer)
@@ -2744,10 +3182,11 @@
/* add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay_usecs for all but the last */
+ /* clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay_usecs = 0;
+ xfer->delay.value = 0;
}
}
@@ -2771,11 +3210,6 @@
struct spi_replaced_transfers *srt;
size_t offset;
size_t count, i;
-
- /* warn once about this fact that we are splitting a transfer */
- dev_warn_once(&msg->spi->dev,
- "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
- xfer->len, maxsize);
/* calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
@@ -2925,7 +3359,8 @@
/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
*/
if ((spi->mode & SPI_3WIRE) && (spi->mode &
- (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current controller
@@ -2933,8 +3368,14 @@
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
+ /* Nothing prevents us from working with an active-high CS if it
+ * is driven by GPIO.
+ */
+ if (gpio_is_valid(spi->cs_gpio))
+ bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
- (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
if (ugly_bits) {
dev_warn(&spi->dev,
"setup: ignoring unsupported mode bits %x\n",
@@ -2959,10 +3400,42 @@
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->controller->max_speed_hz;
+ mutex_lock(&spi->controller->io_mutex);
+
if (spi->controller->setup)
status = spi->controller->setup(spi);
- spi_set_cs(spi, false);
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_get_sync(spi->controller->dev.parent);
+ if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
+ pm_runtime_put_noidle(spi->controller->dev.parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ /*
+ * We do not want to return a positive value from pm_runtime_get;
+ * there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false, true);
+ pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false, true);
+ }
+
+ mutex_unlock(&spi->controller->io_mutex);
+
+ if (spi->rt && !spi->controller->rt) {
+ spi->controller->rt = true;
+ spi_set_thread_rt(spi->controller);
+ }
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
@@ -2976,6 +3449,74 @@
return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
+
+/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ * @setup: CS setup time specified via @spi_delay
+ * @hold: CS hold time specified via @spi_delay
+ * @inactive: CS inactive delay between transfers specified via @spi_delay
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive)
+{
+ size_t len;
+
+ if (spi->controller->set_cs_timing)
+ return spi->controller->set_cs_timing(spi, setup, hold,
+ inactive);
+
+ if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Clock-cycle delays for CS not supported in SW mode\n");
+ return -ENOTSUPP;
+ }
+
+ len = sizeof(struct spi_delay);
+
+ /* copy delays to controller */
+ if (setup)
+ memcpy(&spi->controller->cs_setup, setup, len);
+ else
+ memset(&spi->controller->cs_setup, 0, len);
+
+ if (hold)
+ memcpy(&spi->controller->cs_hold, hold, len);
+ else
+ memset(&spi->controller->cs_hold, 0, len);
+
+ if (inactive)
+ memcpy(&spi->controller->cs_inactive, inactive, len);
+ else
+ memset(&spi->controller->cs_inactive, 0, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_set_cs_timing);
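
A client-side sketch of the new call (hypothetical peripheral and delay values; SPI_DELAY_UNIT_SCK would be rejected by the software fallback above):

	#include <linux/spi/spi.h>

	static int example_configure_cs(struct spi_device *spi)
	{
		struct spi_delay setup = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
		struct spi_delay hold = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };

		/* NULL for @inactive clears any previous inactive delay */
		return spi_set_cs_timing(spi, &setup, &hold, NULL);
	}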
+
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
@@ -2993,6 +3534,7 @@
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ spi->cs_gpiod ||
gpio_is_valid(spi->cs_gpio))) {
size_t maxsize;
int ret;
@@ -3039,17 +3581,18 @@
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
+ * Ensure transfer word_delay is at least as long as that required by
+ * the device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->effective_speed_hz = 0;
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (!xfer->speed_hz)
- xfer->speed_hz = ctlr->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
@@ -3109,6 +3652,9 @@
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
}
+
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
}
message->status = -EINPROGRESS;
@@ -3119,6 +3665,7 @@
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3133,6 +3680,13 @@
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
trace_spi_message_submit(message);
+
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
return ctlr->transfer(spi, message);
}
@@ -3437,8 +3991,7 @@
* is zero for success, else a negative errno status code.
* This call may only be used from a context that may sleep.
*
- * Parameters to this routine are always copied using a small buffer;
- * portable code should never use this for more than 32 bytes.
+ * Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
*
@@ -3501,37 +4054,25 @@
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_OF)
-static int __spi_of_device_match(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
/* must call put_device() when done with returned spi_device device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
- struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
- __spi_of_device_match);
+ struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
+
return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
-static int __spi_of_controller_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_master_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_slave_class, node);
if (!dev)
return NULL;
@@ -3602,11 +4143,6 @@
return ACPI_COMPANION(dev->parent) == data;
}
-static int spi_acpi_device_match(struct device *dev, void *data)
-{
- return ACPI_COMPANION(dev) == data;
-}
-
static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
@@ -3626,9 +4162,8 @@
{
struct device *dev;
- dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
-
- return dev ? to_spi_device(dev) : NULL;
+ dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
+ return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
@@ -3720,4 +4255,3 @@
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
-
--
Gitblit v1.6.2