.. | .. |
---|
1 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
---|
2 | | -/* |
---|
3 | | - * SPI init/core code |
---|
4 | | - * |
---|
5 | | - * Copyright (C) 2005 David Brownell |
---|
6 | | - * Copyright (C) 2008 Secret Lab Technologies Ltd. |
---|
7 | | - */ |
---|
| 2 | +// SPI init/core code |
---|
| 3 | +// |
---|
| 4 | +// Copyright (C) 2005 David Brownell |
---|
| 5 | +// Copyright (C) 2008 Secret Lab Technologies Ltd. |
---|
8 | 6 | |
---|
9 | 7 | #include <linux/kernel.h> |
---|
10 | 8 | #include <linux/device.h> |
---|
.. | .. |
---|
21 | 19 | #include <linux/spi/spi.h> |
---|
22 | 20 | #include <linux/spi/spi-mem.h> |
---|
23 | 21 | #include <linux/of_gpio.h> |
---|
| 22 | +#include <linux/gpio/consumer.h> |
---|
24 | 23 | #include <linux/pm_runtime.h> |
---|
25 | 24 | #include <linux/pm_domain.h> |
---|
26 | 25 | #include <linux/property.h> |
---|
.. | .. |
---|
37 | 36 | |
---|
38 | 37 | #define CREATE_TRACE_POINTS |
---|
39 | 38 | #include <trace/events/spi.h> |
---|
| 39 | +EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start); |
---|
| 40 | +EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); |
---|
40 | 41 | |
---|
41 | 42 | #include "internals.h" |
---|
42 | 43 | |
---|
.. | .. |
---|
45 | 46 | static void spidev_release(struct device *dev) |
---|
46 | 47 | { |
---|
47 | 48 | struct spi_device *spi = to_spi_device(dev); |
---|
48 | | - |
---|
49 | | - /* spi controllers may cleanup for released devices */ |
---|
50 | | - if (spi->controller->cleanup) |
---|
51 | | - spi->controller->cleanup(spi); |
---|
52 | 49 | |
---|
53 | 50 | spi_controller_put(spi->controller); |
---|
54 | 51 | kfree(spi->driver_override); |
---|
.. | .. |
---|
91 | 88 | if (len) { |
---|
92 | 89 | spi->driver_override = driver_override; |
---|
93 | 90 | } else { |
---|
94 | | - /* Emptry string, disable driver override */ |
---|
| 91 | + /* Empty string, disable driver override */ |
---|
95 | 92 | spi->driver_override = NULL; |
---|
96 | 93 | kfree(driver_override); |
---|
97 | 94 | } |
---|
.. | .. |
---|
469 | 466 | static LIST_HEAD(spi_controller_list); |
---|
470 | 467 | |
---|
471 | 468 | /* |
---|
472 | | - * Used to protect add/del opertion for board_info list and |
---|
| 469 | + * Used to protect add/del operation for board_info list and |
---|
473 | 470 | * spi_controller list, and their matching process |
---|
474 | 471 | * also used to protect object of type struct idr |
---|
475 | 472 | */ |
---|
.. | .. |
---|
516 | 513 | spi->dev.bus = &spi_bus_type; |
---|
517 | 514 | spi->dev.release = spidev_release; |
---|
518 | 515 | spi->cs_gpio = -ENOENT; |
---|
| 516 | + spi->mode = ctlr->buswidth_override_bits; |
---|
519 | 517 | |
---|
520 | 518 | spin_lock_init(&spi->statistics.lock); |
---|
521 | 519 | |
---|
.. | .. |
---|
546 | 544 | spi->chip_select == new_spi->chip_select) |
---|
547 | 545 | return -EBUSY; |
---|
548 | 546 | return 0; |
---|
| 547 | +} |
---|
| 548 | + |
---|
| 549 | +static void spi_cleanup(struct spi_device *spi) |
---|
| 550 | +{ |
---|
| 551 | + if (spi->controller->cleanup) |
---|
| 552 | + spi->controller->cleanup(spi); |
---|
549 | 553 | } |
---|
550 | 554 | |
---|
551 | 555 | /** |
---|
.. | .. |
---|
593 | 597 | goto done; |
---|
594 | 598 | } |
---|
595 | 599 | |
---|
596 | | - if (ctlr->cs_gpios) |
---|
| 600 | + /* Descriptors take precedence */ |
---|
| 601 | + if (ctlr->cs_gpiods) |
---|
| 602 | + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; |
---|
| 603 | + else if (ctlr->cs_gpios) |
---|
597 | 604 | spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; |
---|
598 | 605 | |
---|
599 | 606 | /* Drivers may modify this initial i/o setup, but will |
---|
.. | .. |
---|
609 | 616 | |
---|
610 | 617 | /* Device may be bound to an active driver when this returns */ |
---|
611 | 618 | status = device_add(&spi->dev); |
---|
612 | | - if (status < 0) |
---|
| 619 | + if (status < 0) { |
---|
613 | 620 | dev_err(dev, "can't add %s, status %d\n", |
---|
614 | 621 | dev_name(&spi->dev), status); |
---|
615 | | - else |
---|
| 622 | + spi_cleanup(spi); |
---|
| 623 | + } else { |
---|
616 | 624 | dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); |
---|
| 625 | + } |
---|
617 | 626 | |
---|
618 | 627 | done: |
---|
619 | 628 | mutex_unlock(&spi_add_lock); |
---|
.. | .. |
---|
706 | 715 | } |
---|
707 | 716 | if (ACPI_COMPANION(&spi->dev)) |
---|
708 | 717 | acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); |
---|
709 | | - device_unregister(&spi->dev); |
---|
| 718 | + device_del(&spi->dev); |
---|
| 719 | + spi_cleanup(spi); |
---|
| 720 | + put_device(&spi->dev); |
---|
710 | 721 | } |
---|
711 | 722 | EXPORT_SYMBOL_GPL(spi_unregister_device); |
---|
712 | 723 | |
---|
.. | .. |
---|
782 | 793 | |
---|
783 | 794 | /*-------------------------------------------------------------------------*/ |
---|
784 | 795 | |
---|
785 | | -static void spi_set_cs(struct spi_device *spi, bool enable) |
---|
| 796 | +static void spi_set_cs(struct spi_device *spi, bool enable, bool force) |
---|
786 | 797 | { |
---|
| 798 | + bool enable1 = enable; |
---|
| 799 | + |
---|
| 800 | + /* |
---|
| 801 | + * Avoid calling into the driver (or doing delays) if the chip select |
---|
| 802 | + * isn't actually changing from the last time this was called. |
---|
| 803 | + */ |
---|
| 804 | + if (!force && (spi->controller->last_cs_enable == enable) && |
---|
| 805 | + (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) |
---|
| 806 | + return; |
---|
| 807 | + |
---|
| 808 | + spi->controller->last_cs_enable = enable; |
---|
| 809 | + spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; |
---|
| 810 | + |
---|
| 811 | + if (!spi->controller->set_cs_timing) { |
---|
| 812 | + if (enable1) |
---|
| 813 | + spi_delay_exec(&spi->controller->cs_setup, NULL); |
---|
| 814 | + else |
---|
| 815 | + spi_delay_exec(&spi->controller->cs_hold, NULL); |
---|
| 816 | + } |
---|
| 817 | + |
---|
787 | 818 | if (spi->mode & SPI_CS_HIGH) |
---|
788 | 819 | enable = !enable; |
---|
789 | 820 | |
---|
790 | | - if (gpio_is_valid(spi->cs_gpio)) { |
---|
791 | | - /* Honour the SPI_NO_CS flag */ |
---|
792 | | - if (!(spi->mode & SPI_NO_CS)) |
---|
793 | | - gpio_set_value(spi->cs_gpio, !enable); |
---|
| 821 | + if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { |
---|
| 822 | + if (!(spi->mode & SPI_NO_CS)) { |
---|
| 823 | + if (spi->cs_gpiod) { |
---|
| 824 | + /* |
---|
| 825 | + * Historically ACPI has no means of expressing the GPIO polarity and |
---|
| 826 | + * thus the SPISerialBus() resource defines it on a per-chip |
---|
| 827 | + * basis. In order to avoid a chain of negations, the GPIO |
---|
| 828 | + * polarity is considered to be Active High. Even for the cases |
---|
| 829 | + * when _DSD() is involved (in the updated versions of ACPI) |
---|
| 830 | + * the GPIO CS polarity must be defined Active High to avoid |
---|
| 831 | + * ambiguity. That's why we use enable, which takes SPI_CS_HIGH |
---|
| 832 | + * into account. |
---|
| 833 | + */ |
---|
| 834 | + if (has_acpi_companion(&spi->dev)) |
---|
| 835 | + gpiod_set_value_cansleep(spi->cs_gpiod, !enable); |
---|
| 836 | + else |
---|
| 837 | + /* Polarity handled by GPIO library */ |
---|
| 838 | + gpiod_set_value_cansleep(spi->cs_gpiod, enable1); |
---|
| 839 | + } else { |
---|
| 840 | + /* |
---|
| 841 | + * invert the enable line, as active low is |
---|
| 842 | + * default for SPI. |
---|
| 843 | + */ |
---|
| 844 | + gpio_set_value_cansleep(spi->cs_gpio, !enable); |
---|
| 845 | + } |
---|
| 846 | + } |
---|
794 | 847 | /* Some SPI masters need both GPIO CS & slave_select */ |
---|
795 | 848 | if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && |
---|
796 | 849 | spi->controller->set_cs) |
---|
797 | 850 | spi->controller->set_cs(spi, !enable); |
---|
798 | 851 | } else if (spi->controller->set_cs) { |
---|
799 | 852 | spi->controller->set_cs(spi, !enable); |
---|
| 853 | + } |
---|
| 854 | + |
---|
| 855 | + if (!spi->controller->set_cs_timing) { |
---|
| 856 | + if (!enable1) |
---|
| 857 | + spi_delay_exec(&spi->controller->cs_inactive, NULL); |
---|
800 | 858 | } |
---|
801 | 859 | } |
---|
802 | 860 | |
---|
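The reworked spi_set_cs() above honours the controller's cs_setup/cs_hold/cs_inactive spi_delay fields whenever no set_cs_timing() callback is provided. As a rough sketch (not part of the patch; the 5 us figures are made up), a controller driver could describe fixed board timing like this:

```c
/* Hypothetical probe() fragment: let the core insert 5 us of CS setup/hold. */
ctlr->cs_setup.value = 5;
ctlr->cs_setup.unit = SPI_DELAY_UNIT_USECS;
ctlr->cs_hold.value = 5;
ctlr->cs_hold.unit = SPI_DELAY_UNIT_USECS;
```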
.. | .. |
---|
823 | 881 | int i, ret; |
---|
824 | 882 | |
---|
825 | 883 | if (vmalloced_buf || kmap_buf) { |
---|
826 | | - desc_len = min_t(int, max_seg_size, PAGE_SIZE); |
---|
| 884 | + desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); |
---|
827 | 885 | sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); |
---|
828 | 886 | } else if (virt_addr_valid(buf)) { |
---|
829 | | - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); |
---|
| 887 | + desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len); |
---|
830 | 888 | sgs = DIV_ROUND_UP(len, desc_len); |
---|
831 | 889 | } else { |
---|
832 | 890 | return -EINVAL; |
---|
.. | .. |
---|
888 | 946 | if (sgt->orig_nents) { |
---|
889 | 947 | dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); |
---|
890 | 948 | sg_free_table(sgt); |
---|
| 949 | + sgt->orig_nents = 0; |
---|
| 950 | + sgt->nents = 0; |
---|
891 | 951 | } |
---|
892 | 952 | } |
---|
893 | 953 | |
---|
.. | .. |
---|
965 | 1025 | spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); |
---|
966 | 1026 | } |
---|
967 | 1027 | |
---|
| 1028 | + ctlr->cur_msg_mapped = false; |
---|
| 1029 | + |
---|
968 | 1030 | return 0; |
---|
969 | 1031 | } |
---|
970 | 1032 | #else /* !CONFIG_HAS_DMA */ |
---|
.. | .. |
---|
1006 | 1068 | void *tmp; |
---|
1007 | 1069 | unsigned int max_tx, max_rx; |
---|
1008 | 1070 | |
---|
1009 | | - if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { |
---|
| 1071 | + if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) |
---|
| 1072 | + && !(msg->spi->mode & SPI_3WIRE)) { |
---|
1010 | 1073 | max_tx = 0; |
---|
1011 | 1074 | max_rx = 0; |
---|
1012 | 1075 | |
---|
.. | .. |
---|
1058 | 1121 | { |
---|
1059 | 1122 | struct spi_statistics *statm = &ctlr->statistics; |
---|
1060 | 1123 | struct spi_statistics *stats = &msg->spi->statistics; |
---|
1061 | | - unsigned long long ms = 1; |
---|
| 1124 | + u32 speed_hz = xfer->speed_hz; |
---|
| 1125 | + unsigned long long ms; |
---|
1062 | 1126 | |
---|
1063 | 1127 | if (spi_controller_is_slave(ctlr)) { |
---|
1064 | 1128 | if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { |
---|
.. | .. |
---|
1066 | 1130 | return -EINTR; |
---|
1067 | 1131 | } |
---|
1068 | 1132 | } else { |
---|
| 1133 | + if (!speed_hz) |
---|
| 1134 | + speed_hz = 100000; |
---|
| 1135 | + |
---|
1069 | 1136 | ms = 8LL * 1000LL * xfer->len; |
---|
1070 | | - do_div(ms, xfer->speed_hz); |
---|
| 1137 | + do_div(ms, speed_hz); |
---|
1071 | 1138 | ms += ms + 200; /* some tolerance */ |
---|
1072 | 1139 | |
---|
1073 | 1140 | if (ms > UINT_MAX) |
---|
.. | .. |
---|
1088 | 1155 | return 0; |
---|
1089 | 1156 | } |
---|
1090 | 1157 | |
---|
| 1158 | +static void _spi_transfer_delay_ns(u32 ns) |
---|
| 1159 | +{ |
---|
| 1160 | + if (!ns) |
---|
| 1161 | + return; |
---|
| 1162 | + if (ns <= 1000) { |
---|
| 1163 | + ndelay(ns); |
---|
| 1164 | + } else { |
---|
| 1165 | + u32 us = DIV_ROUND_UP(ns, 1000); |
---|
| 1166 | + |
---|
| 1167 | + if (us <= 10) |
---|
| 1168 | + udelay(us); |
---|
| 1169 | + else |
---|
| 1170 | + usleep_range(us, us + DIV_ROUND_UP(us, 10)); |
---|
| 1171 | + } |
---|
| 1172 | +} |
---|
| 1173 | + |
---|
| 1174 | +int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) |
---|
| 1175 | +{ |
---|
| 1176 | + u32 delay = _delay->value; |
---|
| 1177 | + u32 unit = _delay->unit; |
---|
| 1178 | + u32 hz; |
---|
| 1179 | + |
---|
| 1180 | + if (!delay) |
---|
| 1181 | + return 0; |
---|
| 1182 | + |
---|
| 1183 | + switch (unit) { |
---|
| 1184 | + case SPI_DELAY_UNIT_USECS: |
---|
| 1185 | + delay *= 1000; |
---|
| 1186 | + break; |
---|
| 1187 | + case SPI_DELAY_UNIT_NSECS: /* nothing to do here */ |
---|
| 1188 | + break; |
---|
| 1189 | + case SPI_DELAY_UNIT_SCK: |
---|
| 1190 | + /* clock cycles need to be obtained from spi_transfer */ |
---|
| 1191 | + if (!xfer) |
---|
| 1192 | + return -EINVAL; |
---|
| 1193 | + /* if there is no effective speed known, then approximate |
---|
| 1194 | + * by underestimating with half the requested hz |
---|
| 1195 | + */ |
---|
| 1196 | + hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; |
---|
| 1197 | + if (!hz) |
---|
| 1198 | + return -EINVAL; |
---|
| 1199 | + delay *= DIV_ROUND_UP(1000000000, hz); |
---|
| 1200 | + break; |
---|
| 1201 | + default: |
---|
| 1202 | + return -EINVAL; |
---|
| 1203 | + } |
---|
| 1204 | + |
---|
| 1205 | + return delay; |
---|
| 1206 | +} |
---|
| 1207 | +EXPORT_SYMBOL_GPL(spi_delay_to_ns); |
---|
| 1208 | + |
---|
| 1209 | +int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) |
---|
| 1210 | +{ |
---|
| 1211 | + int delay; |
---|
| 1212 | + |
---|
| 1213 | + might_sleep(); |
---|
| 1214 | + |
---|
| 1215 | + if (!_delay) |
---|
| 1216 | + return -EINVAL; |
---|
| 1217 | + |
---|
| 1218 | + delay = spi_delay_to_ns(_delay, xfer); |
---|
| 1219 | + if (delay < 0) |
---|
| 1220 | + return delay; |
---|
| 1221 | + |
---|
| 1222 | + _spi_transfer_delay_ns(delay); |
---|
| 1223 | + |
---|
| 1224 | + return 0; |
---|
| 1225 | +} |
---|
| 1226 | +EXPORT_SYMBOL_GPL(spi_delay_exec); |
---|
| 1227 | + |
---|
| 1228 | +static void _spi_transfer_cs_change_delay(struct spi_message *msg, |
---|
| 1229 | + struct spi_transfer *xfer) |
---|
| 1230 | +{ |
---|
| 1231 | + u32 delay = xfer->cs_change_delay.value; |
---|
| 1232 | + u32 unit = xfer->cs_change_delay.unit; |
---|
| 1233 | + int ret; |
---|
| 1234 | + |
---|
| 1235 | + /* return early on "fast" mode - for everything but USECS */ |
---|
| 1236 | + if (!delay) { |
---|
| 1237 | + if (unit == SPI_DELAY_UNIT_USECS) |
---|
| 1238 | + _spi_transfer_delay_ns(10000); |
---|
| 1239 | + return; |
---|
| 1240 | + } |
---|
| 1241 | + |
---|
| 1242 | + ret = spi_delay_exec(&xfer->cs_change_delay, xfer); |
---|
| 1243 | + if (ret) { |
---|
| 1244 | + dev_err_once(&msg->spi->dev, |
---|
| 1245 | + "Use of unsupported delay unit %i, using default of 10us\n", |
---|
| 1246 | + unit); |
---|
| 1247 | + _spi_transfer_delay_ns(10000); |
---|
| 1248 | + } |
---|
| 1249 | +} |
---|
| 1250 | + |
---|
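The spi_delay_to_ns()/spi_delay_exec() helpers exported above accept delays in microseconds, nanoseconds or SCK cycles. A small sketch (not part of the patch) of the SCK conversion: a four-cycle delay at an effective clock of 10 MHz resolves to 400 ns.

```c
/* Hypothetical illustration of the SCK unit handling in spi_delay_to_ns(). */
struct spi_delay delay = { .value = 4, .unit = SPI_DELAY_UNIT_SCK };
struct spi_transfer xfer = { .effective_speed_hz = 10 * 1000 * 1000 };

/* 4 * DIV_ROUND_UP(1000000000, 10000000) = 4 * 100 = 400 ns */
int ns = spi_delay_to_ns(&delay, &xfer);
```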
1091 | 1251 | /* |
---|
1092 | 1252 | * spi_transfer_one_message - Default implementation of transfer_one_message() |
---|
1093 | 1253 | * |
---|
.. | .. |
---|
1104 | 1264 | struct spi_statistics *statm = &ctlr->statistics; |
---|
1105 | 1265 | struct spi_statistics *stats = &msg->spi->statistics; |
---|
1106 | 1266 | |
---|
1107 | | - spi_set_cs(msg->spi, true); |
---|
| 1267 | + spi_set_cs(msg->spi, true, false); |
---|
1108 | 1268 | |
---|
1109 | 1269 | SPI_STATISTICS_INCREMENT_FIELD(statm, messages); |
---|
1110 | 1270 | SPI_STATISTICS_INCREMENT_FIELD(stats, messages); |
---|
.. | .. |
---|
1115 | 1275 | spi_statistics_add_transfer_stats(statm, xfer, ctlr); |
---|
1116 | 1276 | spi_statistics_add_transfer_stats(stats, xfer, ctlr); |
---|
1117 | 1277 | |
---|
1118 | | - if (xfer->tx_buf || xfer->rx_buf) { |
---|
| 1278 | + if (!ctlr->ptp_sts_supported) { |
---|
| 1279 | + xfer->ptp_sts_word_pre = 0; |
---|
| 1280 | + ptp_read_system_prets(xfer->ptp_sts); |
---|
| 1281 | + } |
---|
| 1282 | + |
---|
| 1283 | + if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) { |
---|
1119 | 1284 | reinit_completion(&ctlr->xfer_completion); |
---|
1120 | 1285 | |
---|
| 1286 | +fallback_pio: |
---|
1121 | 1287 | ret = ctlr->transfer_one(ctlr, msg->spi, xfer); |
---|
1122 | 1288 | if (ret < 0) { |
---|
| 1289 | + if (ctlr->cur_msg_mapped && |
---|
| 1290 | + (xfer->error & SPI_TRANS_FAIL_NO_START)) { |
---|
| 1291 | + __spi_unmap_msg(ctlr, msg); |
---|
| 1292 | + ctlr->fallback = true; |
---|
| 1293 | + xfer->error &= ~SPI_TRANS_FAIL_NO_START; |
---|
| 1294 | + goto fallback_pio; |
---|
| 1295 | + } |
---|
| 1296 | + |
---|
1123 | 1297 | SPI_STATISTICS_INCREMENT_FIELD(statm, |
---|
1124 | 1298 | errors); |
---|
1125 | 1299 | SPI_STATISTICS_INCREMENT_FIELD(stats, |
---|
.. | .. |
---|
1141 | 1315 | xfer->len); |
---|
1142 | 1316 | } |
---|
1143 | 1317 | |
---|
| 1318 | + if (!ctlr->ptp_sts_supported) { |
---|
| 1319 | + ptp_read_system_postts(xfer->ptp_sts); |
---|
| 1320 | + xfer->ptp_sts_word_post = xfer->len; |
---|
| 1321 | + } |
---|
| 1322 | + |
---|
1144 | 1323 | trace_spi_transfer_stop(msg, xfer); |
---|
1145 | 1324 | |
---|
1146 | 1325 | if (msg->status != -EINPROGRESS) |
---|
1147 | 1326 | goto out; |
---|
1148 | 1327 | |
---|
1149 | | - if (xfer->delay_usecs) { |
---|
1150 | | - u16 us = xfer->delay_usecs; |
---|
1151 | | - |
---|
1152 | | - if (us <= 10) |
---|
1153 | | - udelay(us); |
---|
1154 | | - else |
---|
1155 | | - usleep_range(us, us + DIV_ROUND_UP(us, 10)); |
---|
1156 | | - } |
---|
| 1328 | + spi_transfer_delay_exec(xfer); |
---|
1157 | 1329 | |
---|
1158 | 1330 | if (xfer->cs_change) { |
---|
1159 | 1331 | if (list_is_last(&xfer->transfer_list, |
---|
1160 | 1332 | &msg->transfers)) { |
---|
1161 | 1333 | keep_cs = true; |
---|
1162 | 1334 | } else { |
---|
1163 | | - spi_set_cs(msg->spi, false); |
---|
1164 | | - udelay(10); |
---|
1165 | | - spi_set_cs(msg->spi, true); |
---|
| 1335 | + spi_set_cs(msg->spi, false, false); |
---|
| 1336 | + _spi_transfer_cs_change_delay(msg, xfer); |
---|
| 1337 | + spi_set_cs(msg->spi, true, false); |
---|
1166 | 1338 | } |
---|
1167 | 1339 | } |
---|
1168 | 1340 | |
---|
.. | .. |
---|
1171 | 1343 | |
---|
1172 | 1344 | out: |
---|
1173 | 1345 | if (ret != 0 || !keep_cs) |
---|
1174 | | - spi_set_cs(msg->spi, false); |
---|
| 1346 | + spi_set_cs(msg->spi, false, false); |
---|
1175 | 1347 | |
---|
1176 | 1348 | if (msg->status == -EINPROGRESS) |
---|
1177 | 1349 | msg->status = ret; |
---|
.. | .. |
---|
1198 | 1370 | } |
---|
1199 | 1371 | EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); |
---|
1200 | 1372 | |
---|
| 1373 | +static void spi_idle_runtime_pm(struct spi_controller *ctlr) |
---|
| 1374 | +{ |
---|
| 1375 | + if (ctlr->auto_runtime_pm) { |
---|
| 1376 | + pm_runtime_mark_last_busy(ctlr->dev.parent); |
---|
| 1377 | + pm_runtime_put_autosuspend(ctlr->dev.parent); |
---|
| 1378 | + } |
---|
| 1379 | +} |
---|
| 1380 | + |
---|
1201 | 1381 | /** |
---|
1202 | 1382 | * __spi_pump_messages - function which processes spi message queue |
---|
1203 | 1383 | * @ctlr: controller to process queue for |
---|
.. | .. |
---|
1213 | 1393 | */ |
---|
1214 | 1394 | static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) |
---|
1215 | 1395 | { |
---|
1216 | | - unsigned long flags; |
---|
| 1396 | + struct spi_transfer *xfer; |
---|
| 1397 | + struct spi_message *msg; |
---|
1217 | 1398 | bool was_busy = false; |
---|
| 1399 | + unsigned long flags; |
---|
1218 | 1400 | int ret; |
---|
1219 | 1401 | |
---|
1220 | 1402 | /* Lock queue */ |
---|
.. | .. |
---|
1228 | 1410 | |
---|
1229 | 1411 | /* If another context is idling the device then defer */ |
---|
1230 | 1412 | if (ctlr->idling) { |
---|
1231 | | - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); |
---|
| 1413 | + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
---|
1232 | 1414 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
1233 | 1415 | return; |
---|
1234 | 1416 | } |
---|
.. | .. |
---|
1240 | 1422 | return; |
---|
1241 | 1423 | } |
---|
1242 | 1424 | |
---|
1243 | | - /* Only do teardown in the thread */ |
---|
| 1425 | + /* Defer any non-atomic teardown to the thread */ |
---|
1244 | 1426 | if (!in_kthread) { |
---|
1245 | | - kthread_queue_work(&ctlr->kworker, |
---|
1246 | | - &ctlr->pump_messages); |
---|
| 1427 | + if (!ctlr->dummy_rx && !ctlr->dummy_tx && |
---|
| 1428 | + !ctlr->unprepare_transfer_hardware) { |
---|
| 1429 | + spi_idle_runtime_pm(ctlr); |
---|
| 1430 | + ctlr->busy = false; |
---|
| 1431 | + trace_spi_controller_idle(ctlr); |
---|
| 1432 | + } else { |
---|
| 1433 | + kthread_queue_work(ctlr->kworker, |
---|
| 1434 | + &ctlr->pump_messages); |
---|
| 1435 | + } |
---|
1247 | 1436 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
1248 | 1437 | return; |
---|
1249 | 1438 | } |
---|
.. | .. |
---|
1260 | 1449 | ctlr->unprepare_transfer_hardware(ctlr)) |
---|
1261 | 1450 | dev_err(&ctlr->dev, |
---|
1262 | 1451 | "failed to unprepare transfer hardware\n"); |
---|
1263 | | - if (ctlr->auto_runtime_pm) { |
---|
1264 | | - pm_runtime_mark_last_busy(ctlr->dev.parent); |
---|
1265 | | - pm_runtime_put_autosuspend(ctlr->dev.parent); |
---|
1266 | | - } |
---|
| 1452 | + spi_idle_runtime_pm(ctlr); |
---|
1267 | 1453 | trace_spi_controller_idle(ctlr); |
---|
1268 | 1454 | |
---|
1269 | 1455 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
---|
.. | .. |
---|
1273 | 1459 | } |
---|
1274 | 1460 | |
---|
1275 | 1461 | /* Extract head of queue */ |
---|
1276 | | - ctlr->cur_msg = |
---|
1277 | | - list_first_entry(&ctlr->queue, struct spi_message, queue); |
---|
| 1462 | + msg = list_first_entry(&ctlr->queue, struct spi_message, queue); |
---|
| 1463 | + ctlr->cur_msg = msg; |
---|
1278 | 1464 | |
---|
1279 | | - list_del_init(&ctlr->cur_msg->queue); |
---|
| 1465 | + list_del_init(&msg->queue); |
---|
1280 | 1466 | if (ctlr->busy) |
---|
1281 | 1467 | was_busy = true; |
---|
1282 | 1468 | else |
---|
.. | .. |
---|
1303 | 1489 | ret = ctlr->prepare_transfer_hardware(ctlr); |
---|
1304 | 1490 | if (ret) { |
---|
1305 | 1491 | dev_err(&ctlr->dev, |
---|
1306 | | - "failed to prepare transfer hardware\n"); |
---|
| 1492 | + "failed to prepare transfer hardware: %d\n", |
---|
| 1493 | + ret); |
---|
1307 | 1494 | |
---|
1308 | 1495 | if (ctlr->auto_runtime_pm) |
---|
1309 | 1496 | pm_runtime_put(ctlr->dev.parent); |
---|
| 1497 | + |
---|
| 1498 | + msg->status = ret; |
---|
| 1499 | + spi_finalize_current_message(ctlr); |
---|
| 1500 | + |
---|
1310 | 1501 | mutex_unlock(&ctlr->io_mutex); |
---|
1311 | 1502 | return; |
---|
1312 | 1503 | } |
---|
1313 | 1504 | } |
---|
1314 | 1505 | |
---|
1315 | | - trace_spi_message_start(ctlr->cur_msg); |
---|
| 1506 | + trace_spi_message_start(msg); |
---|
1316 | 1507 | |
---|
1317 | 1508 | if (ctlr->prepare_message) { |
---|
1318 | | - ret = ctlr->prepare_message(ctlr, ctlr->cur_msg); |
---|
| 1509 | + ret = ctlr->prepare_message(ctlr, msg); |
---|
1319 | 1510 | if (ret) { |
---|
1320 | 1511 | dev_err(&ctlr->dev, "failed to prepare message: %d\n", |
---|
1321 | 1512 | ret); |
---|
1322 | | - ctlr->cur_msg->status = ret; |
---|
| 1513 | + msg->status = ret; |
---|
1323 | 1514 | spi_finalize_current_message(ctlr); |
---|
1324 | 1515 | goto out; |
---|
1325 | 1516 | } |
---|
1326 | 1517 | ctlr->cur_msg_prepared = true; |
---|
1327 | 1518 | } |
---|
1328 | 1519 | |
---|
1329 | | - ret = spi_map_msg(ctlr, ctlr->cur_msg); |
---|
| 1520 | + ret = spi_map_msg(ctlr, msg); |
---|
1330 | 1521 | if (ret) { |
---|
1331 | | - ctlr->cur_msg->status = ret; |
---|
| 1522 | + msg->status = ret; |
---|
1332 | 1523 | spi_finalize_current_message(ctlr); |
---|
1333 | 1524 | goto out; |
---|
1334 | 1525 | } |
---|
1335 | 1526 | |
---|
1336 | | - ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg); |
---|
| 1527 | + if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { |
---|
| 1528 | + list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
---|
| 1529 | + xfer->ptp_sts_word_pre = 0; |
---|
| 1530 | + ptp_read_system_prets(xfer->ptp_sts); |
---|
| 1531 | + } |
---|
| 1532 | + } |
---|
| 1533 | + |
---|
| 1534 | + ret = ctlr->transfer_one_message(ctlr, msg); |
---|
1337 | 1535 | if (ret) { |
---|
1338 | 1536 | dev_err(&ctlr->dev, |
---|
1339 | 1537 | "failed to transfer one message from queue\n"); |
---|
.. | .. |
---|
1360 | 1558 | __spi_pump_messages(ctlr, true); |
---|
1361 | 1559 | } |
---|
1362 | 1560 | |
---|
| 1561 | +/** |
---|
| 1562 | + * spi_take_timestamp_pre - helper for drivers to collect the beginning of the |
---|
| 1563 | + * TX timestamp for the requested byte from the SPI |
---|
| 1564 | + * transfer. The frequency with which this function |
---|
| 1565 | + * must be called (once per word, once for the whole |
---|
| 1566 | + * transfer, once per batch of words etc) is arbitrary |
---|
| 1567 | + * as long as the @tx buffer offset is greater than or |
---|
| 1568 | + * equal to the requested byte at the time of the |
---|
| 1569 | + * call. The timestamp is only taken once, at the |
---|
| 1570 | + * first such call. It is assumed that the driver |
---|
| 1571 | + * advances its @tx buffer pointer monotonically. |
---|
| 1572 | + * @ctlr: Pointer to the spi_controller structure of the driver |
---|
| 1573 | + * @xfer: Pointer to the transfer being timestamped |
---|
| 1574 | + * @progress: How many words (not bytes) have been transferred so far |
---|
| 1575 | + * @irqs_off: If true, will disable IRQs and preemption for the duration of the |
---|
| 1576 | + * transfer, for less jitter in time measurement. Only compatible |
---|
| 1577 | + * with PIO drivers. If true, must follow up with |
---|
| 1578 | + * spi_take_timestamp_post, otherwise the system will crash. |
---|
| 1579 | + * WARNING: for fully predictable results, the CPU frequency must |
---|
| 1580 | + * also be under control (governor). |
---|
| 1581 | + */ |
---|
| 1582 | +void spi_take_timestamp_pre(struct spi_controller *ctlr, |
---|
| 1583 | + struct spi_transfer *xfer, |
---|
| 1584 | + size_t progress, bool irqs_off) |
---|
| 1585 | +{ |
---|
| 1586 | + if (!xfer->ptp_sts) |
---|
| 1587 | + return; |
---|
| 1588 | + |
---|
| 1589 | + if (xfer->timestamped) |
---|
| 1590 | + return; |
---|
| 1591 | + |
---|
| 1592 | + if (progress > xfer->ptp_sts_word_pre) |
---|
| 1593 | + return; |
---|
| 1594 | + |
---|
| 1595 | + /* Capture the resolution of the timestamp */ |
---|
| 1596 | + xfer->ptp_sts_word_pre = progress; |
---|
| 1597 | + |
---|
| 1598 | + if (irqs_off) { |
---|
| 1599 | + local_irq_save(ctlr->irq_flags); |
---|
| 1600 | + preempt_disable(); |
---|
| 1601 | + } |
---|
| 1602 | + |
---|
| 1603 | + ptp_read_system_prets(xfer->ptp_sts); |
---|
| 1604 | +} |
---|
| 1605 | +EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); |
---|
| 1606 | + |
---|
| 1607 | +/** |
---|
| 1608 | + * spi_take_timestamp_post - helper for drivers to collect the end of the |
---|
| 1609 | + * TX timestamp for the requested byte from the SPI |
---|
| 1610 | + * transfer. Can be called with an arbitrary |
---|
| 1611 | + * frequency: only the first call where @tx exceeds |
---|
| 1612 | + * or is equal to the requested word will be |
---|
| 1613 | + * timestamped. |
---|
| 1614 | + * @ctlr: Pointer to the spi_controller structure of the driver |
---|
| 1615 | + * @xfer: Pointer to the transfer being timestamped |
---|
| 1616 | + * @progress: How many words (not bytes) have been transferred so far |
---|
| 1617 | + * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. |
---|
| 1618 | + */ |
---|
| 1619 | +void spi_take_timestamp_post(struct spi_controller *ctlr, |
---|
| 1620 | + struct spi_transfer *xfer, |
---|
| 1621 | + size_t progress, bool irqs_off) |
---|
| 1622 | +{ |
---|
| 1623 | + if (!xfer->ptp_sts) |
---|
| 1624 | + return; |
---|
| 1625 | + |
---|
| 1626 | + if (xfer->timestamped) |
---|
| 1627 | + return; |
---|
| 1628 | + |
---|
| 1629 | + if (progress < xfer->ptp_sts_word_post) |
---|
| 1630 | + return; |
---|
| 1631 | + |
---|
| 1632 | + ptp_read_system_postts(xfer->ptp_sts); |
---|
| 1633 | + |
---|
| 1634 | + if (irqs_off) { |
---|
| 1635 | + local_irq_restore(ctlr->irq_flags); |
---|
| 1636 | + preempt_enable(); |
---|
| 1637 | + } |
---|
| 1638 | + |
---|
| 1639 | + /* Capture the resolution of the timestamp */ |
---|
| 1640 | + xfer->ptp_sts_word_post = progress; |
---|
| 1641 | + |
---|
| 1642 | + xfer->timestamped = true; |
---|
| 1643 | +} |
---|
| 1644 | +EXPORT_SYMBOL_GPL(spi_take_timestamp_post); |
---|
| 1645 | + |
---|
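A sketch of how a PIO controller driver might bracket its FIFO writes with the two helpers above; only the spi_take_timestamp_pre()/spi_take_timestamp_post() calls follow the API added here, the FIFO write itself is hypothetical:

```c
/* Hypothetical 8-bit PIO loop; 'i' counts words, matching @progress above. */
static void example_pio_write(struct spi_controller *ctlr,
			      struct spi_transfer *xfer)
{
	const u8 *tx = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, false);
		example_write_fifo(tx[i]);	/* hypothetical register write */
		spi_take_timestamp_post(ctlr, xfer, i, false);
	}
}
```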
| 1646 | +/** |
---|
| 1647 | + * spi_set_thread_rt - set the controller to pump at realtime priority |
---|
| 1648 | + * @ctlr: controller to boost priority of |
---|
| 1649 | + * |
---|
| 1650 | + * This can be called because the controller requested realtime priority |
---|
| 1651 | + * (by setting the ->rt value before calling spi_register_controller()) or |
---|
| 1652 | + * because a device on the bus said that its transfers needed realtime |
---|
| 1653 | + * priority. |
---|
| 1654 | + * |
---|
| 1655 | + * NOTE: at the moment if any device on a bus says it needs realtime then |
---|
| 1656 | + * the thread will be at realtime priority for all transfers on that |
---|
| 1657 | + * controller. If this eventually becomes a problem we may see if we can |
---|
| 1658 | + * find a way to boost the priority only temporarily during relevant |
---|
| 1659 | + * transfers. |
---|
| 1660 | + */ |
---|
| 1661 | +static void spi_set_thread_rt(struct spi_controller *ctlr) |
---|
| 1662 | +{ |
---|
| 1663 | + dev_info(&ctlr->dev, |
---|
| 1664 | + "will run message pump with realtime priority\n"); |
---|
| 1665 | + sched_set_fifo(ctlr->kworker->task); |
---|
| 1666 | +} |
---|
| 1667 | + |
---|
1363 | 1668 | static int spi_init_queue(struct spi_controller *ctlr) |
---|
1364 | 1669 | { |
---|
1365 | | - struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
---|
1366 | | - |
---|
1367 | 1670 | ctlr->running = false; |
---|
1368 | 1671 | ctlr->busy = false; |
---|
1369 | 1672 | |
---|
1370 | | - kthread_init_worker(&ctlr->kworker); |
---|
1371 | | - ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, |
---|
1372 | | - "%s", dev_name(&ctlr->dev)); |
---|
1373 | | - if (IS_ERR(ctlr->kworker_task)) { |
---|
1374 | | - dev_err(&ctlr->dev, "failed to create message pump task\n"); |
---|
1375 | | - return PTR_ERR(ctlr->kworker_task); |
---|
| 1673 | + ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); |
---|
| 1674 | + if (IS_ERR(ctlr->kworker)) { |
---|
| 1675 | + dev_err(&ctlr->dev, "failed to create message pump kworker\n"); |
---|
| 1676 | + return PTR_ERR(ctlr->kworker); |
---|
1376 | 1677 | } |
---|
| 1678 | + |
---|
1377 | 1679 | kthread_init_work(&ctlr->pump_messages, spi_pump_messages); |
---|
1378 | 1680 | |
---|
1379 | 1681 | /* |
---|
.. | .. |
---|
1383 | 1685 | * request and the scheduling of the message pump thread. Without this |
---|
1384 | 1686 | * setting the message pump thread will remain at default priority. |
---|
1385 | 1687 | */ |
---|
1386 | | - if (ctlr->rt) { |
---|
1387 | | - dev_info(&ctlr->dev, |
---|
1388 | | - "will run message pump with realtime priority\n"); |
---|
1389 | | - sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); |
---|
1390 | | - } |
---|
| 1688 | + if (ctlr->rt) |
---|
| 1689 | + spi_set_thread_rt(ctlr); |
---|
1391 | 1690 | |
---|
1392 | 1691 | return 0; |
---|
1393 | 1692 | } |
---|
.. | .. |
---|
1426 | 1725 | */ |
---|
1427 | 1726 | void spi_finalize_current_message(struct spi_controller *ctlr) |
---|
1428 | 1727 | { |
---|
| 1728 | + struct spi_transfer *xfer; |
---|
1429 | 1729 | struct spi_message *mesg; |
---|
1430 | 1730 | unsigned long flags; |
---|
1431 | 1731 | int ret; |
---|
.. | .. |
---|
1433 | 1733 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
---|
1434 | 1734 | mesg = ctlr->cur_msg; |
---|
1435 | 1735 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
| 1736 | + |
---|
| 1737 | + if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { |
---|
| 1738 | + list_for_each_entry(xfer, &mesg->transfers, transfer_list) { |
---|
| 1739 | + ptp_read_system_postts(xfer->ptp_sts); |
---|
| 1740 | + xfer->ptp_sts_word_post = xfer->len; |
---|
| 1741 | + } |
---|
| 1742 | + } |
---|
| 1743 | + |
---|
| 1744 | + if (unlikely(ctlr->ptp_sts_supported)) |
---|
| 1745 | + list_for_each_entry(xfer, &mesg->transfers, transfer_list) |
---|
| 1746 | + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); |
---|
1436 | 1747 | |
---|
1437 | 1748 | spi_unmap_msg(ctlr, mesg); |
---|
1438 | 1749 | |
---|
.. | .. |
---|
1454 | 1765 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
---|
1455 | 1766 | ctlr->cur_msg = NULL; |
---|
1456 | 1767 | ctlr->cur_msg_prepared = false; |
---|
1457 | | - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); |
---|
| 1768 | + ctlr->fallback = false; |
---|
| 1769 | + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
---|
1458 | 1770 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
1459 | 1771 | |
---|
1460 | 1772 | trace_spi_message_done(mesg); |
---|
.. | .. |
---|
1480 | 1792 | ctlr->cur_msg = NULL; |
---|
1481 | 1793 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
1482 | 1794 | |
---|
1483 | | - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); |
---|
| 1795 | + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
---|
1484 | 1796 | |
---|
1485 | 1797 | return 0; |
---|
1486 | 1798 | } |
---|
.. | .. |
---|
1536 | 1848 | return ret; |
---|
1537 | 1849 | } |
---|
1538 | 1850 | |
---|
1539 | | - kthread_flush_worker(&ctlr->kworker); |
---|
1540 | | - kthread_stop(ctlr->kworker_task); |
---|
| 1851 | + kthread_destroy_worker(ctlr->kworker); |
---|
1541 | 1852 | |
---|
1542 | 1853 | return 0; |
---|
1543 | 1854 | } |
---|
.. | .. |
---|
1560 | 1871 | |
---|
1561 | 1872 | list_add_tail(&msg->queue, &ctlr->queue); |
---|
1562 | 1873 | if (!ctlr->busy && need_pump) |
---|
1563 | | - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); |
---|
| 1874 | + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
---|
1564 | 1875 | |
---|
1565 | 1876 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
---|
1566 | 1877 | return 0; |
---|
.. | .. |
---|
1637 | 1948 | spi->mode |= SPI_CPHA; |
---|
1638 | 1949 | if (of_property_read_bool(nc, "spi-cpol")) |
---|
1639 | 1950 | spi->mode |= SPI_CPOL; |
---|
1640 | | - if (of_property_read_bool(nc, "spi-cs-high")) |
---|
1641 | | - spi->mode |= SPI_CS_HIGH; |
---|
1642 | 1951 | if (of_property_read_bool(nc, "spi-3wire")) |
---|
1643 | 1952 | spi->mode |= SPI_3WIRE; |
---|
1644 | 1953 | if (of_property_read_bool(nc, "spi-lsb-first")) |
---|
1645 | 1954 | spi->mode |= SPI_LSB_FIRST; |
---|
| 1955 | + if (of_property_read_bool(nc, "spi-cs-high")) |
---|
| 1956 | + spi->mode |= SPI_CS_HIGH; |
---|
1646 | 1957 | |
---|
1647 | 1958 | /* Device DUAL/QUAD mode */ |
---|
1648 | 1959 | if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { |
---|
.. | .. |
---|
1654 | 1965 | break; |
---|
1655 | 1966 | case 4: |
---|
1656 | 1967 | spi->mode |= SPI_TX_QUAD; |
---|
| 1968 | + break; |
---|
| 1969 | + case 8: |
---|
| 1970 | + spi->mode |= SPI_TX_OCTAL; |
---|
1657 | 1971 | break; |
---|
1658 | 1972 | default: |
---|
1659 | 1973 | dev_warn(&ctlr->dev, |
---|
.. | .. |
---|
1673 | 1987 | case 4: |
---|
1674 | 1988 | spi->mode |= SPI_RX_QUAD; |
---|
1675 | 1989 | break; |
---|
| 1990 | + case 8: |
---|
| 1991 | + spi->mode |= SPI_RX_OCTAL; |
---|
| 1992 | + break; |
---|
1676 | 1993 | default: |
---|
1677 | 1994 | dev_warn(&ctlr->dev, |
---|
1678 | 1995 | "spi-rx-bus-width %d not supported\n", |
---|
.. | .. |
---|
1682 | 1999 | } |
---|
1683 | 2000 | |
---|
1684 | 2001 | if (spi_controller_is_slave(ctlr)) { |
---|
1685 | | - if (strcmp(nc->name, "slave")) { |
---|
| 2002 | + if (!of_node_name_eq(nc, "slave")) { |
---|
1686 | 2003 | dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", |
---|
1687 | 2004 | nc); |
---|
1688 | 2005 | return -EINVAL; |
---|
.. | .. |
---|
1700 | 2017 | spi->chip_select = value; |
---|
1701 | 2018 | |
---|
1702 | 2019 | /* Device speed */ |
---|
1703 | | - rc = of_property_read_u32(nc, "spi-max-frequency", &value); |
---|
1704 | | - if (rc) { |
---|
1705 | | - dev_err(&ctlr->dev, |
---|
1706 | | - "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); |
---|
1707 | | - return rc; |
---|
1708 | | - } |
---|
1709 | | - spi->max_speed_hz = value; |
---|
| 2020 | + if (!of_property_read_u32(nc, "spi-max-frequency", &value)) |
---|
| 2021 | + spi->max_speed_hz = value; |
---|
1710 | 2022 | |
---|
1711 | 2023 | return 0; |
---|
1712 | 2024 | } |
---|
.. | .. |
---|
1789 | 2101 | #endif |
---|
1790 | 2102 | |
---|
1791 | 2103 | #ifdef CONFIG_ACPI |
---|
1792 | | -static void acpi_spi_parse_apple_properties(struct spi_device *spi) |
---|
| 2104 | +struct acpi_spi_lookup { |
---|
| 2105 | + struct spi_controller *ctlr; |
---|
| 2106 | + u32 max_speed_hz; |
---|
| 2107 | + u32 mode; |
---|
| 2108 | + int irq; |
---|
| 2109 | + u8 bits_per_word; |
---|
| 2110 | + u8 chip_select; |
---|
| 2111 | +}; |
---|
| 2112 | + |
---|
| 2113 | +static void acpi_spi_parse_apple_properties(struct acpi_device *dev, |
---|
| 2114 | + struct acpi_spi_lookup *lookup) |
---|
1793 | 2115 | { |
---|
1794 | | - struct acpi_device *dev = ACPI_COMPANION(&spi->dev); |
---|
1795 | 2116 | const union acpi_object *obj; |
---|
1796 | 2117 | |
---|
1797 | 2118 | if (!x86_apple_machine) |
---|
.. | .. |
---|
1799 | 2120 | |
---|
1800 | 2121 | if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) |
---|
1801 | 2122 | && obj->buffer.length >= 4) |
---|
1802 | | - spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; |
---|
| 2123 | + lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; |
---|
1803 | 2124 | |
---|
1804 | 2125 | if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) |
---|
1805 | 2126 | && obj->buffer.length == 8) |
---|
1806 | | - spi->bits_per_word = *(u64 *)obj->buffer.pointer; |
---|
| 2127 | + lookup->bits_per_word = *(u64 *)obj->buffer.pointer; |
---|
1807 | 2128 | |
---|
1808 | 2129 | if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) |
---|
1809 | 2130 | && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) |
---|
1810 | | - spi->mode |= SPI_LSB_FIRST; |
---|
| 2131 | + lookup->mode |= SPI_LSB_FIRST; |
---|
1811 | 2132 | |
---|
1812 | 2133 | if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) |
---|
1813 | 2134 | && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) |
---|
1814 | | - spi->mode |= SPI_CPOL; |
---|
| 2135 | + lookup->mode |= SPI_CPOL; |
---|
1815 | 2136 | |
---|
1816 | 2137 | if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) |
---|
1817 | 2138 | && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) |
---|
1818 | | - spi->mode |= SPI_CPHA; |
---|
| 2139 | + lookup->mode |= SPI_CPHA; |
---|
1819 | 2140 | } |
---|
1820 | 2141 | |
---|
1821 | 2142 | static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) |
---|
1822 | 2143 | { |
---|
1823 | | - struct spi_device *spi = data; |
---|
1824 | | - struct spi_controller *ctlr = spi->controller; |
---|
| 2144 | + struct acpi_spi_lookup *lookup = data; |
---|
| 2145 | + struct spi_controller *ctlr = lookup->ctlr; |
---|
1825 | 2146 | |
---|
1826 | 2147 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { |
---|
1827 | 2148 | struct acpi_resource_spi_serialbus *sb; |
---|
| 2149 | + acpi_handle parent_handle; |
---|
| 2150 | + acpi_status status; |
---|
1828 | 2151 | |
---|
1829 | 2152 | sb = &ares->data.spi_serial_bus; |
---|
1830 | 2153 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { |
---|
| 2154 | + |
---|
| 2155 | + status = acpi_get_handle(NULL, |
---|
| 2156 | + sb->resource_source.string_ptr, |
---|
| 2157 | + &parent_handle); |
---|
| 2158 | + |
---|
| 2159 | + if (ACPI_FAILURE(status) || |
---|
| 2160 | + ACPI_HANDLE(ctlr->dev.parent) != parent_handle) |
---|
| 2161 | + return -ENODEV; |
---|
| 2162 | + |
---|
1831 | 2163 | /* |
---|
1832 | 2164 | * ACPI DeviceSelection numbering is handled by the |
---|
1833 | 2165 | * host controller driver in Windows and can vary |
---|
.. | .. |
---|
1840 | 2172 | sb->device_selection); |
---|
1841 | 2173 | if (cs < 0) |
---|
1842 | 2174 | return cs; |
---|
1843 | | - spi->chip_select = cs; |
---|
| 2175 | + lookup->chip_select = cs; |
---|
1844 | 2176 | } else { |
---|
1845 | | - spi->chip_select = sb->device_selection; |
---|
| 2177 | + lookup->chip_select = sb->device_selection; |
---|
1846 | 2178 | } |
---|
1847 | 2179 | |
---|
1848 | | - spi->max_speed_hz = sb->connection_speed; |
---|
| 2180 | + lookup->max_speed_hz = sb->connection_speed; |
---|
| 2181 | + lookup->bits_per_word = sb->data_bit_length; |
---|
1849 | 2182 | |
---|
1850 | 2183 | if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) |
---|
1851 | | - spi->mode |= SPI_CPHA; |
---|
| 2184 | + lookup->mode |= SPI_CPHA; |
---|
1852 | 2185 | if (sb->clock_polarity == ACPI_SPI_START_HIGH) |
---|
1853 | | - spi->mode |= SPI_CPOL; |
---|
| 2186 | + lookup->mode |= SPI_CPOL; |
---|
1854 | 2187 | if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) |
---|
1855 | | - spi->mode |= SPI_CS_HIGH; |
---|
| 2188 | + lookup->mode |= SPI_CS_HIGH; |
---|
1856 | 2189 | } |
---|
1857 | | - } else if (spi->irq < 0) { |
---|
| 2190 | + } else if (lookup->irq < 0) { |
---|
1858 | 2191 | struct resource r; |
---|
1859 | 2192 | |
---|
1860 | 2193 | if (acpi_dev_resource_interrupt(ares, 0, &r)) |
---|
1861 | | - spi->irq = r.start; |
---|
| 2194 | + lookup->irq = r.start; |
---|
1862 | 2195 | } |
---|
1863 | 2196 | |
---|
1864 | 2197 | /* Always tell the ACPI core to skip this resource */ |
---|
.. | .. |
---|
1868 | 2201 | static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, |
---|
1869 | 2202 | struct acpi_device *adev) |
---|
1870 | 2203 | { |
---|
| 2204 | + acpi_handle parent_handle = NULL; |
---|
1871 | 2205 | struct list_head resource_list; |
---|
| 2206 | + struct acpi_spi_lookup lookup = {}; |
---|
1872 | 2207 | struct spi_device *spi; |
---|
1873 | 2208 | int ret; |
---|
1874 | 2209 | |
---|
1875 | 2210 | if (acpi_bus_get_status(adev) || !adev->status.present || |
---|
1876 | 2211 | acpi_device_enumerated(adev)) |
---|
| 2212 | + return AE_OK; |
---|
| 2213 | + |
---|
| 2214 | + lookup.ctlr = ctlr; |
---|
| 2215 | + lookup.irq = -1; |
---|
| 2216 | + |
---|
| 2217 | + INIT_LIST_HEAD(&resource_list); |
---|
| 2218 | + ret = acpi_dev_get_resources(adev, &resource_list, |
---|
| 2219 | + acpi_spi_add_resource, &lookup); |
---|
| 2220 | + acpi_dev_free_resource_list(&resource_list); |
---|
| 2221 | + |
---|
| 2222 | + if (ret < 0) |
---|
| 2223 | + /* found SPI in _CRS but it points to another controller */ |
---|
| 2224 | + return AE_OK; |
---|
| 2225 | + |
---|
| 2226 | + if (!lookup.max_speed_hz && |
---|
| 2227 | + !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) && |
---|
| 2228 | + ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { |
---|
| 2229 | + /* Apple does not use _CRS but nested devices for SPI slaves */ |
---|
| 2230 | + acpi_spi_parse_apple_properties(adev, &lookup); |
---|
| 2231 | + } |
---|
| 2232 | + |
---|
| 2233 | + if (!lookup.max_speed_hz) |
---|
1877 | 2234 | return AE_OK; |
---|
1878 | 2235 | |
---|
1879 | 2236 | spi = spi_alloc_device(ctlr); |
---|
.. | .. |
---|
1883 | 2240 | return AE_NO_MEMORY; |
---|
1884 | 2241 | } |
---|
1885 | 2242 | |
---|
| 2243 | + |
---|
1886 | 2244 | ACPI_COMPANION_SET(&spi->dev, adev); |
---|
1887 | | - spi->irq = -1; |
---|
1888 | | - |
---|
1889 | | - INIT_LIST_HEAD(&resource_list); |
---|
1890 | | - ret = acpi_dev_get_resources(adev, &resource_list, |
---|
1891 | | - acpi_spi_add_resource, spi); |
---|
1892 | | - acpi_dev_free_resource_list(&resource_list); |
---|
1893 | | - |
---|
1894 | | - acpi_spi_parse_apple_properties(spi); |
---|
1895 | | - |
---|
1896 | | - if (ret < 0 || !spi->max_speed_hz) { |
---|
1897 | | - spi_dev_put(spi); |
---|
1898 | | - return AE_OK; |
---|
1899 | | - } |
---|
| 2245 | + spi->max_speed_hz = lookup.max_speed_hz; |
---|
| 2246 | + spi->mode |= lookup.mode; |
---|
| 2247 | + spi->irq = lookup.irq; |
---|
| 2248 | + spi->bits_per_word = lookup.bits_per_word; |
---|
| 2249 | + spi->chip_select = lookup.chip_select; |
---|
1900 | 2250 | |
---|
1901 | 2251 | acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, |
---|
1902 | 2252 | sizeof(spi->modalias)); |
---|
.. | .. |
---|
1929 | 2279 | return acpi_register_spi_device(ctlr, adev); |
---|
1930 | 2280 | } |
---|
1931 | 2281 | |
---|
| 2282 | +#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 |
---|
| 2283 | + |
---|
1932 | 2284 | static void acpi_register_spi_devices(struct spi_controller *ctlr) |
---|
1933 | 2285 | { |
---|
1934 | 2286 | acpi_status status; |
---|
.. | .. |
---|
1938 | 2290 | if (!handle) |
---|
1939 | 2291 | return; |
---|
1940 | 2292 | |
---|
1941 | | - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, |
---|
| 2293 | + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
---|
| 2294 | + SPI_ACPI_ENUMERATE_MAX_DEPTH, |
---|
1942 | 2295 | acpi_spi_add_device, NULL, ctlr, NULL); |
---|
1943 | 2296 | if (ACPI_FAILURE(status)) |
---|
1944 | 2297 | dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); |
---|
.. | .. |
---|
1984 | 2337 | return 1; |
---|
1985 | 2338 | } |
---|
1986 | 2339 | |
---|
1987 | | -static ssize_t spi_slave_show(struct device *dev, |
---|
1988 | | - struct device_attribute *attr, char *buf) |
---|
| 2340 | +static ssize_t slave_show(struct device *dev, struct device_attribute *attr, |
---|
| 2341 | + char *buf) |
---|
1989 | 2342 | { |
---|
1990 | 2343 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
---|
1991 | 2344 | dev); |
---|
.. | .. |
---|
1996 | 2349 | child ? to_spi_device(child)->modalias : NULL); |
---|
1997 | 2350 | } |
---|
1998 | 2351 | |
---|
1999 | | -static ssize_t spi_slave_store(struct device *dev, |
---|
2000 | | - struct device_attribute *attr, const char *buf, |
---|
2001 | | - size_t count) |
---|
| 2352 | +static ssize_t slave_store(struct device *dev, struct device_attribute *attr, |
---|
| 2353 | + const char *buf, size_t count) |
---|
2002 | 2354 | { |
---|
2003 | 2355 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
---|
2004 | 2356 | dev); |
---|
.. | .. |
---|
2036 | 2388 | return count; |
---|
2037 | 2389 | } |
---|
2038 | 2390 | |
---|
2039 | | -static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store); |
---|
| 2391 | +static DEVICE_ATTR_RW(slave); |
---|
2040 | 2392 | |
---|
2041 | 2393 | static struct attribute *spi_slave_attrs[] = { |
---|
2042 | 2394 | &dev_attr_slave.attr, |
---|
.. | .. |
---|
2067 | 2419 | * __spi_alloc_controller - allocate an SPI master or slave controller |
---|
2068 | 2420 | * @dev: the controller, possibly using the platform_bus |
---|
2069 | 2421 | * @size: how much zeroed driver-private data to allocate; the pointer to this |
---|
2070 | | - * memory is in the driver_data field of the returned device, |
---|
2071 | | - * accessible with spi_controller_get_devdata(). |
---|
| 2422 | + * memory is in the driver_data field of the returned device, accessible |
---|
| 2423 | + * with spi_controller_get_devdata(); the memory is cacheline aligned; |
---|
| 2424 | + * drivers granting DMA access to portions of their private data need to |
---|
| 2425 | + * round up @size using ALIGN(size, dma_get_cache_alignment()). |
---|
2072 | 2426 | * @slave: flag indicating whether to allocate an SPI master (false) or SPI |
---|
2073 | 2427 | * slave (true) controller |
---|
2074 | 2428 | * Context: can sleep |
---|
.. | .. |
---|
2090 | 2444 | unsigned int size, bool slave) |
---|
2091 | 2445 | { |
---|
2092 | 2446 | struct spi_controller *ctlr; |
---|
| 2447 | + size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); |
---|
2093 | 2448 | |
---|
2094 | 2449 | if (!dev) |
---|
2095 | 2450 | return NULL; |
---|
2096 | 2451 | |
---|
2097 | | - ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL); |
---|
| 2452 | + ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); |
---|
2098 | 2453 | if (!ctlr) |
---|
2099 | 2454 | return NULL; |
---|
2100 | 2455 | |
---|
.. | .. |
---|
2108 | 2463 | ctlr->dev.class = &spi_master_class; |
---|
2109 | 2464 | ctlr->dev.parent = dev; |
---|
2110 | 2465 | pm_suspend_ignore_children(&ctlr->dev, true); |
---|
2111 | | - spi_controller_set_devdata(ctlr, &ctlr[1]); |
---|
| 2466 | + spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); |
---|
2112 | 2467 | |
---|
2113 | 2468 | return ctlr; |
---|
2114 | 2469 | } |
---|
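With the private data now placed at a cacheline-aligned offset, a driver that performs DMA out of its own private area would, per the updated kerneldoc, also round its size up. A sketch (struct my_driver_data is hypothetical):

```c
/* Hypothetical probe() fragment honouring the new alignment note. */
struct spi_controller *ctlr;

ctlr = spi_alloc_master(&pdev->dev,
			ALIGN(sizeof(struct my_driver_data),
			      dma_get_cache_alignment()));
if (!ctlr)
	return -ENOMEM;
```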
.. | .. |
---|
2158 | 2513 | EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); |
---|
2159 | 2514 | |
---|
2160 | 2515 | #ifdef CONFIG_OF |
---|
2161 | | -static int of_spi_register_master(struct spi_controller *ctlr) |
---|
| 2516 | +static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) |
---|
2162 | 2517 | { |
---|
2163 | 2518 | int nb, i, *cs; |
---|
2164 | 2519 | struct device_node *np = ctlr->dev.of_node; |
---|
.. | .. |
---|
2191 | 2546 | return 0; |
---|
2192 | 2547 | } |
---|
2193 | 2548 | #else |
---|
2194 | | -static int of_spi_register_master(struct spi_controller *ctlr) |
---|
| 2549 | +static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) |
---|
2195 | 2550 | { |
---|
2196 | 2551 | return 0; |
---|
2197 | 2552 | } |
---|
2198 | 2553 | #endif |
---|
| 2554 | + |
---|
| 2555 | +/** |
---|
| 2556 | + * spi_get_gpio_descs() - grab chip select GPIOs for the master |
---|
| 2557 | + * @ctlr: The SPI master to grab GPIO descriptors for |
---|
| 2558 | + */ |
---|
| 2559 | +static int spi_get_gpio_descs(struct spi_controller *ctlr) |
---|
| 2560 | +{ |
---|
| 2561 | + int nb, i; |
---|
| 2562 | + struct gpio_desc **cs; |
---|
| 2563 | + struct device *dev = &ctlr->dev; |
---|
| 2564 | + unsigned long native_cs_mask = 0; |
---|
| 2565 | + unsigned int num_cs_gpios = 0; |
---|
| 2566 | + |
---|
| 2567 | + nb = gpiod_count(dev, "cs"); |
---|
| 2568 | + ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); |
---|
| 2569 | + |
---|
| 2570 | + /* No GPIOs at all is fine, else return the error */ |
---|
| 2571 | + if (nb == 0 || nb == -ENOENT) |
---|
| 2572 | + return 0; |
---|
| 2573 | + else if (nb < 0) |
---|
| 2574 | + return nb; |
---|
| 2575 | + |
---|
| 2576 | + cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), |
---|
| 2577 | + GFP_KERNEL); |
---|
| 2578 | + if (!cs) |
---|
| 2579 | + return -ENOMEM; |
---|
| 2580 | + ctlr->cs_gpiods = cs; |
---|
| 2581 | + |
---|
| 2582 | + for (i = 0; i < nb; i++) { |
---|
| 2583 | + /* |
---|
| 2584 | + * Most chipselects are active low, the inverted |
---|
| 2585 | + * semantics are handled by special quirks in gpiolib, |
---|
| 2586 | + * so initializing them to GPIOD_OUT_LOW here means |
---|
| 2587 | + * "unasserted"; in most cases this will drive the physical |
---|
| 2588 | + * line high. |
---|
| 2589 | + */ |
---|
| 2590 | + cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, |
---|
| 2591 | + GPIOD_OUT_LOW); |
---|
| 2592 | + if (IS_ERR(cs[i])) |
---|
| 2593 | + return PTR_ERR(cs[i]); |
---|
| 2594 | + |
---|
| 2595 | + if (cs[i]) { |
---|
| 2596 | + /* |
---|
| 2597 | + * If we find a CS GPIO, name it after the device and |
---|
| 2598 | + * chip select line. |
---|
| 2599 | + */ |
---|
| 2600 | + char *gpioname; |
---|
| 2601 | + |
---|
| 2602 | + gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", |
---|
| 2603 | + dev_name(dev), i); |
---|
| 2604 | + if (!gpioname) |
---|
| 2605 | + return -ENOMEM; |
---|
| 2606 | + gpiod_set_consumer_name(cs[i], gpioname); |
---|
| 2607 | + num_cs_gpios++; |
---|
| 2608 | + continue; |
---|
| 2609 | + } |
---|
| 2610 | + |
---|
| 2611 | + if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { |
---|
| 2612 | + dev_err(dev, "Invalid native chip select %d\n", i); |
---|
| 2613 | + return -EINVAL; |
---|
| 2614 | + } |
---|
| 2615 | + native_cs_mask |= BIT(i); |
---|
| 2616 | + } |
---|
| 2617 | + |
---|
| 2618 | + ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; |
---|
| 2619 | + |
---|
| 2620 | + if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && |
---|
| 2621 | + ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { |
---|
| 2622 | + dev_err(dev, "No unused native chip select available\n"); |
---|
| 2623 | + return -EINVAL; |
---|
| 2624 | + } |
---|
| 2625 | + |
---|
| 2626 | + return 0; |
---|
| 2627 | +} |
---|
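Controllers opt in to this descriptor-based path before registration; the core then fills ctlr->cs_gpiods via spi_get_gpio_descs() and no longer needs legacy cs_gpio numbers. A sketch (the values are hypothetical):

```c
/* Hypothetical probe() fragment: let the core claim "cs-gpios" as descriptors. */
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = 4;	/* hypothetical number of native chip selects */
ret = devm_spi_register_controller(&pdev->dev, ctlr);
```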
2199 | 2628 | |
---|
2200 | 2629 | static int spi_controller_check_ops(struct spi_controller *ctlr) |
---|
2201 | 2630 | { |
---|
.. | .. |
---|
2244 | 2673 | { |
---|
2245 | 2674 | struct device *dev = ctlr->dev.parent; |
---|
2246 | 2675 | struct boardinfo *bi; |
---|
2247 | | - int status = -ENODEV; |
---|
| 2676 | + int status; |
---|
2248 | 2677 | int id, first_dynamic; |
---|
2249 | 2678 | |
---|
2250 | 2679 | if (!dev) |
---|
.. | .. |
---|
2258 | 2687 | if (status) |
---|
2259 | 2688 | return status; |
---|
2260 | 2689 | |
---|
2261 | | - if (!spi_controller_is_slave(ctlr)) { |
---|
2262 | | - status = of_spi_register_master(ctlr); |
---|
2263 | | - if (status) |
---|
2264 | | - return status; |
---|
2265 | | - } |
---|
2266 | | - |
---|
2267 | | - /* even if it's just one always-selected device, there must |
---|
2268 | | - * be at least one chipselect |
---|
2269 | | - */ |
---|
2270 | | - if (ctlr->num_chipselect == 0) |
---|
2271 | | - return -EINVAL; |
---|
2272 | 2690 | if (ctlr->bus_num >= 0) { |
---|
2273 | 2691 | /* devices with a fixed bus num must check-in with the num */ |
---|
2274 | 2692 | mutex_lock(&board_lock); |
---|
.. | .. |
---|
2320 | 2738 | * registration fails if the bus ID is in use. |
---|
2321 | 2739 | */ |
---|
2322 | 2740 | dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); |
---|
2323 | | - status = device_add(&ctlr->dev); |
---|
2324 | | - if (status < 0) { |
---|
2325 | | - /* free bus id */ |
---|
2326 | | - mutex_lock(&board_lock); |
---|
2327 | | - idr_remove(&spi_master_idr, ctlr->bus_num); |
---|
2328 | | - mutex_unlock(&board_lock); |
---|
2329 | | - goto done; |
---|
| 2741 | + |
---|
| 2742 | + if (!spi_controller_is_slave(ctlr)) { |
---|
| 2743 | + if (ctlr->use_gpio_descriptors) { |
---|
| 2744 | + status = spi_get_gpio_descs(ctlr); |
---|
| 2745 | + if (status) |
---|
| 2746 | + goto free_bus_id; |
---|
| 2747 | + /* |
---|
| 2748 | + * A controller using GPIO descriptors always |
---|
| 2749 | + * supports SPI_CS_HIGH if need be. |
---|
| 2750 | + */ |
---|
| 2751 | + ctlr->mode_bits |= SPI_CS_HIGH; |
---|
| 2752 | + } else { |
---|
| 2753 | + /* Legacy code path for GPIOs from DT */ |
---|
| 2754 | + status = of_spi_get_gpio_numbers(ctlr); |
---|
| 2755 | + if (status) |
---|
| 2756 | + goto free_bus_id; |
---|
| 2757 | + } |
---|
2330 | 2758 | } |
---|
| 2759 | + |
---|
| 2760 | + /* |
---|
| 2761 | + * Even if it's just one always-selected device, there must |
---|
| 2762 | + * be at least one chipselect. |
---|
| 2763 | + */ |
---|
| 2764 | + if (!ctlr->num_chipselect) { |
---|
| 2765 | + status = -EINVAL; |
---|
| 2766 | + goto free_bus_id; |
---|
| 2767 | + } |
---|
| 2768 | + |
---|
| 2769 | + status = device_add(&ctlr->dev); |
---|
| 2770 | + if (status < 0) |
---|
| 2771 | + goto free_bus_id; |
---|
2331 | 2772 | dev_dbg(dev, "registered %s %s\n", |
---|
2332 | 2773 | spi_controller_is_slave(ctlr) ? "slave" : "master", |
---|
2333 | 2774 | dev_name(&ctlr->dev)); |
---|
.. | .. |
---|
2343 | 2784 | status = spi_controller_initialize_queue(ctlr); |
---|
2344 | 2785 | if (status) { |
---|
2345 | 2786 | device_del(&ctlr->dev); |
---|
2346 | | - /* free bus id */ |
---|
2347 | | - mutex_lock(&board_lock); |
---|
2348 | | - idr_remove(&spi_master_idr, ctlr->bus_num); |
---|
2349 | | - mutex_unlock(&board_lock); |
---|
2350 | | - goto done; |
---|
| 2787 | + goto free_bus_id; |
---|
2351 | 2788 | } |
---|
2352 | 2789 | } |
---|
2353 | 2790 | /* add statistics */ |
---|
.. | .. |
---|
2362 | 2799 | /* Register devices from the device tree and ACPI */ |
---|
2363 | 2800 | of_register_spi_devices(ctlr); |
---|
2364 | 2801 | acpi_register_spi_devices(ctlr); |
---|
2365 | | -done: |
---|
| 2802 | + return status; |
---|
| 2803 | + |
---|
| 2804 | +free_bus_id: |
---|
| 2805 | + mutex_lock(&board_lock); |
---|
| 2806 | + idr_remove(&spi_master_idr, ctlr->bus_num); |
---|
| 2807 | + mutex_unlock(&board_lock); |
---|
2366 | 2808 | return status; |
---|
2367 | 2809 | } |
---|
2368 | 2810 | EXPORT_SYMBOL_GPL(spi_register_controller); |
---|
.. | .. |
---|
2612 | 3054 | */ |
---|
2613 | 3055 | void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) |
---|
2614 | 3056 | { |
---|
2615 | | - struct spi_res *res; |
---|
| 3057 | + struct spi_res *res, *tmp; |
---|
2616 | 3058 | |
---|
2617 | | - while (!list_empty(&message->resources)) { |
---|
2618 | | - res = list_last_entry(&message->resources, |
---|
2619 | | - struct spi_res, entry); |
---|
2620 | | - |
---|
| 3059 | + list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { |
---|
2621 | 3060 | if (res->release) |
---|
2622 | 3061 | res->release(ctlr, message, res->data); |
---|
2623 | 3062 | |
---|
.. | .. |
---|
2681 | 3120 | |
---|
2682 | 3121 | /* allocate the structure using spi_res */ |
---|
2683 | 3122 | rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, |
---|
2684 | | - insert * sizeof(struct spi_transfer) |
---|
2685 | | - + sizeof(struct spi_replaced_transfers) |
---|
| 3123 | + struct_size(rxfer, inserted_transfers, insert) |
---|
2686 | 3124 | + extradatasize, |
---|
2687 | 3125 | gfp); |
---|
2688 | 3126 | if (!rxfer) |
---|
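struct_size() replaces the open-coded arithmetic because struct spi_replaced_transfers ends in a flexible array of spi_transfer (inserted_transfers); the helper computes the same total but saturates instead of wrapping on overflow. Roughly, with the saturation from <linux/overflow.h> elided:

	/* what struct_size(rxfer, inserted_transfers, insert) evaluates to;
	 * extradatasize is still added separately in the call above
	 */
	size_t sz = sizeof(struct spi_replaced_transfers) +
		    insert * sizeof(struct spi_transfer);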
.. | .. |
---|
2744 | 3182 | /* add to list */ |
---|
2745 | 3183 | list_add(&xfer->transfer_list, rxfer->replaced_after); |
---|
2746 | 3184 | |
---|
2747 | | - /* clear cs_change and delay_usecs for all but the last */ |
---|
| 3185 | + /* clear cs_change and delay for all but the last */ |
---|
2748 | 3186 | if (i) { |
---|
2749 | 3187 | xfer->cs_change = false; |
---|
2750 | 3188 | xfer->delay_usecs = 0; |
---|
| 3189 | + xfer->delay.value = 0; |
---|
2751 | 3190 | } |
---|
2752 | 3191 | } |
---|
2753 | 3192 | |
---|
.. | .. |
---|
2771 | 3210 | struct spi_replaced_transfers *srt; |
---|
2772 | 3211 | size_t offset; |
---|
2773 | 3212 | size_t count, i; |
---|
2774 | | - |
---|
2775 | | - /* warn once about this fact that we are splitting a transfer */ |
---|
2776 | | - dev_warn_once(&msg->spi->dev, |
---|
2777 | | - "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", |
---|
2778 | | - xfer->len, maxsize); |
---|
2779 | 3213 | |
---|
2780 | 3214 | /* calculate how many we have to replace */ |
---|
2781 | 3215 | count = DIV_ROUND_UP(xfer->len, maxsize); |
---|
.. | .. |
---|
2925 | 3359 | /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden |
---|
2926 | 3360 | */ |
---|
2927 | 3361 | if ((spi->mode & SPI_3WIRE) && (spi->mode & |
---|
2928 | | - (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) |
---|
| 3362 | + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
---|
| 3363 | + SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) |
---|
2929 | 3364 | return -EINVAL; |
---|
2930 | 3365 | /* help drivers fail *cleanly* when they need options |
---|
2931 | 3366 | * that aren't supported with their current controller |
---|
.. | .. |
---|
2933 | 3368 | * so it is ignored here. |
---|
2934 | 3369 | */ |
---|
2935 | 3370 | bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD); |
---|
| 3371 | +	/* Nothing prevents us from working with an active-high CS when it |
---|
| 3372 | +	 * is driven by GPIO. |
---|
| 3373 | +	 */ |
---|
| 3374 | + if (gpio_is_valid(spi->cs_gpio)) |
---|
| 3375 | + bad_bits &= ~SPI_CS_HIGH; |
---|
2936 | 3376 | ugly_bits = bad_bits & |
---|
2937 | | - (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); |
---|
| 3377 | + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
---|
| 3378 | + SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); |
---|
2938 | 3379 | if (ugly_bits) { |
---|
2939 | 3380 | dev_warn(&spi->dev, |
---|
2940 | 3381 | "setup: ignoring unsupported mode bits %x\n", |
---|
.. | .. |
---|
2959 | 3400 | if (!spi->max_speed_hz) |
---|
2960 | 3401 | spi->max_speed_hz = spi->controller->max_speed_hz; |
---|
2961 | 3402 | |
---|
| 3403 | + mutex_lock(&spi->controller->io_mutex); |
---|
| 3404 | + |
---|
2962 | 3405 | if (spi->controller->setup) |
---|
2963 | 3406 | status = spi->controller->setup(spi); |
---|
2964 | 3407 | |
---|
2965 | | - spi_set_cs(spi, false); |
---|
| 3408 | + if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { |
---|
| 3409 | + status = pm_runtime_get_sync(spi->controller->dev.parent); |
---|
| 3410 | + if (status < 0) { |
---|
| 3411 | + mutex_unlock(&spi->controller->io_mutex); |
---|
| 3412 | + pm_runtime_put_noidle(spi->controller->dev.parent); |
---|
| 3413 | + dev_err(&spi->controller->dev, "Failed to power device: %d\n", |
---|
| 3414 | + status); |
---|
| 3415 | + return status; |
---|
| 3416 | + } |
---|
| 3417 | + |
---|
| 3418 | + /* |
---|
| 3419 | +		 * We do not want to return a positive value from pm_runtime_get; |
---|
| 3420 | + * there are many instances of devices calling spi_setup() and |
---|
| 3421 | + * checking for a non-zero return value instead of a negative |
---|
| 3422 | + * return value. |
---|
| 3423 | + */ |
---|
| 3424 | + status = 0; |
---|
| 3425 | + |
---|
| 3426 | + spi_set_cs(spi, false, true); |
---|
| 3427 | + pm_runtime_mark_last_busy(spi->controller->dev.parent); |
---|
| 3428 | + pm_runtime_put_autosuspend(spi->controller->dev.parent); |
---|
| 3429 | + } else { |
---|
| 3430 | + spi_set_cs(spi, false, true); |
---|
| 3431 | + } |
---|
| 3432 | + |
---|
| 3433 | + mutex_unlock(&spi->controller->io_mutex); |
---|
| 3434 | + |
---|
| 3435 | + if (spi->rt && !spi->controller->rt) { |
---|
| 3436 | + spi->controller->rt = true; |
---|
| 3437 | + spi_set_thread_rt(spi->controller); |
---|
| 3438 | + } |
---|
2966 | 3439 | |
---|
2967 | 3440 | dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", |
---|
2968 | 3441 | (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), |
---|
.. | .. |
---|
2976 | 3449 | return status; |
---|
2977 | 3450 | } |
---|
2978 | 3451 | EXPORT_SYMBOL_GPL(spi_setup); |
---|
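The mode validation above means a peripheral driver can request wide-bus or CS-polarity options optimistically: unsupported DUAL/QUAD/OCTAL bits are only warned about and dropped, while other unsupported bits make spi_setup() fail with -EINVAL. A minimal caller sketch, where foo_dev_probe is a hypothetical client-driver probe:

	static int foo_dev_probe(struct spi_device *spi)
	{
		int ret;

		/* DUAL/QUAD/OCTAL bits are warned about and dropped if the
		 * controller lacks them; other unsupported bits fail with -EINVAL.
		 */
		spi->mode |= SPI_MODE_3 | SPI_TX_QUAD;
		spi->bits_per_word = 8;
		spi->max_speed_hz = 10000000;	/* 0 inherits the controller default */

		ret = spi_setup(spi);
		if (ret)
			return ret;

		return 0;
	}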
| 3452 | + |
---|
| 3453 | +/** |
---|
| 3454 | + * spi_set_cs_timing - configure CS setup, hold, and inactive delays |
---|
| 3455 | + * @spi: the device that requires specific CS timing configuration |
---|
| 3456 | + * @setup: CS setup time specified via @spi_delay |
---|
| 3457 | + * @hold: CS hold time specified via @spi_delay |
---|
| 3458 | + * @inactive: CS inactive delay between transfers specified via @spi_delay |
---|
| 3459 | + * |
---|
| 3460 | + * Return: zero on success, else a negative error code. |
---|
| 3461 | + */ |
---|
| 3462 | +int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup, |
---|
| 3463 | + struct spi_delay *hold, struct spi_delay *inactive) |
---|
| 3464 | +{ |
---|
| 3465 | + size_t len; |
---|
| 3466 | + |
---|
| 3467 | + if (spi->controller->set_cs_timing) |
---|
| 3468 | + return spi->controller->set_cs_timing(spi, setup, hold, |
---|
| 3469 | + inactive); |
---|
| 3470 | + |
---|
| 3471 | + if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) || |
---|
| 3472 | + (hold && hold->unit == SPI_DELAY_UNIT_SCK) || |
---|
| 3473 | + (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) { |
---|
| 3474 | + dev_err(&spi->dev, |
---|
| 3475 | + "Clock-cycle delays for CS not supported in SW mode\n"); |
---|
| 3476 | + return -ENOTSUPP; |
---|
| 3477 | + } |
---|
| 3478 | + |
---|
| 3479 | + len = sizeof(struct spi_delay); |
---|
| 3480 | + |
---|
| 3481 | + /* copy delays to controller */ |
---|
| 3482 | + if (setup) |
---|
| 3483 | + memcpy(&spi->controller->cs_setup, setup, len); |
---|
| 3484 | + else |
---|
| 3485 | + memset(&spi->controller->cs_setup, 0, len); |
---|
| 3486 | + |
---|
| 3487 | + if (hold) |
---|
| 3488 | + memcpy(&spi->controller->cs_hold, hold, len); |
---|
| 3489 | + else |
---|
| 3490 | + memset(&spi->controller->cs_hold, 0, len); |
---|
| 3491 | + |
---|
| 3492 | + if (inactive) |
---|
| 3493 | + memcpy(&spi->controller->cs_inactive, inactive, len); |
---|
| 3494 | + else |
---|
| 3495 | + memset(&spi->controller->cs_inactive, 0, len); |
---|
| 3496 | + |
---|
| 3497 | + return 0; |
---|
| 3498 | +} |
---|
| 3499 | +EXPORT_SYMBOL_GPL(spi_set_cs_timing); |
---|
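When the controller does not provide a set_cs_timing() op, only time-based units can be honoured by the core; SPI_DELAY_UNIT_SCK delays are rejected with -ENOTSUPP as shown above. A hypothetical caller (foo_config_cs is not part of the API):

	static int foo_config_cs(struct spi_device *spi)
	{
		struct spi_delay setup = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
		struct spi_delay hold = { .value = 200, .unit = SPI_DELAY_UNIT_NSECS };

		/* passing NULL for @inactive clears any previous inactive delay */
		return spi_set_cs_timing(spi, &setup, &hold, NULL);
	}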
| 3500 | + |
---|
| 3501 | +static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, |
---|
| 3502 | + struct spi_device *spi) |
---|
| 3503 | +{ |
---|
| 3504 | + int delay1, delay2; |
---|
| 3505 | + |
---|
| 3506 | + delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); |
---|
| 3507 | + if (delay1 < 0) |
---|
| 3508 | + return delay1; |
---|
| 3509 | + |
---|
| 3510 | + delay2 = spi_delay_to_ns(&spi->word_delay, xfer); |
---|
| 3511 | + if (delay2 < 0) |
---|
| 3512 | + return delay2; |
---|
| 3513 | + |
---|
| 3514 | + if (delay1 < delay2) |
---|
| 3515 | + memcpy(&xfer->word_delay, &spi->word_delay, |
---|
| 3516 | + sizeof(xfer->word_delay)); |
---|
| 3517 | + |
---|
| 3518 | + return 0; |
---|
| 3519 | +} |
---|
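_spi_xfer_word_delay_update() converts both delays to nanoseconds and keeps whichever is longer, so a device-wide spi->word_delay acts as a floor for every transfer. An illustrative sketch (foo_set_word_delays and the values are hypothetical):

	static void foo_set_word_delays(struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		/* device-wide floor: at least 5 us between words */
		spi->word_delay.value = 5;
		spi->word_delay.unit = SPI_DELAY_UNIT_USECS;

		/* a transfer asking for only 1 us ends up with 5 us after validation */
		xfer->word_delay.value = 1;
		xfer->word_delay.unit = SPI_DELAY_UNIT_USECS;
	}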
2979 | 3520 | |
---|
2980 | 3521 | static int __spi_validate(struct spi_device *spi, struct spi_message *message) |
---|
2981 | 3522 | { |
---|
.. | .. |
---|
2993 | 3534 | * cs_change is set for each transfer. |
---|
2994 | 3535 | */ |
---|
2995 | 3536 | if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || |
---|
| 3537 | + spi->cs_gpiod || |
---|
2996 | 3538 | gpio_is_valid(spi->cs_gpio))) { |
---|
2997 | 3539 | size_t maxsize; |
---|
2998 | 3540 | int ret; |
---|
.. | .. |
---|
3039 | 3581 | * it is not set for this transfer. |
---|
3040 | 3582 | * Set transfer tx_nbits and rx_nbits as single transfer default |
---|
3041 | 3583 | * (SPI_NBITS_SINGLE) if it is not set for this transfer. |
---|
| 3584 | + * Ensure the transfer word_delay is at least as long as that required |
---|
| 3585 | + * by the device itself. |
---|
3042 | 3586 | */ |
---|
3043 | 3587 | message->frame_length = 0; |
---|
3044 | 3588 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
---|
| 3589 | + xfer->effective_speed_hz = 0; |
---|
3045 | 3590 | message->frame_length += xfer->len; |
---|
3046 | 3591 | if (!xfer->bits_per_word) |
---|
3047 | 3592 | xfer->bits_per_word = spi->bits_per_word; |
---|
3048 | 3593 | |
---|
3049 | 3594 | if (!xfer->speed_hz) |
---|
3050 | 3595 | xfer->speed_hz = spi->max_speed_hz; |
---|
3051 | | - if (!xfer->speed_hz) |
---|
3052 | | - xfer->speed_hz = ctlr->max_speed_hz; |
---|
3053 | 3596 | |
---|
3054 | 3597 | if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) |
---|
3055 | 3598 | xfer->speed_hz = ctlr->max_speed_hz; |
---|
.. | .. |
---|
3109 | 3652 | !(spi->mode & SPI_RX_QUAD)) |
---|
3110 | 3653 | return -EINVAL; |
---|
3111 | 3654 | } |
---|
| 3655 | + |
---|
| 3656 | + if (_spi_xfer_word_delay_update(xfer, spi)) |
---|
| 3657 | + return -EINVAL; |
---|
3112 | 3658 | } |
---|
3113 | 3659 | |
---|
3114 | 3660 | message->status = -EINPROGRESS; |
---|
.. | .. |
---|
3119 | 3665 | static int __spi_async(struct spi_device *spi, struct spi_message *message) |
---|
3120 | 3666 | { |
---|
3121 | 3667 | struct spi_controller *ctlr = spi->controller; |
---|
| 3668 | + struct spi_transfer *xfer; |
---|
3122 | 3669 | |
---|
3123 | 3670 | /* |
---|
3124 | 3671 | * Some controllers do not support doing regular SPI transfers. Return |
---|
.. | .. |
---|
3133 | 3680 | SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); |
---|
3134 | 3681 | |
---|
3135 | 3682 | trace_spi_message_submit(message); |
---|
| 3683 | + |
---|
| 3684 | + if (!ctlr->ptp_sts_supported) { |
---|
| 3685 | + list_for_each_entry(xfer, &message->transfers, transfer_list) { |
---|
| 3686 | + xfer->ptp_sts_word_pre = 0; |
---|
| 3687 | + ptp_read_system_prets(xfer->ptp_sts); |
---|
| 3688 | + } |
---|
| 3689 | + } |
---|
3136 | 3690 | |
---|
3137 | 3691 | return ctlr->transfer(spi, message); |
---|
3138 | 3692 | } |
---|
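__spi_async() is the common entry point behind spi_async() and the sync wrappers; when the controller cannot timestamp transfers itself, it pre-arms a system-time snapshot per transfer before handing the message to the controller. A minimal asynchronous caller could look like the sketch below; foo_xfer/foo_complete are hypothetical and the buffers are assumed DMA-safe.

	#include <linux/completion.h>
	#include <linux/spi/spi.h>

	static void foo_complete(void *context)
	{
		complete(context);		/* message->status carries the result */
	}

	static int foo_xfer(struct spi_device *spi, void *tx, void *rx, size_t len)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct spi_transfer t = {
			.tx_buf = tx,
			.rx_buf = rx,
			.len = len,
		};
		struct spi_message m;
		int ret;

		spi_message_init_with_transfers(&m, &t, 1);
		m.complete = foo_complete;
		m.context = &done;

		ret = spi_async(spi, &m);
		if (ret)
			return ret;

		wait_for_completion(&done);
		return m.status;
	}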
.. | .. |
---|
3437 | 3991 | * is zero for success, else a negative errno status code. |
---|
3438 | 3992 | * This call may only be used from a context that may sleep. |
---|
3439 | 3993 | * |
---|
3440 | | - * Parameters to this routine are always copied using a small buffer; |
---|
3441 | | - * portable code should never use this for more than 32 bytes. |
---|
| 3994 | + * Parameters to this routine are always copied using a small buffer. |
---|
3442 | 3995 | * Performance-sensitive or bulk transfer code should instead use |
---|
3443 | 3996 | * spi_{async,sync}() calls with dma-safe buffers. |
---|
3444 | 3997 | * |
---|
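Because spi_write_then_read() bounce-buffers both directions internally, it suits small register-style accesses from stack variables; larger or hot-path transfers should use spi_sync()/spi_async() with DMA-safe buffers, as the comment says. A typical helper might look like this (foo_read_reg is hypothetical):

	static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
	{
		/* one command byte out, one data byte back, both copied internally */
		return spi_write_then_read(spi, &reg, 1, val, 1);
	}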
.. | .. |
---|
3501 | 4054 | /*-------------------------------------------------------------------------*/ |
---|
3502 | 4055 | |
---|
3503 | 4056 | #if IS_ENABLED(CONFIG_OF) |
---|
3504 | | -static int __spi_of_device_match(struct device *dev, void *data) |
---|
3505 | | -{ |
---|
3506 | | - return dev->of_node == data; |
---|
3507 | | -} |
---|
3508 | | - |
---|
3509 | 4057 | /* must call put_device() when done with the returned spi_device */ |
---|
3510 | 4058 | struct spi_device *of_find_spi_device_by_node(struct device_node *node) |
---|
3511 | 4059 | { |
---|
3512 | | - struct device *dev = bus_find_device(&spi_bus_type, NULL, node, |
---|
3513 | | - __spi_of_device_match); |
---|
| 4060 | + struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); |
---|
| 4061 | + |
---|
3514 | 4062 | return dev ? to_spi_device(dev) : NULL; |
---|
3515 | 4063 | } |
---|
3516 | 4064 | EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); |
---|
3517 | 4065 | #endif /* IS_ENABLED(CONFIG_OF) */ |
---|
3518 | 4066 | |
---|
3519 | 4067 | #if IS_ENABLED(CONFIG_OF_DYNAMIC) |
---|
3520 | | -static int __spi_of_controller_match(struct device *dev, const void *data) |
---|
3521 | | -{ |
---|
3522 | | - return dev->of_node == data; |
---|
3523 | | -} |
---|
3524 | | - |
---|
3525 | 4068 | /* the spi controllers are not using spi_bus, so we find them another way */ |
---|
3526 | 4069 | static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) |
---|
3527 | 4070 | { |
---|
3528 | 4071 | struct device *dev; |
---|
3529 | 4072 | |
---|
3530 | | - dev = class_find_device(&spi_master_class, NULL, node, |
---|
3531 | | - __spi_of_controller_match); |
---|
| 4073 | + dev = class_find_device_by_of_node(&spi_master_class, node); |
---|
3532 | 4074 | if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) |
---|
3533 | | - dev = class_find_device(&spi_slave_class, NULL, node, |
---|
3534 | | - __spi_of_controller_match); |
---|
| 4075 | + dev = class_find_device_by_of_node(&spi_slave_class, node); |
---|
3535 | 4076 | if (!dev) |
---|
3536 | 4077 | return NULL; |
---|
3537 | 4078 | |
---|
.. | .. |
---|
3602 | 4143 | return ACPI_COMPANION(dev->parent) == data; |
---|
3603 | 4144 | } |
---|
3604 | 4145 | |
---|
3605 | | -static int spi_acpi_device_match(struct device *dev, void *data) |
---|
3606 | | -{ |
---|
3607 | | - return ACPI_COMPANION(dev) == data; |
---|
3608 | | -} |
---|
3609 | | - |
---|
3610 | 4146 | static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) |
---|
3611 | 4147 | { |
---|
3612 | 4148 | struct device *dev; |
---|
.. | .. |
---|
3626 | 4162 | { |
---|
3627 | 4163 | struct device *dev; |
---|
3628 | 4164 | |
---|
3629 | | - dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); |
---|
3630 | | - |
---|
3631 | | - return dev ? to_spi_device(dev) : NULL; |
---|
| 4165 | + dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); |
---|
| 4166 | + return to_spi_device(dev); |
---|
3632 | 4167 | } |
---|
3633 | 4168 | |
---|
3634 | 4169 | static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, |
---|
.. | .. |
---|
3720 | 4255 | * include needing to have boardinfo data structures be much more public. |
---|
3721 | 4256 | */ |
---|
3722 | 4257 | postcore_initcall(spi_init); |
---|
3723 | | - |
---|