hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/spi/spi-fsl-dspi.c
....@@ -10,253 +10,349 @@
1010 #include <linux/delay.h>
1111 #include <linux/dmaengine.h>
1212 #include <linux/dma-mapping.h>
13
-#include <linux/err.h>
14
-#include <linux/errno.h>
1513 #include <linux/interrupt.h>
16
-#include <linux/io.h>
1714 #include <linux/kernel.h>
18
-#include <linux/math64.h>
1915 #include <linux/module.h>
20
-#include <linux/of.h>
2116 #include <linux/of_device.h>
2217 #include <linux/pinctrl/consumer.h>
23
-#include <linux/platform_device.h>
24
-#include <linux/pm_runtime.h>
2518 #include <linux/regmap.h>
26
-#include <linux/sched.h>
2719 #include <linux/spi/spi.h>
2820 #include <linux/spi/spi-fsl-dspi.h>
29
-#include <linux/spi/spi_bitbang.h>
30
-#include <linux/time.h>
3121
32
-#define DRIVER_NAME "fsl-dspi"
22
+#define DRIVER_NAME "fsl-dspi"
3323
34
-#ifdef CONFIG_M5441x
35
-#define DSPI_FIFO_SIZE 16
36
-#else
37
-#define DSPI_FIFO_SIZE 4
38
-#endif
39
-#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
24
+#define SPI_MCR 0x00
25
+#define SPI_MCR_MASTER BIT(31)
26
+#define SPI_MCR_PCSIS(x) ((x) << 16)
27
+#define SPI_MCR_CLR_TXF BIT(11)
28
+#define SPI_MCR_CLR_RXF BIT(10)
29
+#define SPI_MCR_XSPI BIT(3)
30
+#define SPI_MCR_DIS_TXF BIT(13)
31
+#define SPI_MCR_DIS_RXF BIT(12)
32
+#define SPI_MCR_HALT BIT(0)
4033
41
-#define SPI_MCR 0x00
42
-#define SPI_MCR_MASTER (1 << 31)
43
-#define SPI_MCR_PCSIS (0x3F << 16)
44
-#define SPI_MCR_CLR_TXF (1 << 11)
45
-#define SPI_MCR_CLR_RXF (1 << 10)
46
-#define SPI_MCR_XSPI (1 << 3)
47
-#define SPI_MCR_DIS_TXF (1 << 13)
48
-#define SPI_MCR_DIS_RXF (1 << 12)
49
-#define SPI_MCR_HALT (1 << 0)
34
+#define SPI_TCR 0x08
35
+#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
5036
51
-#define SPI_TCR 0x08
52
-#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
37
+#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
38
+#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
39
+#define SPI_CTAR_CPOL BIT(26)
40
+#define SPI_CTAR_CPHA BIT(25)
41
+#define SPI_CTAR_LSBFE BIT(24)
42
+#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
43
+#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
44
+#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
45
+#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
46
+#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
47
+#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
48
+#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
49
+#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
50
+#define SPI_CTAR_SCALE_BITS 0xf
5351
54
-#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
55
-#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
56
-#define SPI_CTAR_CPOL(x) ((x) << 26)
57
-#define SPI_CTAR_CPHA(x) ((x) << 25)
58
-#define SPI_CTAR_LSBFE(x) ((x) << 24)
59
-#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
60
-#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
61
-#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
62
-#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
63
-#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
64
-#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
65
-#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
66
-#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
67
-#define SPI_CTAR_SCALE_BITS 0xf
52
+#define SPI_CTAR0_SLAVE 0x0c
6853
69
-#define SPI_CTAR0_SLAVE 0x0c
54
+#define SPI_SR 0x2c
55
+#define SPI_SR_TCFQF BIT(31)
56
+#define SPI_SR_TFUF BIT(27)
57
+#define SPI_SR_TFFF BIT(25)
58
+#define SPI_SR_CMDTCF BIT(23)
59
+#define SPI_SR_SPEF BIT(21)
60
+#define SPI_SR_RFOF BIT(19)
61
+#define SPI_SR_TFIWF BIT(18)
62
+#define SPI_SR_RFDF BIT(17)
63
+#define SPI_SR_CMDFFF BIT(16)
64
+#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
65
+ SPI_SR_TFUF | SPI_SR_TFFF | \
66
+ SPI_SR_CMDTCF | SPI_SR_SPEF | \
67
+ SPI_SR_RFOF | SPI_SR_TFIWF | \
68
+ SPI_SR_RFDF | SPI_SR_CMDFFF)
7069
71
-#define SPI_SR 0x2c
72
-#define SPI_SR_EOQF 0x10000000
73
-#define SPI_SR_TCFQF 0x80000000
74
-#define SPI_SR_CLEAR 0x9aaf0000
70
+#define SPI_RSER_TFFFE BIT(25)
71
+#define SPI_RSER_TFFFD BIT(24)
72
+#define SPI_RSER_RFDFE BIT(17)
73
+#define SPI_RSER_RFDFD BIT(16)
7574
76
-#define SPI_RSER_TFFFE BIT(25)
77
-#define SPI_RSER_TFFFD BIT(24)
78
-#define SPI_RSER_RFDFE BIT(17)
79
-#define SPI_RSER_RFDFD BIT(16)
75
+#define SPI_RSER 0x30
76
+#define SPI_RSER_TCFQE BIT(31)
77
+#define SPI_RSER_CMDTCFE BIT(23)
8078
81
-#define SPI_RSER 0x30
82
-#define SPI_RSER_EOQFE 0x10000000
83
-#define SPI_RSER_TCFQE 0x80000000
79
+#define SPI_PUSHR 0x34
80
+#define SPI_PUSHR_CMD_CONT BIT(15)
81
+#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12) & GENMASK(14, 12))
82
+#define SPI_PUSHR_CMD_EOQ BIT(11)
83
+#define SPI_PUSHR_CMD_CTCNT BIT(10)
84
+#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))
8485
85
-#define SPI_PUSHR 0x34
86
-#define SPI_PUSHR_CMD_CONT (1 << 15)
87
-#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
88
-#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
89
-#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
90
-#define SPI_PUSHR_CMD_EOQ (1 << 11)
91
-#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
92
-#define SPI_PUSHR_CMD_CTCNT (1 << 10)
93
-#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
94
-#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
95
-#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
96
-#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
86
+#define SPI_PUSHR_SLAVE 0x34
9787
98
-#define SPI_PUSHR_SLAVE 0x34
88
+#define SPI_POPR 0x38
9989
100
-#define SPI_POPR 0x38
101
-#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)
90
+#define SPI_TXFR0 0x3c
91
+#define SPI_TXFR1 0x40
92
+#define SPI_TXFR2 0x44
93
+#define SPI_TXFR3 0x48
94
+#define SPI_RXFR0 0x7c
95
+#define SPI_RXFR1 0x80
96
+#define SPI_RXFR2 0x84
97
+#define SPI_RXFR3 0x88
10298
103
-#define SPI_TXFR0 0x3c
104
-#define SPI_TXFR1 0x40
105
-#define SPI_TXFR2 0x44
106
-#define SPI_TXFR3 0x48
107
-#define SPI_RXFR0 0x7c
108
-#define SPI_RXFR1 0x80
109
-#define SPI_RXFR2 0x84
110
-#define SPI_RXFR3 0x88
99
+#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
100
+#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
101
+#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
111102
112
-#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
113
-#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
114
-#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
103
+#define SPI_SREX 0x13c
115104
116
-#define SPI_SREX 0x13c
105
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
106
+#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
117107
118
-#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
119
-#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
120
-#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
121
-#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
122
-
123
-#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
124
-#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)
125
-
126
-/* Register offsets for regmap_pushr */
127
-#define PUSHR_CMD 0x0
128
-#define PUSHR_TX 0x2
129
-
130
-#define SPI_CS_INIT 0x01
131
-#define SPI_CS_ASSERT 0x02
132
-#define SPI_CS_DROP 0x04
133
-
134
-#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
108
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
135109
136110 struct chip_data {
137
- u32 ctar_val;
138
- u16 void_write_data;
111
+ u32 ctar_val;
139112 };
140113
141114 enum dspi_trans_mode {
142
- DSPI_EOQ_MODE = 0,
143
- DSPI_TCFQ_MODE,
115
+ DSPI_XSPI_MODE,
144116 DSPI_DMA_MODE,
145117 };
146118
147119 struct fsl_dspi_devtype_data {
148
- enum dspi_trans_mode trans_mode;
149
- u8 max_clock_factor;
150
- bool xspi_mode;
120
+ enum dspi_trans_mode trans_mode;
121
+ u8 max_clock_factor;
122
+ int fifo_size;
151123 };
152124
153
-static const struct fsl_dspi_devtype_data vf610_data = {
154
- .trans_mode = DSPI_DMA_MODE,
155
- .max_clock_factor = 2,
125
+enum {
126
+ LS1021A,
127
+ LS1012A,
128
+ LS1028A,
129
+ LS1043A,
130
+ LS1046A,
131
+ LS2080A,
132
+ LS2085A,
133
+ LX2160A,
134
+ MCF5441X,
135
+ VF610,
156136 };
157137
158
-static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
159
- .trans_mode = DSPI_TCFQ_MODE,
160
- .max_clock_factor = 8,
161
- .xspi_mode = true,
162
-};
163
-
164
-static const struct fsl_dspi_devtype_data ls2085a_data = {
165
- .trans_mode = DSPI_TCFQ_MODE,
166
- .max_clock_factor = 8,
167
-};
168
-
169
-static const struct fsl_dspi_devtype_data coldfire_data = {
170
- .trans_mode = DSPI_EOQ_MODE,
171
- .max_clock_factor = 8,
138
+static const struct fsl_dspi_devtype_data devtype_data[] = {
139
+ [VF610] = {
140
+ .trans_mode = DSPI_DMA_MODE,
141
+ .max_clock_factor = 2,
142
+ .fifo_size = 4,
143
+ },
144
+ [LS1021A] = {
145
+ /* Has A-011218 DMA erratum */
146
+ .trans_mode = DSPI_XSPI_MODE,
147
+ .max_clock_factor = 8,
148
+ .fifo_size = 4,
149
+ },
150
+ [LS1012A] = {
151
+ /* Has A-011218 DMA erratum */
152
+ .trans_mode = DSPI_XSPI_MODE,
153
+ .max_clock_factor = 8,
154
+ .fifo_size = 16,
155
+ },
156
+ [LS1028A] = {
157
+ .trans_mode = DSPI_XSPI_MODE,
158
+ .max_clock_factor = 8,
159
+ .fifo_size = 4,
160
+ },
161
+ [LS1043A] = {
162
+ /* Has A-011218 DMA erratum */
163
+ .trans_mode = DSPI_XSPI_MODE,
164
+ .max_clock_factor = 8,
165
+ .fifo_size = 16,
166
+ },
167
+ [LS1046A] = {
168
+ /* Has A-011218 DMA erratum */
169
+ .trans_mode = DSPI_XSPI_MODE,
170
+ .max_clock_factor = 8,
171
+ .fifo_size = 16,
172
+ },
173
+ [LS2080A] = {
174
+ .trans_mode = DSPI_XSPI_MODE,
175
+ .max_clock_factor = 8,
176
+ .fifo_size = 4,
177
+ },
178
+ [LS2085A] = {
179
+ .trans_mode = DSPI_XSPI_MODE,
180
+ .max_clock_factor = 8,
181
+ .fifo_size = 4,
182
+ },
183
+ [LX2160A] = {
184
+ .trans_mode = DSPI_XSPI_MODE,
185
+ .max_clock_factor = 8,
186
+ .fifo_size = 4,
187
+ },
188
+ [MCF5441X] = {
189
+ .trans_mode = DSPI_DMA_MODE,
190
+ .max_clock_factor = 8,
191
+ .fifo_size = 16,
192
+ },
172193 };
173194
174195 struct fsl_dspi_dma {
175
- /* Length of transfer in words of DSPI_FIFO_SIZE */
176
- u32 curr_xfer_len;
196
+ u32 *tx_dma_buf;
197
+ struct dma_chan *chan_tx;
198
+ dma_addr_t tx_dma_phys;
199
+ struct completion cmd_tx_complete;
200
+ struct dma_async_tx_descriptor *tx_desc;
177201
178
- u32 *tx_dma_buf;
179
- struct dma_chan *chan_tx;
180
- dma_addr_t tx_dma_phys;
181
- struct completion cmd_tx_complete;
182
- struct dma_async_tx_descriptor *tx_desc;
183
-
184
- u32 *rx_dma_buf;
185
- struct dma_chan *chan_rx;
186
- dma_addr_t rx_dma_phys;
187
- struct completion cmd_rx_complete;
188
- struct dma_async_tx_descriptor *rx_desc;
202
+ u32 *rx_dma_buf;
203
+ struct dma_chan *chan_rx;
204
+ dma_addr_t rx_dma_phys;
205
+ struct completion cmd_rx_complete;
206
+ struct dma_async_tx_descriptor *rx_desc;
189207 };
190208
191209 struct fsl_dspi {
192
- struct spi_master *master;
193
- struct platform_device *pdev;
210
+ struct spi_controller *ctlr;
211
+ struct platform_device *pdev;
194212
195
- struct regmap *regmap;
196
- struct regmap *regmap_pushr;
197
- int irq;
198
- struct clk *clk;
213
+ struct regmap *regmap;
214
+ struct regmap *regmap_pushr;
215
+ int irq;
216
+ struct clk *clk;
199217
200
- struct spi_transfer *cur_transfer;
201
- struct spi_message *cur_msg;
202
- struct chip_data *cur_chip;
203
- size_t len;
204
- const void *tx;
205
- void *rx;
206
- void *rx_end;
207
- u16 void_write_data;
208
- u16 tx_cmd;
209
- u8 bits_per_word;
210
- u8 bytes_per_word;
211
- const struct fsl_dspi_devtype_data *devtype_data;
218
+ struct spi_transfer *cur_transfer;
219
+ struct spi_message *cur_msg;
220
+ struct chip_data *cur_chip;
221
+ size_t progress;
222
+ size_t len;
223
+ const void *tx;
224
+ void *rx;
225
+ u16 tx_cmd;
226
+ const struct fsl_dspi_devtype_data *devtype_data;
212227
213
- wait_queue_head_t waitq;
214
- u32 waitflags;
228
+ struct completion xfer_done;
215229
216
- struct fsl_dspi_dma *dma;
230
+ struct fsl_dspi_dma *dma;
231
+
232
+ int oper_word_size;
233
+ int oper_bits_per_word;
234
+
235
+ int words_in_flight;
236
+
237
+ /*
238
+ * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
239
+ * individually (in XSPI mode)
240
+ */
241
+ int pushr_cmd;
242
+ int pushr_tx;
243
+
244
+ void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
245
+ void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
217246 };
218247
248
+static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
249
+{
250
+ switch (dspi->oper_word_size) {
251
+ case 1:
252
+ *txdata = *(u8 *)dspi->tx;
253
+ break;
254
+ case 2:
255
+ *txdata = *(u16 *)dspi->tx;
256
+ break;
257
+ case 4:
258
+ *txdata = *(u32 *)dspi->tx;
259
+ break;
260
+ }
261
+ dspi->tx += dspi->oper_word_size;
262
+}
263
+
264
+static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
265
+{
266
+ switch (dspi->oper_word_size) {
267
+ case 1:
268
+ *(u8 *)dspi->rx = rxdata;
269
+ break;
270
+ case 2:
271
+ *(u16 *)dspi->rx = rxdata;
272
+ break;
273
+ case 4:
274
+ *(u32 *)dspi->rx = rxdata;
275
+ break;
276
+ }
277
+ dspi->rx += dspi->oper_word_size;
278
+}
279
+
280
+static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
281
+{
282
+ *txdata = cpu_to_be32(*(u32 *)dspi->tx);
283
+ dspi->tx += sizeof(u32);
284
+}
285
+
286
+static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
287
+{
288
+ *(u32 *)dspi->rx = be32_to_cpu(rxdata);
289
+ dspi->rx += sizeof(u32);
290
+}
291
+
292
+static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
293
+{
294
+ *txdata = cpu_to_be16(*(u16 *)dspi->tx);
295
+ dspi->tx += sizeof(u16);
296
+}
297
+
298
+static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
299
+{
300
+ *(u16 *)dspi->rx = be16_to_cpu(rxdata);
301
+ dspi->rx += sizeof(u16);
302
+}
303
+
304
+static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
305
+{
306
+ u16 hi = *(u16 *)dspi->tx;
307
+ u16 lo = *(u16 *)(dspi->tx + 2);
308
+
309
+ *txdata = (u32)hi << 16 | lo;
310
+ dspi->tx += sizeof(u32);
311
+}
312
+
313
+static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
314
+{
315
+ u16 hi = rxdata & 0xffff;
316
+ u16 lo = rxdata >> 16;
317
+
318
+ *(u16 *)dspi->rx = lo;
319
+ *(u16 *)(dspi->rx + 2) = hi;
320
+ dspi->rx += sizeof(u32);
321
+}
322
+
323
+/*
324
+ * Pop one word from the TX buffer for pushing into the
325
+ * PUSHR register (TX FIFO)
326
+ */
219327 static u32 dspi_pop_tx(struct fsl_dspi *dspi)
220328 {
221329 u32 txdata = 0;
222330
223
- if (dspi->tx) {
224
- if (dspi->bytes_per_word == 1)
225
- txdata = *(u8 *)dspi->tx;
226
- else if (dspi->bytes_per_word == 2)
227
- txdata = *(u16 *)dspi->tx;
228
- else /* dspi->bytes_per_word == 4 */
229
- txdata = *(u32 *)dspi->tx;
230
- dspi->tx += dspi->bytes_per_word;
231
- }
232
- dspi->len -= dspi->bytes_per_word;
331
+ if (dspi->tx)
332
+ dspi->host_to_dev(dspi, &txdata);
333
+ dspi->len -= dspi->oper_word_size;
233334 return txdata;
234335 }
235336
337
+/* Prepare one TX FIFO entry (txdata plus cmd) */
236338 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
237339 {
238340 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
341
+
342
+ if (spi_controller_is_slave(dspi->ctlr))
343
+ return data;
239344
240345 if (dspi->len > 0)
241346 cmd |= SPI_PUSHR_CMD_CONT;
242347 return cmd << 16 | data;
243348 }
244349
350
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
245351 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
246352 {
247353 if (!dspi->rx)
248354 return;
249
-
250
- /* Mask of undefined bits */
251
- rxdata &= (1 << dspi->bits_per_word) - 1;
252
-
253
- if (dspi->bytes_per_word == 1)
254
- *(u8 *)dspi->rx = rxdata;
255
- else if (dspi->bytes_per_word == 2)
256
- *(u16 *)dspi->rx = rxdata;
257
- else /* dspi->bytes_per_word == 4 */
258
- *(u32 *)dspi->rx = rxdata;
259
- dspi->rx += dspi->bytes_per_word;
355
+ dspi->dev_to_host(dspi, rxdata);
260356 }
261357
262358 static void dspi_tx_dma_callback(void *arg)
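Note on the hunk above: the new host_to_dev/dev_to_host helpers replace the old per-bytes_per_word copies in dspi_pop_tx()/dspi_push_rx(). When several 8-bit (or 16-bit) SPI words are coalesced into one wider hardware word, the bytes are packed so that, with the usual MSB-first shifting, the first byte of the buffer still leaves the wire first. A minimal user-space sketch of the 8-on-32 case, with htonl() standing in for the kernel's cpu_to_be32() (illustrative only, not part of the patch):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	const uint8_t tx[4] = { 0xaa, 0xbb, 0xcc, 0xdd };	/* four 8-bit SPI words */
	uint32_t word;

	memcpy(&word, tx, sizeof(word));	/* mirrors *(u32 *)dspi->tx */
	word = htonl(word);			/* stands in for cpu_to_be32() */

	/* Prints 0xaabbccdd on hosts of either endianness */
	printf("32-bit TX FIFO word: 0x%08" PRIx32 "\n", word);
	return 0;
}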
....@@ -274,7 +370,7 @@
274370 int i;
275371
276372 if (dspi->rx) {
277
- for (i = 0; i < dma->curr_xfer_len; i++)
373
+ for (i = 0; i < dspi->words_in_flight; i++)
278374 dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
279375 }
280376
....@@ -283,17 +379,17 @@
283379
284380 static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
285381 {
286
- struct fsl_dspi_dma *dma = dspi->dma;
287382 struct device *dev = &dspi->pdev->dev;
383
+ struct fsl_dspi_dma *dma = dspi->dma;
288384 int time_left;
289385 int i;
290386
291
- for (i = 0; i < dma->curr_xfer_len; i++)
387
+ for (i = 0; i < dspi->words_in_flight; i++)
292388 dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
293389
294390 dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
295391 dma->tx_dma_phys,
296
- dma->curr_xfer_len *
392
+ dspi->words_in_flight *
297393 DMA_SLAVE_BUSWIDTH_4_BYTES,
298394 DMA_MEM_TO_DEV,
299395 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
....@@ -311,7 +407,7 @@
311407
312408 dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
313409 dma->rx_dma_phys,
314
- dma->curr_xfer_len *
410
+ dspi->words_in_flight *
315411 DMA_SLAVE_BUSWIDTH_4_BYTES,
316412 DMA_DEV_TO_MEM,
317413 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
....@@ -333,8 +429,13 @@
333429 dma_async_issue_pending(dma->chan_rx);
334430 dma_async_issue_pending(dma->chan_tx);
335431
432
+ if (spi_controller_is_slave(dspi->ctlr)) {
433
+ wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
434
+ return 0;
435
+ }
436
+
336437 time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
337
- DMA_COMPLETION_TIMEOUT);
438
+ DMA_COMPLETION_TIMEOUT);
338439 if (time_left == 0) {
339440 dev_err(dev, "DMA tx timeout\n");
340441 dmaengine_terminate_all(dma->chan_tx);
....@@ -343,7 +444,7 @@
343444 }
344445
345446 time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
346
- DMA_COMPLETION_TIMEOUT);
447
+ DMA_COMPLETION_TIMEOUT);
347448 if (time_left == 0) {
348449 dev_err(dev, "DMA rx timeout\n");
349450 dmaengine_terminate_all(dma->chan_tx);
....@@ -354,77 +455,76 @@
354455 return 0;
355456 }
356457
458
+static void dspi_setup_accel(struct fsl_dspi *dspi);
459
+
357460 static int dspi_dma_xfer(struct fsl_dspi *dspi)
358461 {
359
- struct fsl_dspi_dma *dma = dspi->dma;
360
- struct device *dev = &dspi->pdev->dev;
361462 struct spi_message *message = dspi->cur_msg;
362
- int curr_remaining_bytes;
363
- int bytes_per_buffer;
463
+ struct device *dev = &dspi->pdev->dev;
364464 int ret = 0;
365465
366
- curr_remaining_bytes = dspi->len;
367
- bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
368
- while (curr_remaining_bytes) {
369
- /* Check if current transfer fits the DMA buffer */
370
- dma->curr_xfer_len = curr_remaining_bytes
371
- / dspi->bytes_per_word;
372
- if (dma->curr_xfer_len > bytes_per_buffer)
373
- dma->curr_xfer_len = bytes_per_buffer;
466
+ /*
467
+ * dspi->len gets decremented by dspi_pop_tx_pushr in
468
+ * dspi_next_xfer_dma_submit
469
+ */
470
+ while (dspi->len) {
471
+ /* Figure out operational bits-per-word for this chunk */
472
+ dspi_setup_accel(dspi);
473
+
474
+ dspi->words_in_flight = dspi->len / dspi->oper_word_size;
475
+ if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
476
+ dspi->words_in_flight = dspi->devtype_data->fifo_size;
477
+
478
+ message->actual_length += dspi->words_in_flight *
479
+ dspi->oper_word_size;
374480
375481 ret = dspi_next_xfer_dma_submit(dspi);
376482 if (ret) {
377483 dev_err(dev, "DMA transfer failed\n");
378
- goto exit;
379
-
380
- } else {
381
- const int len =
382
- dma->curr_xfer_len * dspi->bytes_per_word;
383
- curr_remaining_bytes -= len;
384
- message->actual_length += len;
385
- if (curr_remaining_bytes < 0)
386
- curr_remaining_bytes = 0;
484
+ break;
387485 }
388486 }
389487
390
-exit:
391488 return ret;
392489 }
393490
394491 static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
395492 {
396
- struct fsl_dspi_dma *dma;
397
- struct dma_slave_config cfg;
493
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
398494 struct device *dev = &dspi->pdev->dev;
495
+ struct dma_slave_config cfg;
496
+ struct fsl_dspi_dma *dma;
399497 int ret;
400498
401499 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
402500 if (!dma)
403501 return -ENOMEM;
404502
405
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
406
- if (!dma->chan_rx) {
503
+ dma->chan_rx = dma_request_chan(dev, "rx");
504
+ if (IS_ERR(dma->chan_rx)) {
407505 dev_err(dev, "rx dma channel not available\n");
408
- ret = -ENODEV;
506
+ ret = PTR_ERR(dma->chan_rx);
409507 return ret;
410508 }
411509
412
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
413
- if (!dma->chan_tx) {
510
+ dma->chan_tx = dma_request_chan(dev, "tx");
511
+ if (IS_ERR(dma->chan_tx)) {
414512 dev_err(dev, "tx dma channel not available\n");
415
- ret = -ENODEV;
513
+ ret = PTR_ERR(dma->chan_tx);
416514 goto err_tx_channel;
417515 }
418516
419
- dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
420
- &dma->tx_dma_phys, GFP_KERNEL);
517
+ dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
518
+ dma_bufsize, &dma->tx_dma_phys,
519
+ GFP_KERNEL);
421520 if (!dma->tx_dma_buf) {
422521 ret = -ENOMEM;
423522 goto err_tx_dma_buf;
424523 }
425524
426
- dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
427
- &dma->rx_dma_phys, GFP_KERNEL);
525
+ dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
526
+ dma_bufsize, &dma->rx_dma_phys,
527
+ GFP_KERNEL);
428528 if (!dma->rx_dma_buf) {
429529 ret = -ENOMEM;
430530 goto err_rx_dma_buf;
....@@ -461,11 +561,11 @@
461561 return 0;
462562
463563 err_slave_config:
464
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
465
- dma->rx_dma_buf, dma->rx_dma_phys);
564
+ dma_free_coherent(dma->chan_rx->device->dev,
565
+ dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
466566 err_rx_dma_buf:
467
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
468
- dma->tx_dma_buf, dma->tx_dma_phys);
567
+ dma_free_coherent(dma->chan_tx->device->dev,
568
+ dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
469569 err_tx_dma_buf:
470570 dma_release_channel(dma->chan_tx);
471571 err_tx_channel:
....@@ -479,33 +579,34 @@
479579
480580 static void dspi_release_dma(struct fsl_dspi *dspi)
481581 {
582
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
482583 struct fsl_dspi_dma *dma = dspi->dma;
483
- struct device *dev = &dspi->pdev->dev;
484584
485
- if (dma) {
486
- if (dma->chan_tx) {
487
- dma_unmap_single(dev, dma->tx_dma_phys,
488
- DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
489
- dma_release_channel(dma->chan_tx);
490
- }
585
+ if (!dma)
586
+ return;
491587
492
- if (dma->chan_rx) {
493
- dma_unmap_single(dev, dma->rx_dma_phys,
494
- DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
495
- dma_release_channel(dma->chan_rx);
496
- }
588
+ if (dma->chan_tx) {
589
+ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
590
+ dma->tx_dma_buf, dma->tx_dma_phys);
591
+ dma_release_channel(dma->chan_tx);
592
+ }
593
+
594
+ if (dma->chan_rx) {
595
+ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
596
+ dma->rx_dma_buf, dma->rx_dma_phys);
597
+ dma_release_channel(dma->chan_rx);
497598 }
498599 }
499600
500601 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
501
- unsigned long clkrate)
602
+ unsigned long clkrate)
502603 {
503604 /* Valid baud rate pre-scaler values */
504605 int pbr_tbl[4] = {2, 3, 5, 7};
505606 int brs[16] = { 2, 4, 6, 8,
506
- 16, 32, 64, 128,
507
- 256, 512, 1024, 2048,
508
- 4096, 8192, 16384, 32768 };
607
+ 16, 32, 64, 128,
608
+ 256, 512, 1024, 2048,
609
+ 4096, 8192, 16384, 32768 };
509610 int scale_needed, scale, minscale = INT_MAX;
510611 int i, j;
511612
....@@ -535,15 +636,15 @@
535636 }
536637
537638 static void ns_delay_scale(char *psc, char *sc, int delay_ns,
538
- unsigned long clkrate)
639
+ unsigned long clkrate)
539640 {
540
- int pscale_tbl[4] = {1, 3, 5, 7};
541641 int scale_needed, scale, minscale = INT_MAX;
542
- int i, j;
642
+ int pscale_tbl[4] = {1, 3, 5, 7};
543643 u32 remainder;
644
+ int i, j;
544645
545646 scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
546
- &remainder);
647
+ &remainder);
547648 if (remainder)
548649 scale_needed++;
549650
....@@ -568,55 +669,66 @@
568669 }
569670 }
570671
571
-static void fifo_write(struct fsl_dspi *dspi)
672
+static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
572673 {
573
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
574
-}
575
-
576
-static void cmd_fifo_write(struct fsl_dspi *dspi)
577
-{
578
- u16 cmd = dspi->tx_cmd;
579
-
580
- if (dspi->len > 0)
674
+ /*
675
+ * The only time when the PCS doesn't need continuation after this word
676
+ * is when it's last. We need to look ahead, because we actually call
677
+ * dspi_pop_tx (the function that decrements dspi->len) _after_
678
+ * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
679
+ * word is enough. If there's more to transmit than that,
680
+ * dspi_xspi_write will know to split the FIFO writes in 2, and
681
+ * generate a new PUSHR command with the final word that will have PCS
682
+ * deasserted (not continued) here.
683
+ */
684
+ if (dspi->len > dspi->oper_word_size)
581685 cmd |= SPI_PUSHR_CMD_CONT;
582
- regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
686
+ regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
583687 }
584688
585
-static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
689
+static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
586690 {
587
- regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
691
+ regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
588692 }
589693
590
-static void dspi_tcfq_write(struct fsl_dspi *dspi)
694
+static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
591695 {
592
- /* Clear transfer count */
593
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
696
+ int num_bytes = num_words * dspi->oper_word_size;
697
+ u16 tx_cmd = dspi->tx_cmd;
594698
595
- if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
596
- /* Write two TX FIFO entries first, and then the corresponding
597
- * CMD FIFO entry.
598
- */
699
+ /*
700
+ * If the PCS needs to de-assert (i.e. we're at the end of the buffer
701
+ * and cs_change does not want the PCS to stay on), then we need a new
702
+ * PUSHR command, since this one (for the body of the buffer)
703
+ * necessarily has the CONT bit set.
704
+ * So send one word less during this go, to force a split and a command
705
+ * with a single word next time, when CONT will be unset.
706
+ */
707
+ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
708
+ tx_cmd |= SPI_PUSHR_CMD_EOQ;
709
+
710
+ /* Update CTARE */
711
+ regmap_write(dspi->regmap, SPI_CTARE(0),
712
+ SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
713
+ SPI_CTARE_DTCP(num_words));
714
+
715
+ /*
716
+ * Write the CMD FIFO entry first, and then the two
717
+ * corresponding TX FIFO entries (or one...).
718
+ */
719
+ dspi_pushr_cmd_write(dspi, tx_cmd);
720
+
721
+ /* Fill TX FIFO with as many transfers as possible */
722
+ while (num_words--) {
599723 u32 data = dspi_pop_tx(dspi);
600724
601
- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
602
- /* LSB */
603
- tx_fifo_write(dspi, data & 0xFFFF);
604
- tx_fifo_write(dspi, data >> 16);
605
- } else {
606
- /* MSB */
607
- tx_fifo_write(dspi, data >> 16);
608
- tx_fifo_write(dspi, data & 0xFFFF);
609
- }
610
- cmd_fifo_write(dspi);
611
- } else {
612
- /* Write one entry to both TX FIFO and CMD FIFO
613
- * simultaneously.
614
- */
615
- fifo_write(dspi);
725
+ dspi_pushr_txdata_write(dspi, data & 0xFFFF);
726
+ if (dspi->oper_bits_per_word > 16)
727
+ dspi_pushr_txdata_write(dspi, data >> 16);
616728 }
617729 }
618730
619
-static u32 fifo_read(struct fsl_dspi *dspi)
731
+static u32 dspi_popr_read(struct fsl_dspi *dspi)
620732 {
621733 u32 rxdata = 0;
622734
....@@ -624,47 +736,177 @@
624736 return rxdata;
625737 }
626738
627
-static void dspi_tcfq_read(struct fsl_dspi *dspi)
739
+static void dspi_fifo_read(struct fsl_dspi *dspi)
628740 {
629
- dspi_push_rx(dspi, fifo_read(dspi));
741
+ int num_fifo_entries = dspi->words_in_flight;
742
+
743
+ /* Read one FIFO entry and push to rx buffer */
744
+ while (num_fifo_entries--)
745
+ dspi_push_rx(dspi, dspi_popr_read(dspi));
630746 }
631747
632
-static void dspi_eoq_write(struct fsl_dspi *dspi)
748
+static void dspi_setup_accel(struct fsl_dspi *dspi)
633749 {
634
- int fifo_size = DSPI_FIFO_SIZE;
635
- u16 xfer_cmd = dspi->tx_cmd;
750
+ struct spi_transfer *xfer = dspi->cur_transfer;
751
+ bool odd = !!(dspi->len & 1);
636752
637
- /* Fill TX FIFO with as many transfers as possible */
638
- while (dspi->len && fifo_size--) {
639
- dspi->tx_cmd = xfer_cmd;
640
- /* Request EOQF for last transfer in FIFO */
641
- if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
642
- dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
643
- /* Clear transfer count for first transfer in FIFO */
644
- if (fifo_size == (DSPI_FIFO_SIZE - 1))
645
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
646
- /* Write combined TX FIFO and CMD FIFO entry */
647
- fifo_write(dspi);
753
+ /* No accel for frames not multiple of 8 bits at the moment */
754
+ if (xfer->bits_per_word % 8)
755
+ goto no_accel;
756
+
757
+ if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
758
+ dspi->oper_bits_per_word = 16;
759
+ } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
760
+ dspi->oper_bits_per_word = 8;
761
+ } else {
762
+ /* Start off with maximum supported by hardware */
763
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
764
+ dspi->oper_bits_per_word = 32;
765
+ else
766
+ dspi->oper_bits_per_word = 16;
767
+
768
+ /*
769
+ * And go down only if the buffer can't be sent with
770
+ * words this big
771
+ */
772
+ do {
773
+ if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
774
+ break;
775
+
776
+ dspi->oper_bits_per_word /= 2;
777
+ } while (dspi->oper_bits_per_word > 8);
648778 }
779
+
780
+ if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
781
+ dspi->dev_to_host = dspi_8on32_dev_to_host;
782
+ dspi->host_to_dev = dspi_8on32_host_to_dev;
783
+ } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
784
+ dspi->dev_to_host = dspi_8on16_dev_to_host;
785
+ dspi->host_to_dev = dspi_8on16_host_to_dev;
786
+ } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
787
+ dspi->dev_to_host = dspi_16on32_dev_to_host;
788
+ dspi->host_to_dev = dspi_16on32_host_to_dev;
789
+ } else {
790
+no_accel:
791
+ dspi->dev_to_host = dspi_native_dev_to_host;
792
+ dspi->host_to_dev = dspi_native_host_to_dev;
793
+ dspi->oper_bits_per_word = xfer->bits_per_word;
794
+ }
795
+
796
+ dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
797
+
798
+ /*
799
+ * Update CTAR here (code is common for XSPI and DMA modes).
800
+ * We will update CTARE in the portion specific to XSPI, when we
801
+ * also know the preload value (DTCP).
802
+ */
803
+ regmap_write(dspi->regmap, SPI_CTAR(0),
804
+ dspi->cur_chip->ctar_val |
805
+ SPI_FRAME_BITS(dspi->oper_bits_per_word));
649806 }
650807
651
-static void dspi_eoq_read(struct fsl_dspi *dspi)
808
+static void dspi_fifo_write(struct fsl_dspi *dspi)
652809 {
653
- int fifo_size = DSPI_FIFO_SIZE;
810
+ int num_fifo_entries = dspi->devtype_data->fifo_size;
811
+ struct spi_transfer *xfer = dspi->cur_transfer;
812
+ struct spi_message *msg = dspi->cur_msg;
813
+ int num_words, num_bytes;
654814
655
- /* Read one FIFO entry at and push to rx buffer */
656
- while ((dspi->rx < dspi->rx_end) && fifo_size--)
657
- dspi_push_rx(dspi, fifo_read(dspi));
815
+ dspi_setup_accel(dspi);
816
+
817
+ /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
818
+ if (dspi->oper_word_size == 4)
819
+ num_fifo_entries /= 2;
820
+
821
+ /*
822
+ * Integer division intentionally trims off odd (or non-multiple of 4)
823
+ * numbers of bytes at the end of the buffer, which will be sent next
824
+ * time using a smaller oper_word_size.
825
+ */
826
+ num_words = dspi->len / dspi->oper_word_size;
827
+ if (num_words > num_fifo_entries)
828
+ num_words = num_fifo_entries;
829
+
830
+ /* Update total number of bytes that were transferred */
831
+ num_bytes = num_words * dspi->oper_word_size;
832
+ msg->actual_length += num_bytes;
833
+ dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
834
+
835
+ /*
836
+ * Update shared variable for use in the next interrupt (both in
837
+ * dspi_fifo_read and in dspi_fifo_write).
838
+ */
839
+ dspi->words_in_flight = num_words;
840
+
841
+ spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
842
+
843
+ dspi_xspi_fifo_write(dspi, num_words);
844
+ /*
845
+ * Everything after this point is in a potential race with the next
846
+ * interrupt, so we must never use dspi->words_in_flight again since it
847
+ * might already be modified by the next dspi_fifo_write.
848
+ */
849
+
850
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
851
+ dspi->progress, !dspi->irq);
658852 }
659853
660
-static int dspi_transfer_one_message(struct spi_master *master,
661
- struct spi_message *message)
854
+static int dspi_rxtx(struct fsl_dspi *dspi)
662855 {
663
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
856
+ dspi_fifo_read(dspi);
857
+
858
+ if (!dspi->len)
859
+ /* Success! */
860
+ return 0;
861
+
862
+ dspi_fifo_write(dspi);
863
+
864
+ return -EINPROGRESS;
865
+}
866
+
867
+static int dspi_poll(struct fsl_dspi *dspi)
868
+{
869
+ int tries = 1000;
870
+ u32 spi_sr;
871
+
872
+ do {
873
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
874
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
875
+
876
+ if (spi_sr & SPI_SR_CMDTCF)
877
+ break;
878
+ } while (--tries);
879
+
880
+ if (!tries)
881
+ return -ETIMEDOUT;
882
+
883
+ return dspi_rxtx(dspi);
884
+}
885
+
886
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
887
+{
888
+ struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
889
+ u32 spi_sr;
890
+
891
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
892
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
893
+
894
+ if (!(spi_sr & SPI_SR_CMDTCF))
895
+ return IRQ_NONE;
896
+
897
+ if (dspi_rxtx(dspi) == 0)
898
+ complete(&dspi->xfer_done);
899
+
900
+ return IRQ_HANDLED;
901
+}
902
+
903
+static int dspi_transfer_one_message(struct spi_controller *ctlr,
904
+ struct spi_message *message)
905
+{
906
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
664907 struct spi_device *spi = message->spi;
665908 struct spi_transfer *transfer;
666909 int status = 0;
667
- enum dspi_trans_mode trans_mode;
668910
669911 message->actual_length = 0;
670912
....@@ -674,7 +916,7 @@
674916 dspi->cur_chip = spi_get_ctldata(spi);
675917 /* Prepare command word for CMD FIFO */
676918 dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
677
- SPI_PUSHR_CMD_PCS(spi->chip_select);
919
+ SPI_PUSHR_CMD_PCS(spi->chip_select);
678920 if (list_is_last(&dspi->cur_transfer->transfer_list,
679921 &dspi->cur_msg->transfers)) {
680922 /* Leave PCS activated after last transfer when
....@@ -692,82 +934,54 @@
692934 dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
693935 }
694936
695
- dspi->void_write_data = dspi->cur_chip->void_write_data;
696
-
697937 dspi->tx = transfer->tx_buf;
698938 dspi->rx = transfer->rx_buf;
699
- dspi->rx_end = dspi->rx + transfer->len;
700939 dspi->len = transfer->len;
701
- /* Validated transfer specific frame size (defaults applied) */
702
- dspi->bits_per_word = transfer->bits_per_word;
703
- if (transfer->bits_per_word <= 8)
704
- dspi->bytes_per_word = 1;
705
- else if (transfer->bits_per_word <= 16)
706
- dspi->bytes_per_word = 2;
707
- else
708
- dspi->bytes_per_word = 4;
940
+ dspi->progress = 0;
709941
710942 regmap_update_bits(dspi->regmap, SPI_MCR,
711943 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
712944 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
713
- regmap_write(dspi->regmap, SPI_CTAR(0),
714
- dspi->cur_chip->ctar_val |
715
- SPI_FRAME_BITS(transfer->bits_per_word));
716
- if (dspi->devtype_data->xspi_mode)
717
- regmap_write(dspi->regmap, SPI_CTARE(0),
718
- SPI_FRAME_EBITS(transfer->bits_per_word)
719
- | SPI_CTARE_DTCP(1));
720945
721
- trans_mode = dspi->devtype_data->trans_mode;
722
- switch (trans_mode) {
723
- case DSPI_EOQ_MODE:
724
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
725
- dspi_eoq_write(dspi);
726
- break;
727
- case DSPI_TCFQ_MODE:
728
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
729
- dspi_tcfq_write(dspi);
730
- break;
731
- case DSPI_DMA_MODE:
732
- regmap_write(dspi->regmap, SPI_RSER,
733
- SPI_RSER_TFFFE | SPI_RSER_TFFFD |
734
- SPI_RSER_RFDFE | SPI_RSER_RFDFD);
946
+ spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
947
+ dspi->progress, !dspi->irq);
948
+
949
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
735950 status = dspi_dma_xfer(dspi);
951
+ } else {
952
+ dspi_fifo_write(dspi);
953
+
954
+ if (dspi->irq) {
955
+ wait_for_completion(&dspi->xfer_done);
956
+ reinit_completion(&dspi->xfer_done);
957
+ } else {
958
+ do {
959
+ status = dspi_poll(dspi);
960
+ } while (status == -EINPROGRESS);
961
+ }
962
+ }
963
+ if (status)
736964 break;
737
- default:
738
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
739
- trans_mode);
740
- status = -EINVAL;
741
- goto out;
742
- }
743965
744
- if (trans_mode != DSPI_DMA_MODE) {
745
- if (wait_event_interruptible(dspi->waitq,
746
- dspi->waitflags))
747
- dev_err(&dspi->pdev->dev,
748
- "wait transfer complete fail!\n");
749
- dspi->waitflags = 0;
750
- }
751
-
752
- if (transfer->delay_usecs)
753
- udelay(transfer->delay_usecs);
966
+ spi_transfer_delay_exec(transfer);
754967 }
755968
756
-out:
757969 message->status = status;
758
- spi_finalize_current_message(master);
970
+ spi_finalize_current_message(ctlr);
759971
760972 return status;
761973 }
762974
763975 static int dspi_setup(struct spi_device *spi)
764976 {
765
- struct chip_data *chip;
766
- struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
767
- struct fsl_dspi_platform_data *pdata;
768
- u32 cs_sck_delay = 0, sck_cs_delay = 0;
977
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
978
+ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
769979 unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
980
+ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
981
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
982
+ struct fsl_dspi_platform_data *pdata;
770983 unsigned char pasc = 0, asc = 0;
984
+ struct chip_data *chip;
771985 unsigned long clkrate;
772986
773987 /* Only alloc on first setup */
....@@ -782,16 +996,27 @@
782996
783997 if (!pdata) {
784998 of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
785
- &cs_sck_delay);
999
+ &cs_sck_delay);
7861000
7871001 of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
788
- &sck_cs_delay);
1002
+ &sck_cs_delay);
7891003 } else {
7901004 cs_sck_delay = pdata->cs_sck_delay;
7911005 sck_cs_delay = pdata->sck_cs_delay;
7921006 }
7931007
794
- chip->void_write_data = 0;
1008
+ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
1009
+ * glitches of half a cycle by never allowing tCSC + tASC to go below
1010
+ * half a SCK period.
1011
+ */
1012
+ if (cs_sck_delay < quarter_period_ns)
1013
+ cs_sck_delay = quarter_period_ns;
1014
+ if (sck_cs_delay < quarter_period_ns)
1015
+ sck_cs_delay = quarter_period_ns;
1016
+
1017
+ dev_dbg(&spi->dev,
1018
+ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
1019
+ cs_sck_delay, sck_cs_delay);
7951020
7961021 clkrate = clk_get_rate(dspi->clk);
7971022 hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
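Note on the hunk above: the quarter-period clamp added to dspi_setup() keeps tCSC + tASC at no less than half an SCK period. As a worked example, at max_speed_hz = 10 MHz the SCK period is 100 ns, so a 10 ns fsl,spi-cs-sck-delay from the device tree is raised to 25 ns. A tiny sketch of the same arithmetic (illustrative only, not part of the patch; the example rate and delay are made up):

#include <stdio.h>

int main(void)
{
	unsigned int max_speed_hz = 10000000;	/* example device rate */
	unsigned int period_ns = (1000000000u + max_speed_hz - 1) / max_speed_hz;
	unsigned int quarter_period_ns = (period_ns + 3) / 4;
	unsigned int cs_sck_delay = 10;		/* e.g. from the device tree */

	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;

	printf("period=%u ns, quarter=%u ns, clamped cs_sck_delay=%u ns\n",
	       period_ns, quarter_period_ns, cs_sck_delay);
	return 0;
}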
....@@ -802,15 +1027,23 @@
8021027 /* Set After SCK delay scale values */
8031028 ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
8041029
805
- chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
806
- | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
807
- | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
808
- | SPI_CTAR_PCSSCK(pcssck)
809
- | SPI_CTAR_CSSCK(cssck)
810
- | SPI_CTAR_PASC(pasc)
811
- | SPI_CTAR_ASC(asc)
812
- | SPI_CTAR_PBR(pbr)
813
- | SPI_CTAR_BR(br);
1030
+ chip->ctar_val = 0;
1031
+ if (spi->mode & SPI_CPOL)
1032
+ chip->ctar_val |= SPI_CTAR_CPOL;
1033
+ if (spi->mode & SPI_CPHA)
1034
+ chip->ctar_val |= SPI_CTAR_CPHA;
1035
+
1036
+ if (!spi_controller_is_slave(dspi->ctlr)) {
1037
+ chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
1038
+ SPI_CTAR_CSSCK(cssck) |
1039
+ SPI_CTAR_PASC(pasc) |
1040
+ SPI_CTAR_ASC(asc) |
1041
+ SPI_CTAR_PBR(pbr) |
1042
+ SPI_CTAR_BR(br);
1043
+
1044
+ if (spi->mode & SPI_LSB_FIRST)
1045
+ chip->ctar_val |= SPI_CTAR_LSBFE;
1046
+ }
8141047
8151048 spi_set_ctldata(spi, chip);
8161049
....@@ -822,74 +1055,40 @@
8221055 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
8231056
8241057 dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
825
- spi->master->bus_num, spi->chip_select);
1058
+ spi->controller->bus_num, spi->chip_select);
8261059
8271060 kfree(chip);
8281061 }
8291062
830
-static irqreturn_t dspi_interrupt(int irq, void *dev_id)
831
-{
832
- struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
833
- struct spi_message *msg = dspi->cur_msg;
834
- enum dspi_trans_mode trans_mode;
835
- u32 spi_sr, spi_tcr;
836
- u16 spi_tcnt;
837
-
838
- regmap_read(dspi->regmap, SPI_SR, &spi_sr);
839
- regmap_write(dspi->regmap, SPI_SR, spi_sr);
840
-
841
-
842
- if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
843
- /* Get transfer counter (in number of SPI transfers). It was
844
- * reset to 0 when transfer(s) were started.
845
- */
846
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
847
- spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
848
- /* Update total number of bytes that were transferred */
849
- msg->actual_length += spi_tcnt * dspi->bytes_per_word;
850
-
851
- trans_mode = dspi->devtype_data->trans_mode;
852
- switch (trans_mode) {
853
- case DSPI_EOQ_MODE:
854
- dspi_eoq_read(dspi);
855
- break;
856
- case DSPI_TCFQ_MODE:
857
- dspi_tcfq_read(dspi);
858
- break;
859
- default:
860
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
861
- trans_mode);
862
- return IRQ_HANDLED;
863
- }
864
-
865
- if (!dspi->len) {
866
- dspi->waitflags = 1;
867
- wake_up_interruptible(&dspi->waitq);
868
- } else {
869
- switch (trans_mode) {
870
- case DSPI_EOQ_MODE:
871
- dspi_eoq_write(dspi);
872
- break;
873
- case DSPI_TCFQ_MODE:
874
- dspi_tcfq_write(dspi);
875
- break;
876
- default:
877
- dev_err(&dspi->pdev->dev,
878
- "unsupported trans_mode %u\n",
879
- trans_mode);
880
- }
881
- }
882
-
883
- return IRQ_HANDLED;
884
- }
885
-
886
- return IRQ_NONE;
887
-}
888
-
8891063 static const struct of_device_id fsl_dspi_dt_ids[] = {
890
- { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
891
- { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
892
- { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
1064
+ {
1065
+ .compatible = "fsl,vf610-dspi",
1066
+ .data = &devtype_data[VF610],
1067
+ }, {
1068
+ .compatible = "fsl,ls1021a-v1.0-dspi",
1069
+ .data = &devtype_data[LS1021A],
1070
+ }, {
1071
+ .compatible = "fsl,ls1012a-dspi",
1072
+ .data = &devtype_data[LS1012A],
1073
+ }, {
1074
+ .compatible = "fsl,ls1028a-dspi",
1075
+ .data = &devtype_data[LS1028A],
1076
+ }, {
1077
+ .compatible = "fsl,ls1043a-dspi",
1078
+ .data = &devtype_data[LS1043A],
1079
+ }, {
1080
+ .compatible = "fsl,ls1046a-dspi",
1081
+ .data = &devtype_data[LS1046A],
1082
+ }, {
1083
+ .compatible = "fsl,ls2080a-dspi",
1084
+ .data = &devtype_data[LS2080A],
1085
+ }, {
1086
+ .compatible = "fsl,ls2085a-dspi",
1087
+ .data = &devtype_data[LS2085A],
1088
+ }, {
1089
+ .compatible = "fsl,lx2160a-dspi",
1090
+ .data = &devtype_data[LX2160A],
1091
+ },
8931092 { /* sentinel */ }
8941093 };
8951094 MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
....@@ -897,12 +1096,11 @@
8971096 #ifdef CONFIG_PM_SLEEP
8981097 static int dspi_suspend(struct device *dev)
8991098 {
900
- struct spi_master *master = dev_get_drvdata(dev);
901
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
1099
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
9021100
9031101 if (dspi->irq)
9041102 disable_irq(dspi->irq);
905
- spi_master_suspend(master);
1103
+ spi_controller_suspend(dspi->ctlr);
9061104 clk_disable_unprepare(dspi->clk);
9071105
9081106 pinctrl_pm_select_sleep_state(dev);
....@@ -912,8 +1110,7 @@
9121110
9131111 static int dspi_resume(struct device *dev)
9141112 {
915
- struct spi_master *master = dev_get_drvdata(dev);
916
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
1113
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
9171114 int ret;
9181115
9191116 pinctrl_pm_select_default_state(dev);
....@@ -921,7 +1118,7 @@
9211118 ret = clk_prepare_enable(dspi->clk);
9221119 if (ret)
9231120 return ret;
924
- spi_master_resume(master);
1121
+ spi_controller_resume(dspi->ctlr);
9251122 if (dspi->irq)
9261123 enable_irq(dspi->irq);
9271124
....@@ -938,16 +1135,16 @@
9381135 };
9391136
9401137 static const struct regmap_access_table dspi_volatile_table = {
941
- .yes_ranges = dspi_volatile_ranges,
942
- .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
1138
+ .yes_ranges = dspi_volatile_ranges,
1139
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
9431140 };
9441141
9451142 static const struct regmap_config dspi_regmap_config = {
946
- .reg_bits = 32,
947
- .val_bits = 32,
948
- .reg_stride = 4,
949
- .max_register = 0x88,
950
- .volatile_table = &dspi_volatile_table,
1143
+ .reg_bits = 32,
1144
+ .val_bits = 32,
1145
+ .reg_stride = 4,
1146
+ .max_register = 0x88,
1147
+ .volatile_table = &dspi_volatile_table,
9511148 };
9521149
9531150 static const struct regmap_range dspi_xspi_volatile_ranges[] = {
....@@ -958,107 +1155,168 @@
9581155 };
9591156
9601157 static const struct regmap_access_table dspi_xspi_volatile_table = {
961
- .yes_ranges = dspi_xspi_volatile_ranges,
962
- .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
1158
+ .yes_ranges = dspi_xspi_volatile_ranges,
1159
+ .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
9631160 };
9641161
9651162 static const struct regmap_config dspi_xspi_regmap_config[] = {
9661163 {
967
- .reg_bits = 32,
968
- .val_bits = 32,
969
- .reg_stride = 4,
970
- .max_register = 0x13c,
971
- .volatile_table = &dspi_xspi_volatile_table,
1164
+ .reg_bits = 32,
1165
+ .val_bits = 32,
1166
+ .reg_stride = 4,
1167
+ .max_register = 0x13c,
1168
+ .volatile_table = &dspi_xspi_volatile_table,
9721169 },
9731170 {
974
- .name = "pushr",
975
- .reg_bits = 16,
976
- .val_bits = 16,
977
- .reg_stride = 2,
978
- .max_register = 0x2,
1171
+ .name = "pushr",
1172
+ .reg_bits = 16,
1173
+ .val_bits = 16,
1174
+ .reg_stride = 2,
1175
+ .max_register = 0x2,
9791176 },
9801177 };
9811178
982
-static void dspi_init(struct fsl_dspi *dspi)
1179
+static int dspi_init(struct fsl_dspi *dspi)
9831180 {
984
- regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
985
- (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
1181
+ unsigned int mcr;
1182
+
1183
+ /* Set idle states for all chip select signals to high */
1184
+ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));
1185
+
1186
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
1187
+ mcr |= SPI_MCR_XSPI;
1188
+ if (!spi_controller_is_slave(dspi->ctlr))
1189
+ mcr |= SPI_MCR_MASTER;
1190
+
1191
+ regmap_write(dspi->regmap, SPI_MCR, mcr);
9861192 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
987
- if (dspi->devtype_data->xspi_mode)
988
- regmap_write(dspi->regmap, SPI_CTARE(0),
989
- SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
1193
+
1194
+ switch (dspi->devtype_data->trans_mode) {
1195
+ case DSPI_XSPI_MODE:
1196
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
1197
+ break;
1198
+ case DSPI_DMA_MODE:
1199
+ regmap_write(dspi->regmap, SPI_RSER,
1200
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
1201
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
1202
+ break;
1203
+ default:
1204
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
1205
+ dspi->devtype_data->trans_mode);
1206
+ return -EINVAL;
1207
+ }
1208
+
1209
+ return 0;
1210
+}
1211
+
1212
+static int dspi_slave_abort(struct spi_master *master)
1213
+{
1214
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
1215
+
1216
+ /*
1217
+ * Terminate all pending DMA transactions for the SPI working
1218
+ * in SLAVE mode.
1219
+ */
1220
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
1221
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
1222
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
1223
+ }
1224
+
1225
+ /* Clear the internal DSPI RX and TX FIFO buffers */
1226
+ regmap_update_bits(dspi->regmap, SPI_MCR,
1227
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
1228
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
1229
+
1230
+ return 0;
9901231 }
9911232
9921233 static int dspi_probe(struct platform_device *pdev)
9931234 {
9941235 struct device_node *np = pdev->dev.of_node;
995
- struct spi_master *master;
1236
+ const struct regmap_config *regmap_config;
1237
+ struct fsl_dspi_platform_data *pdata;
1238
+ struct spi_controller *ctlr;
1239
+ int ret, cs_num, bus_num = -1;
9961240 struct fsl_dspi *dspi;
9971241 struct resource *res;
998
- const struct regmap_config *regmap_config;
9991242 void __iomem *base;
1000
- struct fsl_dspi_platform_data *pdata;
1001
- int ret = 0, cs_num, bus_num;
1243
+ bool big_endian;
10021244
1003
- master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
1004
- if (!master)
1245
+ dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
1246
+ if (!dspi)
10051247 return -ENOMEM;
10061248
1007
- dspi = spi_master_get_devdata(master);
1249
+ ctlr = spi_alloc_master(&pdev->dev, 0);
1250
+ if (!ctlr)
1251
+ return -ENOMEM;
1252
+
1253
+ spi_controller_set_devdata(ctlr, dspi);
1254
+ platform_set_drvdata(pdev, dspi);
1255
+
10081256 dspi->pdev = pdev;
1009
- dspi->master = master;
1257
+ dspi->ctlr = ctlr;
10101258
1011
- master->transfer = NULL;
1012
- master->setup = dspi_setup;
1013
- master->transfer_one_message = dspi_transfer_one_message;
1014
- master->dev.of_node = pdev->dev.of_node;
1259
+ ctlr->setup = dspi_setup;
1260
+ ctlr->transfer_one_message = dspi_transfer_one_message;
1261
+ ctlr->dev.of_node = pdev->dev.of_node;
10151262
1016
- master->cleanup = dspi_cleanup;
1017
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1263
+ ctlr->cleanup = dspi_cleanup;
1264
+ ctlr->slave_abort = dspi_slave_abort;
1265
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
10181266
10191267 pdata = dev_get_platdata(&pdev->dev);
10201268 if (pdata) {
1021
- master->num_chipselect = pdata->cs_num;
1022
- master->bus_num = pdata->bus_num;
1269
+ ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
1270
+ ctlr->bus_num = pdata->bus_num;
10231271
1024
- dspi->devtype_data = &coldfire_data;
1272
+ /* Only Coldfire uses platform data */
1273
+ dspi->devtype_data = &devtype_data[MCF5441X];
1274
+ big_endian = true;
10251275 } else {
10261276
10271277 ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
10281278 if (ret < 0) {
10291279 dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
1030
- goto out_master_put;
1280
+ goto out_ctlr_put;
10311281 }
1032
- master->num_chipselect = cs_num;
1282
+ ctlr->num_chipselect = ctlr->max_native_cs = cs_num;
10331283
1034
- ret = of_property_read_u32(np, "bus-num", &bus_num);
1035
- if (ret < 0) {
1036
- dev_err(&pdev->dev, "can't get bus-num\n");
1037
- goto out_master_put;
1038
- }
1039
- master->bus_num = bus_num;
1284
+ of_property_read_u32(np, "bus-num", &bus_num);
1285
+ ctlr->bus_num = bus_num;
1286
+
1287
+ if (of_property_read_bool(np, "spi-slave"))
1288
+ ctlr->slave = true;
10401289
10411290 dspi->devtype_data = of_device_get_match_data(&pdev->dev);
10421291 if (!dspi->devtype_data) {
10431292 dev_err(&pdev->dev, "can't get devtype_data\n");
10441293 ret = -EFAULT;
1045
- goto out_master_put;
1294
+ goto out_ctlr_put;
10461295 }
1296
+
1297
+ big_endian = of_device_is_big_endian(np);
1298
+ }
1299
+ if (big_endian) {
1300
+ dspi->pushr_cmd = 0;
1301
+ dspi->pushr_tx = 2;
1302
+ } else {
1303
+ dspi->pushr_cmd = 2;
1304
+ dspi->pushr_tx = 0;
10471305 }
10481306
1049
- if (dspi->devtype_data->xspi_mode)
1050
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1307
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
1308
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
10511309 else
1052
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
1310
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
10531311
10541312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
10551313 base = devm_ioremap_resource(&pdev->dev, res);
10561314 if (IS_ERR(base)) {
10571315 ret = PTR_ERR(base);
1058
- goto out_master_put;
1316
+ goto out_ctlr_put;
10591317 }
10601318
1061
- if (dspi->devtype_data->xspi_mode)
1319
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
10621320 regmap_config = &dspi_xspi_regmap_config[0];
10631321 else
10641322 regmap_config = &dspi_regmap_config;
....@@ -1067,10 +1325,10 @@
10671325 dev_err(&pdev->dev, "failed to init regmap: %ld\n",
10681326 PTR_ERR(dspi->regmap));
10691327 ret = PTR_ERR(dspi->regmap);
1070
- goto out_master_put;
1328
+ goto out_ctlr_put;
10711329 }
10721330
1073
- if (dspi->devtype_data->xspi_mode) {
1331
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
10741332 dspi->regmap_pushr = devm_regmap_init_mmio(
10751333 &pdev->dev, base + SPI_PUSHR,
10761334 &dspi_xspi_regmap_config[1]);
....@@ -1079,7 +1337,7 @@
10791337 "failed to init pushr regmap: %ld\n",
10801338 PTR_ERR(dspi->regmap_pushr));
10811339 ret = PTR_ERR(dspi->regmap_pushr);
1082
- goto out_master_put;
1340
+ goto out_ctlr_put;
10831341 }
10841342 }
10851343
....@@ -1087,19 +1345,25 @@
10871345 if (IS_ERR(dspi->clk)) {
10881346 ret = PTR_ERR(dspi->clk);
10891347 dev_err(&pdev->dev, "unable to get clock\n");
1090
- goto out_master_put;
1348
+ goto out_ctlr_put;
10911349 }
10921350 ret = clk_prepare_enable(dspi->clk);
10931351 if (ret)
1094
- goto out_master_put;
1352
+ goto out_ctlr_put;
10951353
1096
- dspi_init(dspi);
1097
- dspi->irq = platform_get_irq(pdev, 0);
1098
- if (dspi->irq < 0) {
1099
- dev_err(&pdev->dev, "can't get platform irq\n");
1100
- ret = dspi->irq;
1354
+ ret = dspi_init(dspi);
1355
+ if (ret)
11011356 goto out_clk_put;
1357
+
1358
+ dspi->irq = platform_get_irq(pdev, 0);
1359
+ if (dspi->irq <= 0) {
1360
+ dev_info(&pdev->dev,
1361
+ "can't get platform irq, using poll mode\n");
1362
+ dspi->irq = 0;
1363
+ goto poll_mode;
11021364 }
1365
+
1366
+ init_completion(&dspi->xfer_done);
11031367
11041368 ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
11051369 IRQF_SHARED, pdev->name, dspi);
....@@ -1107,6 +1371,8 @@
11071371 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
11081372 goto out_clk_put;
11091373 }
1374
+
1375
+poll_mode:
11101376
11111377 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
11121378 ret = dspi_request_dma(dspi, res->start);
....@@ -1116,15 +1382,15 @@
11161382 }
11171383 }
11181384
1119
- master->max_speed_hz =
1385
+ ctlr->max_speed_hz =
11201386 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
11211387
1122
- init_waitqueue_head(&dspi->waitq);
1123
- platform_set_drvdata(pdev, master);
1388
+ if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
1389
+ ctlr->ptp_sts_supported = true;
11241390
1125
- ret = spi_register_master(master);
1391
+ ret = spi_register_controller(ctlr);
11261392 if (ret != 0) {
1127
- dev_err(&pdev->dev, "Problem registering DSPI master\n");
1393
+ dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
11281394 goto out_release_dma;
11291395 }
11301396
....@@ -1137,19 +1403,18 @@
11371403 free_irq(dspi->irq, dspi);
11381404 out_clk_put:
11391405 clk_disable_unprepare(dspi->clk);
1140
-out_master_put:
1141
- spi_master_put(master);
1406
+out_ctlr_put:
1407
+ spi_controller_put(ctlr);
11421408
11431409 return ret;
11441410 }
11451411
11461412 static int dspi_remove(struct platform_device *pdev)
11471413 {
1148
- struct spi_master *master = platform_get_drvdata(pdev);
1149
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
1414
+ struct fsl_dspi *dspi = platform_get_drvdata(pdev);
11501415
11511416 /* Disconnect from the SPI framework */
1152
- spi_unregister_controller(dspi->master);
1417
+ spi_unregister_controller(dspi->ctlr);
11531418
11541419 /* Disable RX and TX */
11551420 regmap_update_bits(dspi->regmap, SPI_MCR,
....@@ -1173,13 +1438,13 @@
11731438 }
11741439
11751440 static struct platform_driver fsl_dspi_driver = {
1176
- .driver.name = DRIVER_NAME,
1177
- .driver.of_match_table = fsl_dspi_dt_ids,
1178
- .driver.owner = THIS_MODULE,
1179
- .driver.pm = &dspi_pm,
1180
- .probe = dspi_probe,
1181
- .remove = dspi_remove,
1182
- .shutdown = dspi_shutdown,
1441
+ .driver.name = DRIVER_NAME,
1442
+ .driver.of_match_table = fsl_dspi_dt_ids,
1443
+ .driver.owner = THIS_MODULE,
1444
+ .driver.pm = &dspi_pm,
1445
+ .probe = dspi_probe,
1446
+ .remove = dspi_remove,
1447
+ .shutdown = dspi_shutdown,
11831448 };
11841449 module_platform_driver(fsl_dspi_driver);
11851450
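A closing note on the pushr_cmd/pushr_tx offsets that dspi_probe() now derives from the bus endianness: SPI_PUSHR keeps CMD in bits 31:16 and TXDATA in bits 15:0, so when the XSPI code addresses the register through the 16-bit "pushr" regmap, the CMD half sits at byte offset 0 on a big-endian bus and at byte offset 2 on a little-endian one - hence the 0/2 versus 2/0 assignments. A standalone sketch of that layout as seen from a little-endian host (illustrative only, not part of the patch):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t pushr = (uint32_t)0xcafe << 16 | 0xbeef;	/* CMD=0xcafe, TXDATA=0xbeef */
	uint16_t half[2];

	memcpy(half, &pushr, sizeof(pushr));

	/* On a little-endian host half[0] is TXDATA, so CMD lives at byte offset 2 */
	printf("offset 0: 0x%04" PRIx16 ", offset 2: 0x%04" PRIx16 "\n",
	       half[0], half[1]);
	return 0;
}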