2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/spi/spi-uniphier.c
@@ -7,6 +7,8 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -16,11 +18,13 @@
 #include <asm/unaligned.h>

 #define SSI_TIMEOUT_MS		2000
+#define SSI_POLL_TIMEOUT_US	200
 #define SSI_MAX_CLK_DIVIDER	254
 #define SSI_MIN_CLK_DIVIDER	4

 struct uniphier_spi_priv {
 	void __iomem *base;
+	dma_addr_t base_dma_addr;
 	struct clk *clk;
 	struct spi_master *master;
 	struct completion xfer_done;
@@ -30,6 +34,7 @@
 	unsigned int rx_bytes;
 	const u8 *tx_buf;
 	u8 *rx_buf;
+	atomic_t dma_busy;

 	bool is_save_param;
 	u8 bits_per_word;
@@ -59,11 +64,16 @@
 #define SSI_FPS_FSTRT		BIT(14)

 #define SSI_SR			0x14
+#define SSI_SR_BUSY		BIT(7)
 #define SSI_SR_RNE		BIT(0)

 #define SSI_IE			0x18
+#define SSI_IE_TCIE		BIT(4)
 #define SSI_IE_RCIE		BIT(3)
+#define SSI_IE_TXRE		BIT(2)
+#define SSI_IE_RXRE		BIT(1)
 #define SSI_IE_RORIE		BIT(0)
+#define SSI_IE_ALL_MASK		GENMASK(4, 0)

 #define SSI_IS			0x1c
 #define SSI_IS_RXRS		BIT(9)
@@ -85,15 +95,19 @@
 #define SSI_RXDR		0x24

 #define SSI_FIFO_DEPTH		8U
+#define SSI_FIFO_BURST_NUM	1
+
+#define SSI_DMA_RX_BUSY		BIT(1)
+#define SSI_DMA_TX_BUSY		BIT(0)

 static inline unsigned int bytes_per_word(unsigned int bits)
 {
 	return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
 }

-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+					   u32 mask)
 {
-	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
 	u32 val;

 	val = readl(priv->base + SSI_IE);
@@ -101,9 +115,9 @@
 	writel(val, priv->base + SSI_IE);
 }

-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+					    u32 mask)
 {
-	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
 	u32 val;

 	val = readl(priv->base + SSI_IE);
@@ -214,6 +228,7 @@
 	if (!priv->is_save_param || priv->mode != spi->mode) {
 		uniphier_spi_set_mode(spi);
 		priv->mode = spi->mode;
+		priv->is_save_param = false;
 	}

 	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
@@ -226,8 +241,7 @@
 		priv->speed_hz = t->speed_hz;
 	}

-	if (!priv->is_save_param)
-		priv->is_save_param = true;
+	priv->is_save_param = true;

 	/* reset FIFOs */
 	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
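Note on the two setup_transfer hunks above: clearing is_save_param inside the mode branch makes the subsequent bits_per_word and speed_hz checks re-run after a mode change (all three checks share the same cache-valid flag), and the redundant "if (!priv->is_save_param)" guard before the final assignment is dropped. Below is a minimal userspace model of this caching idiom; it is illustrative only, and all names in it are hypothetical, not driver code.

/* Hypothetical model of the setup-parameter cache, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct cache { bool valid; int mode, bpw, hz; };

static void apply(struct cache *c, int mode, int bpw, int hz)
{
	if (!c->valid || c->mode != mode) {
		printf("program mode %d\n", mode);
		c->mode = mode;
		c->valid = false;	/* force the checks below to re-run */
	}
	if (!c->valid || c->bpw != bpw) {
		printf("program %d bits per word\n", bpw);
		c->bpw = bpw;
	}
	if (!c->valid || c->hz != hz) {
		printf("program %d Hz\n", hz);
		c->hz = hz;
	}
	c->valid = true;
}

int main(void)
{
	struct cache c = { 0 };

	apply(&c, 0, 8, 1000000);	/* programs everything */
	apply(&c, 0, 8, 1000000);	/* programs nothing */
	apply(&c, 3, 8, 1000000);	/* mode change reprograms all three */
	return 0;
}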
@@ -288,23 +302,32 @@
 	}
 }

-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+					    unsigned int threshold)
 {
-	unsigned int tx_count;
 	u32 val;

-	tx_count = DIV_ROUND_UP(priv->tx_bytes,
-				bytes_per_word(priv->bits_per_word));
-	tx_count = min(tx_count, SSI_FIFO_DEPTH);
-
-	/* set fifo threshold */
 	val = readl(priv->base + SSI_FC);
 	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
-	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
-	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
+	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
 	writel(val, priv->base + SSI_FC);
+}

-	while (tx_count--)
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+	unsigned int fifo_threshold, fill_words;
+	unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+	fill_words = fifo_threshold -
+		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
+
+	while (fill_words--)
 		uniphier_spi_send(priv);
 }

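The rewritten fill logic sizes the RX FIFO threshold from rx_bytes (words still expected to arrive) and tops up the TX FIFO only by the difference between that threshold and the words already in flight, so no more than SSI_FIFO_DEPTH words are ever outstanding. A standalone walk-through of the arithmetic, mirroring the expressions above (the sample byte counts are assumptions):

/* Worked example of the fill_words arithmetic; standalone C. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SSI_FIFO_DEPTH		8U

int main(void)
{
	unsigned int bpw = 1;			/* bytes per word, 8-bit words */
	unsigned int tx_bytes = 20, rx_bytes = 24;	/* 4 words in flight */

	unsigned int fifo_threshold = DIV_ROUND_UP(rx_bytes, bpw);
	if (fifo_threshold > SSI_FIFO_DEPTH)
		fifo_threshold = SSI_FIFO_DEPTH;	/* min(threshold, depth) */

	/* words in flight = rx_bytes - tx_bytes; fill only the remainder */
	unsigned int fill_words = fifo_threshold -
		DIV_ROUND_UP(rx_bytes - tx_bytes, bpw);

	printf("threshold=%u, fill %u words\n", fifo_threshold, fill_words);
	/* prints: threshold=8, fill 4 words */
	return 0;
}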
@@ -323,30 +346,211 @@
 	writel(val, priv->base + SSI_FPS);
 }

-static int uniphier_spi_transfer_one(struct spi_master *master,
-				     struct spi_device *spi,
-				     struct spi_transfer *t)
+static bool uniphier_spi_can_dma(struct spi_master *master,
+				 struct spi_device *spi,
+				 struct spi_transfer *t)
 {
 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
-	int status;
+	unsigned int bpw = bytes_per_word(priv->bits_per_word);

-	uniphier_spi_setup_transfer(spi, t);
+	if ((!master->dma_tx && !master->dma_rx)
+	    || (!master->dma_tx && t->tx_buf)
+	    || (!master->dma_rx && t->rx_buf))
+		return false;
+
+	return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+	struct spi_master *master = data;
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+	uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+	if (!(state & SSI_DMA_TX_BUSY))
+		spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+	struct spi_master *master = data;
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+	uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+	if (!(state & SSI_DMA_RX_BUSY))
+		spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+					 struct spi_device *spi,
+					 struct spi_transfer *t)
+{
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+	int buswidth;
+
+	atomic_set(&priv->dma_busy, 0);
+
+	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+	if (priv->bits_per_word <= 8)
+		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	else if (priv->bits_per_word <= 16)
+		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	else
+		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	if (priv->rx_buf) {
+		struct dma_slave_config rxconf = {
+			.direction = DMA_DEV_TO_MEM,
+			.src_addr = priv->base_dma_addr + SSI_RXDR,
+			.src_addr_width = buswidth,
+			.src_maxburst = SSI_FIFO_BURST_NUM,
+		};
+
+		dmaengine_slave_config(master->dma_rx, &rxconf);
+
+		rxdesc = dmaengine_prep_slave_sg(
+			master->dma_rx,
+			t->rx_sg.sgl, t->rx_sg.nents,
+			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!rxdesc)
+			goto out_err_prep;
+
+		rxdesc->callback = uniphier_spi_dma_rxcb;
+		rxdesc->callback_param = master;
+
+		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+		dmaengine_submit(rxdesc);
+		dma_async_issue_pending(master->dma_rx);
+	}
+
+	if (priv->tx_buf) {
+		struct dma_slave_config txconf = {
+			.direction = DMA_MEM_TO_DEV,
+			.dst_addr = priv->base_dma_addr + SSI_TXDR,
+			.dst_addr_width = buswidth,
+			.dst_maxburst = SSI_FIFO_BURST_NUM,
+		};
+
+		dmaengine_slave_config(master->dma_tx, &txconf);
+
+		txdesc = dmaengine_prep_slave_sg(
+			master->dma_tx,
+			t->tx_sg.sgl, t->tx_sg.nents,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!txdesc)
+			goto out_err_prep;
+
+		txdesc->callback = uniphier_spi_dma_txcb;
+		txdesc->callback_param = master;
+
+		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+		dmaengine_submit(txdesc);
+		dma_async_issue_pending(master->dma_tx);
+	}
+
+	/* signal that we need to wait for completion */
+	return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+	if (rxdesc)
+		dmaengine_terminate_sync(master->dma_rx);
+
+	return -EINVAL;
+}
+
+static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+					 struct spi_device *spi,
+					 struct spi_transfer *t)
+{
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	struct device *dev = master->dev.parent;
+	unsigned long time_left;

 	reinit_completion(&priv->xfer_done);

 	uniphier_spi_fill_tx_fifo(priv);

-	uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+	uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

-	status = wait_for_completion_timeout(&priv->xfer_done,
-					     msecs_to_jiffies(SSI_TIMEOUT_MS));
+	time_left = wait_for_completion_timeout(&priv->xfer_done,
+						msecs_to_jiffies(SSI_TIMEOUT_MS));

-	uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+	uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

-	if (status < 0)
-		return status;
+	if (!time_left) {
+		dev_err(dev, "transfer timeout.\n");
+		return -ETIMEDOUT;
+	}

 	return priv->error;
+}
+
+static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+					  struct spi_device *spi,
+					  struct spi_transfer *t)
+{
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	int loop = SSI_POLL_TIMEOUT_US * 10;
+
+	while (priv->tx_bytes) {
+		uniphier_spi_fill_tx_fifo(priv);
+
+		while ((priv->rx_bytes - priv->tx_bytes) > 0) {
+			while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
+								&& loop--)
+				ndelay(100);
+
+			if (loop == -1)
+				goto irq_transfer;
+
+			uniphier_spi_recv(priv);
+		}
+	}
+
+	return 0;
+
+irq_transfer:
+	return uniphier_spi_transfer_one_irq(master, spi, t);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+				     struct spi_device *spi,
+				     struct spi_transfer *t)
+{
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	unsigned long threshold;
+	bool use_dma;
+
+	/* Terminate and return success for 0 byte length transfer */
+	if (!t->len)
+		return 0;
+
+	uniphier_spi_setup_transfer(spi, t);
+
+	use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+	if (use_dma)
+		return uniphier_spi_transfer_one_dma(master, spi, t);
+
+	/*
+	 * If the transfer operation will take longer than
+	 * SSI_POLL_TIMEOUT_US, it should use irq.
+	 */
+	threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
+				 USEC_PER_SEC * BITS_PER_BYTE);
+	if (t->len > threshold)
+		return uniphier_spi_transfer_one_irq(master, spi, t);
+	else
+		return uniphier_spi_transfer_one_poll(master, spi, t);
 }

 static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
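transfer_one() now dispatches per transfer: DMA when can_dma() accepts it, busy-wait polling when the whole transfer fits inside SSI_POLL_TIMEOUT_US, and the original IRQ-driven path otherwise. The byte threshold is simply the number of bits the bus moves in 200 us, divided by 8. A standalone check of that arithmetic; the sample clock rates are assumptions, not from the patch:

/* Standalone check of the poll-vs-IRQ byte threshold computed above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SSI_POLL_TIMEOUT_US	200ULL
#define USEC_PER_SEC		1000000ULL
#define BITS_PER_BYTE		8ULL

int main(void)
{
	/* sample clock rates (assumptions, not from the patch) */
	unsigned long long rates[] = { 1000000, 10000000, 50000000 };

	for (int i = 0; i < 3; i++) {
		unsigned long long threshold =
			DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * rates[i],
				     USEC_PER_SEC * BITS_PER_BYTE);
		printf("%llu Hz -> poll transfers up to %llu bytes\n",
		       rates[i], threshold);
	}
	/* prints 25, 250 and 1250 bytes respectively */
	return 0;
}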
@@ -365,6 +569,32 @@
 	writel(0, priv->base + SSI_CTL);

 	return 0;
+}
+
+static void uniphier_spi_handle_err(struct spi_master *master,
+				    struct spi_message *msg)
+{
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+	u32 val;
+
+	/* stop running spi transfer */
+	writel(0, priv->base + SSI_CTL);
+
+	/* reset FIFOs */
+	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+	writel(val, priv->base + SSI_FC);
+
+	uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+	if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+		dmaengine_terminate_async(master->dma_tx);
+		atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+	}
+
+	if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+		dmaengine_terminate_async(master->dma_rx);
+		atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+	}
 }

 static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
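Each DMA completion callback above clears only its own busy bit via atomic_fetch_andnot() and finalizes the transfer only if the other direction's bit was already clear, so exactly one of the two callbacks, whichever fires last, calls spi_finalize_current_transfer(). A minimal C11 model of that handshake; stdatomic's atomic_fetch_and() with an inverted mask stands in for the kernel's atomic_fetch_andnot():

/* Minimal C11 model of the dma_busy completion handshake. */
#include <stdatomic.h>
#include <stdio.h>

#define DMA_TX_BUSY	(1 << 0)
#define DMA_RX_BUSY	(1 << 1)

static atomic_int dma_busy;

static void complete_dir(int my_bit, int other_bit, const char *name)
{
	/*
	 * Returns the value *before* clearing my_bit, like the kernel's
	 * atomic_fetch_andnot(my_bit, &dma_busy).
	 */
	int state = atomic_fetch_and(&dma_busy, ~my_bit);

	if (!(state & other_bit))
		printf("%s finished last: finalize transfer\n", name);
	else
		printf("%s finished first: wait for the other side\n", name);
}

int main(void)
{
	atomic_store(&dma_busy, DMA_TX_BUSY | DMA_RX_BUSY);
	complete_dir(DMA_TX_BUSY, DMA_RX_BUSY, "tx");	/* rx still busy */
	complete_dir(DMA_RX_BUSY, DMA_TX_BUSY, "rx");	/* finalizes */
	return 0;
}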
@@ -413,6 +643,8 @@
 	struct uniphier_spi_priv *priv;
 	struct spi_master *master;
 	struct resource *res;
+	struct dma_slave_caps caps;
+	u32 dma_tx_burst = 0, dma_rx_burst = 0;
 	unsigned long clk_rate;
 	int irq;
 	int ret;
@@ -427,12 +659,12 @@
 	priv->master = master;
 	priv->is_save_param = false;

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
 		goto out_master_put;
 	}
+	priv->base_dma_addr = res->start;

 	priv->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(priv->clk)) {
@@ -447,7 +679,6 @@

 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ\n");
 		ret = irq;
 		goto out_disable_clk;
 	}
@@ -476,13 +707,65 @@
 		= uniphier_spi_prepare_transfer_hardware;
 	master->unprepare_transfer_hardware
 		= uniphier_spi_unprepare_transfer_hardware;
+	master->handle_err = uniphier_spi_handle_err;
+	master->can_dma = uniphier_spi_can_dma;
+
 	master->num_chipselect = 1;
+	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+	if (IS_ERR_OR_NULL(master->dma_tx)) {
+		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out_disable_clk;
+		}
+		master->dma_tx = NULL;
+		dma_tx_burst = INT_MAX;
+	} else {
+		ret = dma_get_slave_caps(master->dma_tx, &caps);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+				ret);
+			goto out_release_dma;
+		}
+		dma_tx_burst = caps.max_burst;
+	}
+
+	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+	if (IS_ERR_OR_NULL(master->dma_rx)) {
+		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out_release_dma;
+		}
+		master->dma_rx = NULL;
+		dma_rx_burst = INT_MAX;
+	} else {
+		ret = dma_get_slave_caps(master->dma_rx, &caps);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
				ret);
+			goto out_release_dma;
+		}
+		dma_rx_burst = caps.max_burst;
+	}
+
+	master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

 	ret = devm_spi_register_master(&pdev->dev, master);
 	if (ret)
-		goto out_disable_clk;
+		goto out_release_dma;

 	return 0;
+
+out_release_dma:
+	if (!IS_ERR_OR_NULL(master->dma_rx)) {
+		dma_release_channel(master->dma_rx);
+		master->dma_rx = NULL;
+	}
+	if (!IS_ERR_OR_NULL(master->dma_tx)) {
+		dma_release_channel(master->dma_tx);
+		master->dma_tx = NULL;
+	}

 out_disable_clk:
 	clk_disable_unprepare(priv->clk);
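The DMA channels are optional in this probe path: dma_request_chan() never returns NULL, so any error other than -EPROBE_DEFER downgrades that direction to PIO (the pointer is NULL'ed and the burst treated as unlimited), while -EPROBE_DEFER propagates so probe is retried once the DMA controller shows up. A condensed sketch of the same request pattern with a hypothetical helper name; unlike the driver, this sketch also treats a caps-query failure as "no DMA" rather than failing probe:

/* Hypothetical helper condensing the optional-channel pattern above. */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/limits.h>

static struct dma_chan *example_request_optional_chan(struct device *dev,
						      const char *name,
						      u32 *max_burst)
{
	struct dma_slave_caps caps;
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan)) {
		/* DMA provider not probed yet: let the caller defer */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
		/* no usable channel: fall back to PIO for this direction */
		*max_burst = INT_MAX;
		return NULL;
	}

	if (dma_get_slave_caps(chan, &caps)) {
		/* unlike the driver above, treat this as "no DMA" too */
		dma_release_channel(chan);
		*max_burst = INT_MAX;
		return NULL;
	}

	*max_burst = caps.max_burst;
	return chan;
}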
@@ -494,7 +777,13 @@

 static int uniphier_spi_remove(struct platform_device *pdev)
 {
-	struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+	if (master->dma_tx)
+		dma_release_channel(master->dma_tx);
+	if (master->dma_rx)
+		dma_release_channel(master->dma_rx);

 	clk_disable_unprepare(priv->clk);

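The remove() change assumes the probe path (unchanged context, not shown in this hunk) stores the spi_master as the platform driver data, so teardown has to go drvdata -> master -> devdata instead of casting drvdata to priv directly, and it releases the channels acquired in probe before gating the clock. A sketch of that drvdata pairing under the same assumption; names prefixed example_ are hypothetical, and error handling is trimmed:

/* Sketch of the probe/remove drvdata pairing assumed above. */
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv {
	int placeholder;	/* stands in for clk, base, etc. */
};

static int example_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct example_priv));
	if (!master)
		return -ENOMEM;

	/* driver data holds the master, not the private struct */
	platform_set_drvdata(pdev, master);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);
	return ret;
}

static int example_remove(struct platform_device *pdev)
{
	/* so teardown goes drvdata -> master -> devdata */
	struct spi_master *master = platform_get_drvdata(pdev);
	struct example_priv *priv = spi_master_get_devdata(master);

	(void)priv;	/* release priv-owned resources here */
	return 0;
}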