2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma/at_hdmac.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
  *
  * Copyright (C) 2008 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  *
  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
  * The only Atmel DMA Controller that is not covered by this driver is the one
@@ -134,7 +129,6 @@
 	struct at_desc *ret = NULL;
 	unsigned long flags;
 	unsigned int i = 0;
-	LIST_HEAD(tmp_list);
 
 	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -152,17 +146,8 @@
 		"scanned %u descriptors on freelist\n", i);
 
 	/* no more descriptor available in initial pool: create one more */
-	if (!ret) {
-		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
-		if (ret) {
-			spin_lock_irqsave(&atchan->lock, flags);
-			atchan->descs_allocated++;
-			spin_unlock_irqrestore(&atchan->lock, flags);
-		} else {
-			dev_err(chan2dev(&atchan->chan_common),
-					"not enough descriptors available\n");
-		}
-	}
+	if (!ret)
+		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
 
 	return ret;
 }
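
The GFP_ATOMIC -> GFP_NOWAIT switch above fits this call site: the allocation
runs in a context that must not sleep but can tolerate failure, and
GFP_NOWAIT, unlike GFP_ATOMIC, does not dip into the atomic emergency
reserves. A minimal sketch of the pattern (hypothetical type and caller, not
driver code):

	/* Non-blocking allocation that tolerates failure: GFP_NOWAIT never
	 * sleeps and leaves the emergency reserves alone, because a NULL
	 * return is handled gracefully by the caller. */
	struct my_desc *desc = kzalloc(sizeof(*desc), GFP_NOWAIT);

	if (!desc)
		return NULL;	/* caller falls back to the free list later */
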
@@ -252,6 +237,8 @@
 		       ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable. */
+	wmb();
 	dma_writel(atdma, CHER, atchan->mask);
 
 	vdbg_dump_regs(atchan);
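
The wmb() added above is the usual MMIO publish pattern: every store that
programs the channel must reach the device before the store that enables it.
A self-contained sketch with made-up register offsets (the helper and offsets
are assumptions, not this driver's layout):

	#include <linux/bits.h>
	#include <linux/io.h>

	/* Hypothetical helper: program a channel, then publish the "go" bit.
	 * The wmb() keeps the CPU and its write buffers from reordering the
	 * relaxed MMIO stores, so the device never starts from half-written
	 * configuration. */
	static void chan_start(void __iomem *base, u32 cfg, unsigned int ch)
	{
		writel_relaxed(cfg, base + 0x04);	/* config (made up) */
		wmb();					/* cfg before enable */
		writel_relaxed(BIT(ch), base + 0x00);	/* enable (made up) */
	}
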
@@ -312,7 +299,8 @@
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr, trials;
+	u32 ctrla, dscr;
+	unsigned int i;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -382,7 +370,7 @@
 	dscr = channel_readl(atchan, DSCR);
 	rmb(); /* ensure DSCR is read before CTRLA */
 	ctrla = channel_readl(atchan, CTRLA);
-	for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 		u32 new_dscr;
 
 		rmb(); /* ensure DSCR is read after CTRLA */
@@ -408,7 +396,7 @@
 		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
 	}
-	if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+	if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 		return -ETIMEDOUT;
 
 	/* for the first descriptor we can be more accurate */
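
Context for the two hunks above: DSCR and CTRLA cannot be read atomically as
a pair, so the loop re-reads DSCR after CTRLA and retries while the hardware
has moved on, up to a fixed bound. After the rename, the exhausted-loop test
can be the exact i == ATC_MAX_DSCR_TRIALS, since a for loop that runs to
completion leaves its index equal to the bound. A condensed sketch of the
torn-read guard, with hypothetical read_dscr()/read_ctrla() accessors:

	/* Snapshot two registers consistently: retry while DSCR moves. */
	dscr = read_dscr(atchan);
	rmb();				/* DSCR before CTRLA */
	ctrla = read_ctrla(atchan);
	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
		u32 new_dscr;

		rmb();			/* DSCR again, after CTRLA */
		new_dscr = read_dscr(atchan);
		if (new_dscr == dscr)
			break;		/* stable pair captured */
		dscr = new_dscr;
		rmb();
		ctrla = read_ctrla(atchan);
	}
	if (i == ATC_MAX_DSCR_TRIALS)	/* loop ran to exhaustion */
		return -ETIMEDOUT;
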
@@ -441,20 +429,39 @@
  * atc_chain_complete - finish work for one transaction chain
  * @atchan: channel we work on
  * @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled */
+ */
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/* mark the descriptor as complete for non cyclic cases only */
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	dma_descriptor_unmap(txd);
+	/* for cyclic transfers,
+	 * no need to replay callback function while stopping */
+	if (!atc_chan_is_cyclic(atchan))
+		dmaengine_desc_get_callback_invoke(txd, NULL);
+
+	dma_run_dependencies(txd);
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* add myself to free_list */
+	list_add(&desc->desc_node, &atchan->free_list);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* If the transfer was a memset, free our temporary buffer */
 	if (desc->memset_buffer) {
@@ -462,92 +469,52 @@
 			      desc->memset_paddr);
 		desc->memset_buffer = false;
 	}
-
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* move myself to free_list */
-	list_move(&desc->desc_node, &atchan->free_list);
-
-	dma_descriptor_unmap(txd);
-	/* for cyclic transfers,
-	 * no need to replay callback function while stopping */
-	if (!atc_chan_is_cyclic(atchan)) {
-		/*
-		 * The API requires that no submissions are done from a
-		 * callback, so we don't need to drop the lock here
-		 */
-		dmaengine_desc_get_callback_invoke(txd, NULL);
-	}
-
-	dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-	struct at_desc *desc, *_desc;
-	LIST_HEAD(list);
-
-	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	if (!list_empty(&atchan->queue))
-		atc_dostart(atchan, atc_first_queued(atchan));
-	/* empty active_list now it is completed */
-	list_splice_init(&atchan->active_list, &list);
-	/* empty queue list by moving descriptors (if any) to active_list */
-	list_splice_init(&atchan->queue, &atchan->active_list);
-
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
 }
 
 /**
  * atc_advance_work - at the end of a transaction, move forward
  * @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	struct at_desc *desc;
+	unsigned long flags;
+
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
-	if (atc_chan_is_enabled(atchan))
-		return;
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list)) {
-		atc_complete_all(atchan);
-	} else {
-		atc_chain_complete(atchan, atc_first_active(atchan));
-		/* advance work */
-		atc_dostart(atchan, atc_first_active(atchan));
+	desc = atc_first_active(atchan);
+	/* Remove the transfer node from the active list. */
+	list_del_init(&desc->desc_node);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	atc_chain_complete(atchan, desc);
+
+	/* advance work */
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
 	}
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 
 /**
  * atc_handle_error - handle errors reported by DMA controller
  * @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
 */
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
+	struct at_desc *desc;
 	struct at_desc *child;
+	unsigned long flags;
 
+	spin_lock_irqsave(&atchan->lock, flags);
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * broked. Since we don't have any way to report errors, we'll
@@ -556,13 +523,12 @@
 	bad_desc = atc_first_active(atchan);
 	list_del_init(&bad_desc->desc_node);
 
-	/* As we are stopped, take advantage to push queued descriptors
-	 * in active_list */
-	list_splice_init(&atchan->queue, atchan->active_list.prev);
-
 	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list))
-		atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 
 	/*
 	 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -579,6 +545,8 @@
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		atc_dump_lli(atchan, &child->lli);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
 	atc_chain_complete(atchan, bad_desc);
 }
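
The completion rework above follows a dmaengine locking rule: client
callbacks may submit new descriptors, so they must run with the channel lock
dropped. Locking now lives inside the helpers and is scoped to list and
cookie updates only. The resulting shape, sketched (this mirrors the code,
it is not a verbatim excerpt):

	/* Lock only around shared state; never around the client callback. */
	spin_lock_irqsave(&atchan->lock, flags);
	dma_cookie_complete(txd);			/* bookkeeping */
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* Lock dropped: the client is free to resubmit from its callback. */
	dmaengine_desc_get_callback_invoke(txd, NULL);
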
@@ -586,8 +554,6 @@
 /**
  * atc_handle_cyclic - at the end of a period, run callback function
  * @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
 */
 static void atc_handle_cyclic(struct at_dma_chan *atchan)
 {
@@ -603,20 +569,17 @@
 
 /*-- IRQ & Tasklet ---------------------------------------------------*/
 
-static void atc_tasklet(unsigned long data)
+static void atc_tasklet(struct tasklet_struct *t)
 {
-	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
-	unsigned long flags;
+	struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
-		atc_handle_error(atchan);
-	else if (atc_chan_is_cyclic(atchan))
-		atc_handle_cyclic(atchan);
-	else
-		atc_advance_work(atchan);
+		return atc_handle_error(atchan);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (atc_chan_is_cyclic(atchan))
+		return atc_handle_cyclic(atchan);
+
+	atc_advance_work(atchan);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
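
The tasklet_setup()/from_tasklet() conversion above replaces the old
unsigned-long cookie: the callback now receives the tasklet_struct itself
and recovers its container with container_of() semantics. A minimal sketch
of the API, using a hypothetical channel struct:

	#include <linux/interrupt.h>

	struct my_chan {
		struct tasklet_struct tasklet;	/* embedded, like atchan's */
		/* ... channel state ... */
	};

	static void my_tasklet_fn(struct tasklet_struct *t)
	{
		/* from_tasklet() is a container_of() wrapper: no casts. */
		struct my_chan *chan = from_tasklet(chan, t, tasklet);

		/* ... deferred work on chan ... */
	}

	static void my_chan_init(struct my_chan *chan)
	{
		/* Replaces tasklet_init(..., (unsigned long)chan). */
		tasklet_setup(&chan->tasklet, my_tasklet_fn);
	}
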
@@ -664,7 +627,7 @@
 
 /**
  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
- * @desc: descriptor at the head of the transaction chain
+ * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
@@ -680,19 +643,11 @@
 	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	if (list_empty(&atchan->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-				desc->txd.cookie);
-		atc_dostart(atchan, desc);
-		list_add_tail(&desc->desc_node, &atchan->active_list);
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &atchan->queue);
-	}
-
+	list_add_tail(&desc->desc_node, &atchan->queue);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
+	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+		 desc->txd.cookie);
 	return cookie;
 }
 
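
With this change atc_tx_submit() only assigns a cookie and queues the
descriptor; the hardware is not touched until atc_issue_pending() runs,
which is the split the dmaengine API expects. The client-side view, as a
sketch:

	/* Submit queues; issue_pending starts. Nothing runs in between. */
	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EINVAL;		/* descriptor was not accepted */

	dma_async_issue_pending(chan);	/* now atc_dostart() can be called */
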
@@ -946,7 +901,7 @@
 		return NULL;
 	}
 
-	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
 	if (!vaddr) {
 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
 			__func__);
@@ -1004,7 +959,7 @@
 		return NULL;
 	}
 
-	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
 	if (!vaddr) {
 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
 			__func__);
@@ -1204,7 +1159,7 @@
 	return NULL;
 }
 
-/**
+/*
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
@@ -1225,7 +1180,7 @@
 	return -EINVAL;
 }
 
-/**
+/*
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
 static int
@@ -1320,7 +1275,7 @@
 	if (unlikely(!is_slave_direction(direction)))
 		goto err_out;
 
-	if (sconfig->direction == DMA_MEM_TO_DEV)
+	if (direction == DMA_MEM_TO_DEV)
 		reg_width = convert_buswidth(sconfig->dst_addr_width);
 	else
 		reg_width = convert_buswidth(sconfig->src_addr_width);
@@ -1387,8 +1342,6 @@
 	int chan_id = atchan->chan_common.chan_id;
 	unsigned long flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	spin_lock_irqsave(&atchan->lock, flags);
@@ -1407,8 +1360,6 @@
 	struct at_dma *atdma = to_at_dma(chan->device);
 	int chan_id = atchan->chan_common.chan_id;
 	unsigned long flags;
-
-	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
@@ -1430,10 +1381,7 @@
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	int chan_id = atchan->chan_common.chan_id;
-	struct at_desc *desc, *_desc;
 	unsigned long flags;
-
-	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
@@ -1453,12 +1401,8 @@
 		cpu_relax();
 
 	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
-
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	list_splice_tail_init(&atchan->queue, &atchan->free_list);
+	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
 	clear_bit(ATC_IS_PAUSED, &atchan->status);
 	/* if channel dedicated to cyclic operations, free it */
@@ -1520,29 +1464,31 @@
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
 * @chan: target DMA channel
 */
 static void atc_issue_pending(struct dma_chan *chan)
 {
-	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	unsigned long		flags;
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-	/* Not needed for cyclic transfers */
-	if (atc_chan_is_cyclic(atchan))
-		return;
-
 	spin_lock_irqsave(&atchan->lock, flags);
-	atc_advance_work(atchan);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
+
+	desc = atc_first_queued(atchan);
+	list_move_tail(&desc->desc_node, &atchan->active_list);
+	atc_dostart(atchan, desc);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
- * @client: current client requesting the channel be ready for requests
 *
 * return - the number of allocated descriptors
 */
@@ -1552,16 +1498,19 @@
 	struct at_dma *atdma = to_at_dma(chan->device);
 	struct at_desc *desc;
 	struct at_dma_slave *atslave;
-	unsigned long flags;
 	int i;
 	u32 cfg;
-	LIST_HEAD(tmp_list);
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
 	/* ASSERT: channel is idle */
 	if (atc_chan_is_enabled(atchan)) {
 		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
+		return -EIO;
+	}
+
+	if (!list_empty(&atchan->free_list)) {
+		dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
 		return -EIO;
 	}
 
@@ -1580,11 +1529,6 @@
 		cfg = atslave->cfg;
 	}
 
-	/* have we already been set up?
-	 * reconfigure channel but no need to reallocate descriptors */
-	if (!list_empty(&atchan->free_list))
-		return atchan->descs_allocated;
-
 	/* Allocate initial pool of descriptors */
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
 		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
@@ -1593,23 +1537,18 @@
 				"Only %d initial descriptors\n", i);
 			break;
 		}
-		list_add_tail(&desc->desc_node, &tmp_list);
+		list_add_tail(&desc->desc_node, &atchan->free_list);
 	}
 
-	spin_lock_irqsave(&atchan->lock, flags);
-	atchan->descs_allocated = i;
-	list_splice(&tmp_list, &atchan->free_list);
 	dma_cookie_init(chan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
 
 	dev_dbg(chan2dev(chan),
-		"alloc_chan_resources: allocated %d descriptors\n",
-		atchan->descs_allocated);
+		"alloc_chan_resources: allocated %d descriptors\n", i);
 
-	return atchan->descs_allocated;
+	return i;
 }
 
 /**
@@ -1623,9 +1562,6 @@
 	struct at_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
-		atchan->descs_allocated);
-
 	/* ASSERT: channel is idle */
 	BUG_ON(!list_empty(&atchan->active_list));
 	BUG_ON(!list_empty(&atchan->queue));
@@ -1638,7 +1574,6 @@
 		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
 	}
 	list_splice_init(&atchan->free_list, &list);
-	atchan->descs_allocated = 0;
 	atchan->status = 0;
 
 	/*
@@ -1919,8 +1854,7 @@
 		INIT_LIST_HEAD(&atchan->queue);
 		INIT_LIST_HEAD(&atchan->free_list);
 
-		tasklet_init(&atchan->tasklet, atc_tasklet,
-				(unsigned long)atchan);
+		tasklet_setup(&atchan->tasklet, atc_tasklet);
 		atc_enable_chan_irq(atdma, i);
 	}
 
@@ -1967,7 +1901,11 @@
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
 	  plat_dat->nr_channels);
 
-	dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_common);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+		goto err_dma_async_device_register;
+	}
 
 	/*
 	 * Do not return an error if the dmac node is not present in order to
@@ -1987,6 +1925,7 @@
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
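
The new err_dma_async_device_register label above completes the probe unwind
chain once the previously ignored return value of dma_async_device_register()
is checked. The general goto-unwind pattern, as a self-contained sketch with
hypothetical steps:

	/* Each label undoes exactly what succeeded before the failure. */
	static int example_probe(void)
	{
		int err;

		err = setup_pools();		/* hypothetical helpers */
		if (err)
			return err;

		err = register_device();
		if (err)
			goto err_register;	/* pools exist; device not */

		err = register_controller();
		if (err)
			goto err_controller;

		return 0;

	err_controller:
		unregister_device();
	err_register:
		teardown_pools();
		return err;
	}
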