From 297b60346df8beafee954a0fd7c2d64f33f3b9bc Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Sat, 11 May 2024 01:44:05 +0000 Subject: [PATCH] rtl8211F_led_control --- kernel/drivers/dma/at_hdmac.c | 267 ++++++++++++++++++++-------------------------------- 1 files changed, 103 insertions(+), 164 deletions(-) diff --git a/kernel/drivers/dma/at_hdmac.c b/kernel/drivers/dma/at_hdmac.c index c52718b..6a4f969 100644 --- a/kernel/drivers/dma/at_hdmac.c +++ b/kernel/drivers/dma/at_hdmac.c @@ -1,13 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) * * Copyright (C) 2008 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * * * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. * The only Atmel DMA Controller that is not covered by this driver is the one @@ -134,7 +129,6 @@ struct at_desc *ret = NULL; unsigned long flags; unsigned int i = 0; - LIST_HEAD(tmp_list); spin_lock_irqsave(&atchan->lock, flags); list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { @@ -152,17 +146,8 @@ "scanned %u descriptors on freelist\n", i); /* no more descriptor available in initial pool: create one more */ - if (!ret) { - ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); - if (ret) { - spin_lock_irqsave(&atchan->lock, flags); - atchan->descs_allocated++; - spin_unlock_irqrestore(&atchan->lock, flags); - } else { - dev_err(chan2dev(&atchan->chan_common), - "not enough descriptors available\n"); - } - } + if (!ret) + ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT); return ret; } @@ -252,6 +237,8 @@ ATC_SPIP_BOUNDARY(first->boundary)); channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) | ATC_DPIP_BOUNDARY(first->boundary)); + /* Don't allow CPU to reorder channel enable. 
@@ -252,6 +237,8 @@
 		       ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable. */
+	wmb();
 	dma_writel(atdma, CHER, atchan->mask);
 
 	vdbg_dump_regs(atchan);
@@ -312,7 +299,8 @@
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr, trials;
+	u32 ctrla, dscr;
+	unsigned int i;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -382,7 +370,7 @@
 		dscr = channel_readl(atchan, DSCR);
 		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
-		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+		for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 			u32 new_dscr;
 
 			rmb(); /* ensure DSCR is read after CTRLA */
@@ -408,7 +396,7 @@
 			rmb(); /* ensure DSCR is read before CTRLA */
 			ctrla = channel_readl(atchan, CTRLA);
 		}
-		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 			return -ETIMEDOUT;
 
 	/* for the first descriptor we can be more accurate */
@@ -441,20 +429,39 @@
  * atc_chain_complete - finish work for one transaction chain
  * @atchan: channel we work on
  * @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled
  */
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/* mark the descriptor as complete for non cyclic cases only */
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	dma_descriptor_unmap(txd);
+	/* for cyclic transfers,
+	 * no need to replay callback function while stopping */
+	if (!atc_chan_is_cyclic(atchan))
+		dmaengine_desc_get_callback_invoke(txd, NULL);
+
+	dma_run_dependencies(txd);
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* add myself to free_list */
+	list_add(&desc->desc_node, &atchan->free_list);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* If the transfer was a memset, free our temporary buffer */
 	if (desc->memset_buffer) {
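
Two things are worth noting in the rework above: atc_dostart() now issues a wmb() so the descriptor setup writes are observed before the channel-enable write, and atc_chain_complete() no longer expects the caller to hold atchan->lock. It takes the lock only around state and list updates, and invokes the client callback unlocked, since a dmaengine callback may re-enter the driver and submit new work. A rough sketch of that callback-outside-the-lock shape (stand-in types, not this driver's code):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct sketch_txd { void (*callback)(void *param); void *param; };
    struct sketch_desc { struct list_head node; struct sketch_txd txd; };
    struct sketch_chan { spinlock_t lock; struct list_head free_list; };

    static void sketch_chain_complete(struct sketch_chan *c,
    				  struct sketch_desc *d)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&c->lock, flags);
    	/* ...mark the cookie complete under the lock... */
    	spin_unlock_irqrestore(&c->lock, flags);

    	/* Callback runs unlocked: it may submit new descriptors. */
    	if (d->txd.callback)
    		d->txd.callback(d->txd.param);

    	spin_lock_irqsave(&c->lock, flags);
    	list_add(&d->node, &c->free_list);	/* recycle descriptor */
    	spin_unlock_irqrestore(&c->lock, flags);
    }
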
@@ -462,92 +469,52 @@
 			      desc->memset_paddr);
 		desc->memset_buffer = false;
 	}
-
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* move myself to free_list */
-	list_move(&desc->desc_node, &atchan->free_list);
-
-	dma_descriptor_unmap(txd);
-	/* for cyclic transfers,
-	 * no need to replay callback function while stopping */
-	if (!atc_chan_is_cyclic(atchan)) {
-		/*
-		 * The API requires that no submissions are done from a
-		 * callback, so we don't need to drop the lock here
-		 */
-		dmaengine_desc_get_callback_invoke(txd, NULL);
-	}
-
-	dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-	struct at_desc *desc, *_desc;
-	LIST_HEAD(list);
-
-	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	if (!list_empty(&atchan->queue))
-		atc_dostart(atchan, atc_first_queued(atchan));
-	/* empty active_list now it is completed */
-	list_splice_init(&atchan->active_list, &list);
-	/* empty queue list by moving descriptors (if any) to active_list */
-	list_splice_init(&atchan->queue, &atchan->active_list);
-
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
 }
 
 /**
  * atc_advance_work - at the end of a transaction, move forward
  * @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	struct at_desc *desc;
+	unsigned long flags;
+
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
-	if (atc_chan_is_enabled(atchan))
-		return;
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list)) {
-		atc_complete_all(atchan);
-	} else {
-		atc_chain_complete(atchan, atc_first_active(atchan));
-		/* advance work */
-		atc_dostart(atchan, atc_first_active(atchan));
+	desc = atc_first_active(atchan);
+	/* Remove the transfer node from the active list. */
+	list_del_init(&desc->desc_node);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	atc_chain_complete(atchan, desc);
+
+	/* advance work */
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
 	}
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
  * atc_handle_error - handle errors reported by DMA controller
  * @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
+	struct at_desc *desc;
 	struct at_desc *child;
+	unsigned long flags;
 
+	spin_lock_irqsave(&atchan->lock, flags);
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * broked. Since we don't have any way to report errors, we'll
@@ -556,13 +523,12 @@
 	bad_desc = atc_first_active(atchan);
 	list_del_init(&bad_desc->desc_node);
 
-	/* As we are stopped, take advantage to push queued descriptors
-	 * in active_list */
-	list_splice_init(&atchan->queue, atchan->active_list.prev);
-
 	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list))
-		atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 
 	/*
 	 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -579,6 +545,8 @@
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		atc_dump_lli(atchan, &child->lli);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
 	atc_chain_complete(atchan, bad_desc);
 }
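
atc_complete_all() is gone: retiring several descriptors in one pass raced with new submissions, so the tasklet now retires exactly one finished descriptor and then restarts the engine. Reduced to a self-contained sketch (the sk_* names and stub helpers are illustrative, not the driver's API):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct sk_desc { struct list_head node; };
    struct sk_chan { spinlock_t lock; struct list_head active_list, queue; };

    static bool sk_hw_busy(struct sk_chan *c) { return false; }	/* stub */
    static void sk_hw_start(struct sk_chan *c, struct sk_desc *d) { } /* stub */
    static void sk_complete(struct sk_chan *c, struct sk_desc *d) { } /* stub */

    static void sk_advance_work(struct sk_chan *c)
    {
    	struct sk_desc *d;
    	unsigned long flags;

    	spin_lock_irqsave(&c->lock, flags);
    	if (sk_hw_busy(c) || list_empty(&c->active_list)) {
    		spin_unlock_irqrestore(&c->lock, flags);
    		return;
    	}
    	/* Detach the finished head so its callback can run unlocked. */
    	d = list_first_entry(&c->active_list, struct sk_desc, node);
    	list_del_init(&d->node);
    	spin_unlock_irqrestore(&c->lock, flags);

    	sk_complete(c, d);

    	/* Then hand the next queued transfer to the hardware, if any. */
    	spin_lock_irqsave(&c->lock, flags);
    	if (!list_empty(&c->queue)) {
    		d = list_first_entry(&c->queue, struct sk_desc, node);
    		list_move_tail(&d->node, &c->active_list);
    		sk_hw_start(c, d);
    	}
    	spin_unlock_irqrestore(&c->lock, flags);
    }
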
@@ -586,8 +554,6 @@
 /**
  * atc_handle_cyclic - at the end of a period, run callback function
  * @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_cyclic(struct at_dma_chan *atchan)
 {
@@ -603,20 +569,17 @@
 
 /*--  IRQ & Tasklet  ---------------------------------------------------*/
 
-static void atc_tasklet(unsigned long data)
+static void atc_tasklet(struct tasklet_struct *t)
 {
-	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
-	unsigned long flags;
+	struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
-		atc_handle_error(atchan);
-	else if (atc_chan_is_cyclic(atchan))
-		atc_handle_cyclic(atchan);
-	else
-		atc_advance_work(atchan);
+		return atc_handle_error(atchan);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (atc_chan_is_cyclic(atchan))
+		return atc_handle_cyclic(atchan);
+
+	atc_advance_work(atchan);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -664,7 +627,7 @@
 
 /**
  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
- * @desc: descriptor at the head of the transaction chain
+ * @tx: descriptor at the head of the transaction chain
  *
  * Queue chain if DMA engine is working already
 *
@@ -680,19 +643,11 @@
 	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	if (list_empty(&atchan->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-				desc->txd.cookie);
-		atc_dostart(atchan, desc);
-		list_add_tail(&desc->desc_node, &atchan->active_list);
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &atchan->queue);
-	}
-
+	list_add_tail(&desc->desc_node, &atchan->queue);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
+	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+		 desc->txd.cookie);
 	return cookie;
 }
 
@@ -946,7 +901,7 @@
 		return NULL;
 	}
 
-	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
 	if (!vaddr) {
 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
 			__func__);
@@ -1004,7 +959,7 @@
 		return NULL;
 	}
 
-	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
 	if (!vaddr) {
 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
 			__func__);
@@ -1204,7 +1159,7 @@
 	return NULL;
 }
 
-/**
+/*
  * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
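
The tasklet conversion above (and the matching tasklet_setup() call in the probe hunk near the end of this patch) moves the driver to the type-safe tasklet API available since Linux 5.9: the callback receives the tasklet_struct itself, and from_tasklet(), a container_of() wrapper, recovers the enclosing per-channel structure, eliminating the old unsigned-long cast. A minimal sketch (sk_* names are stand-ins):

    #include <linux/interrupt.h>

    struct sk_dma_chan {
    	struct tasklet_struct tasklet;
    	/* ...per-channel state... */
    };

    static void sk_tasklet_fn(struct tasklet_struct *t)
    {
    	/* from_tasklet() == container_of(t, struct sk_dma_chan, tasklet) */
    	struct sk_dma_chan *chan = from_tasklet(chan, t, tasklet);

    	(void)chan;	/* ...deferred per-channel work... */
    }

    static void sk_chan_init(struct sk_dma_chan *chan)
    {
    	/* replaces tasklet_init(&t, fn, (unsigned long)chan) */
    	tasklet_setup(&chan->tasklet, sk_tasklet_fn);
    }
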
@@ -1225,7 +1180,7 @@
 		return -EINVAL;
 }
 
-/**
+/*
  * atc_dma_cyclic_fill_desc - Fill one period descriptor
  */
 static int
@@ -1320,7 +1275,7 @@
 	if (unlikely(!is_slave_direction(direction)))
 		goto err_out;
 
-	if (sconfig->direction == DMA_MEM_TO_DEV)
+	if (direction == DMA_MEM_TO_DEV)
 		reg_width = convert_buswidth(sconfig->dst_addr_width);
 	else
 		reg_width = convert_buswidth(sconfig->src_addr_width);
@@ -1387,8 +1342,6 @@
 	int			chan_id = atchan->chan_common.chan_id;
 	unsigned long		flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	spin_lock_irqsave(&atchan->lock, flags);
@@ -1407,8 +1360,6 @@
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
 	unsigned long		flags;
-
-	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
@@ -1430,10 +1381,7 @@
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
-	struct at_desc		*desc, *_desc;
 	unsigned long		flags;
-
-	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
@@ -1453,12 +1401,8 @@
 		cpu_relax();
 
 	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
-
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	list_splice_tail_init(&atchan->queue, &atchan->free_list);
+	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
 	clear_bit(ATC_IS_PAUSED, &atchan->status);
 	/* if channel dedicated to cyclic operations, free it */
@@ -1520,29 +1464,31 @@
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
-	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	unsigned long		flags;
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-	/* Not needed for cyclic transfers */
-	if (atc_chan_is_cyclic(atchan))
-		return;
-
 	spin_lock_irqsave(&atchan->lock, flags);
-	atc_advance_work(atchan);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
+
+	desc = atc_first_queued(atchan);
+	list_move_tail(&desc->desc_node, &atchan->active_list);
+	atc_dostart(atchan, desc);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
  * atc_alloc_chan_resources - allocate resources for DMA channel
  * @chan: allocate descriptor resources for this channel
- * @client: current client requesting the channel be ready for requests
 *
 * return - the number of allocated descriptors
 */
@@ -1552,16 +1498,19 @@
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc;
 	struct at_dma_slave	*atslave;
-	unsigned long		flags;
 	int			i;
 	u32			cfg;
-	LIST_HEAD(tmp_list);
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
 	/* ASSERT:  channel is idle */
 	if (atc_chan_is_enabled(atchan)) {
 		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
+		return -EIO;
+	}
+
+	if (!list_empty(&atchan->free_list)) {
+		dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
 		return -EIO;
 	}
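
With the tx_submit() hunk earlier in this patch, the driver now follows the documented dmaengine contract: tx_submit() only assigns a cookie and queues the descriptor, and nothing touches the hardware until the client calls dma_async_issue_pending(). Reduced to a self-contained sketch (sk_* names stand in for the driver's types and hardware helpers):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    typedef int sk_cookie_t;

    struct sk_desc { struct list_head node; };
    struct sk_chan {
    	spinlock_t lock;
    	struct list_head active_list, queue;
    	sk_cookie_t last_cookie;
    };

    static bool sk_hw_busy(struct sk_chan *c) { return false; }	/* stub */
    static void sk_hw_start(struct sk_chan *c, struct sk_desc *d) { } /* stub */

    /* tx_submit(): bookkeeping only -- assign a cookie and queue. */
    static sk_cookie_t sk_tx_submit(struct sk_chan *c, struct sk_desc *d)
    {
    	unsigned long flags;
    	sk_cookie_t cookie;

    	spin_lock_irqsave(&c->lock, flags);
    	cookie = ++c->last_cookie;
    	list_add_tail(&d->node, &c->queue);
    	spin_unlock_irqrestore(&c->lock, flags);
    	return cookie;
    }

    /* issue_pending(): the only place a queued transfer reaches hardware. */
    static void sk_issue_pending(struct sk_chan *c)
    {
    	struct sk_desc *d;
    	unsigned long flags;

    	spin_lock_irqsave(&c->lock, flags);
    	if (!sk_hw_busy(c) && !list_empty(&c->queue)) {
    		d = list_first_entry(&c->queue, struct sk_desc, node);
    		list_move_tail(&d->node, &c->active_list);
    		sk_hw_start(c, d);
    	}
    	spin_unlock_irqrestore(&c->lock, flags);
    }
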
@@ -1580,11 +1529,6 @@
 		cfg = atslave->cfg;
 	}
 
-	/* have we already been set up?
-	 * reconfigure channel but no need to reallocate descriptors */
-	if (!list_empty(&atchan->free_list))
-		return atchan->descs_allocated;
-
 	/* Allocate initial pool of descriptors */
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
 		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
@@ -1593,23 +1537,18 @@
 				"Only %d initial descriptors\n", i);
 			break;
 		}
-		list_add_tail(&desc->desc_node, &tmp_list);
+		list_add_tail(&desc->desc_node, &atchan->free_list);
 	}
 
-	spin_lock_irqsave(&atchan->lock, flags);
-	atchan->descs_allocated = i;
-	list_splice(&tmp_list, &atchan->free_list);
 	dma_cookie_init(chan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
 
 	dev_dbg(chan2dev(chan),
-		"alloc_chan_resources: allocated %d descriptors\n",
-		atchan->descs_allocated);
+		"alloc_chan_resources: allocated %d descriptors\n", i);
 
-	return atchan->descs_allocated;
+	return i;
 }
 
 /**
@@ -1623,9 +1562,6 @@
 	struct at_desc		*desc, *_desc;
 	LIST_HEAD(list);
 
-	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
-		atchan->descs_allocated);
-
 	/* ASSERT:  channel is idle */
 	BUG_ON(!list_empty(&atchan->active_list));
 	BUG_ON(!list_empty(&atchan->queue));
@@ -1638,7 +1574,6 @@
 		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
 	}
 	list_splice_init(&atchan->free_list, &list);
-	atchan->descs_allocated = 0;
 	atchan->status = 0;
 
 	/*
@@ -1919,8 +1854,7 @@
 		INIT_LIST_HEAD(&atchan->queue);
 		INIT_LIST_HEAD(&atchan->free_list);
 
-		tasklet_init(&atchan->tasklet, atc_tasklet,
-				(unsigned long)atchan);
+		tasklet_setup(&atchan->tasklet, atc_tasklet);
 		atc_enable_chan_irq(atdma, i);
 	}
 
@@ -1967,7 +1901,11 @@
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  plat_dat->nr_channels);
 
-	dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_common);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+		goto err_dma_async_device_register;
+	}
 
 	/*
 	 * Do not return an error if the dmac node is not present in order to
@@ -1987,6 +1925,7 @@
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
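
Finally, the probe hunks stop ignoring the return value of dma_async_device_register() and extend the existing goto-unwind chain with an err_dma_async_device_register label, so every resource acquired before the failure is released in reverse order. The pattern, as a standalone sketch (all sk_* helpers are hypothetical):

    /* Illustrative helpers; each err_* label undoes the step above it. */
    static int sk_setup_pools(void) { return 0; }
    static void sk_teardown_pools(void) { }
    static int sk_register_device(void) { return 0; }
    static void sk_unregister_device(void) { }
    static int sk_register_of_controller(void) { return 0; }

    static int sk_probe(void)
    {
    	int err;

    	err = sk_setup_pools();
    	if (err)
    		return err;

    	err = sk_register_device();	/* now checked, not ignored */
    	if (err)
    		goto err_pools;

    	err = sk_register_of_controller();
    	if (err)
    		goto err_unregister;

    	return 0;

    err_unregister:
    	sk_unregister_device();
    err_pools:
    	sk_teardown_pools();
    	return err;
    }
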