From 7e970c18f85f99acc678d90128b6e01dce1bf273 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:40:12 +0000
Subject: [PATCH] dma: imx-sdma: add out-of-band transfer support
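
Extend the i.MX SDMA driver with out-of-band transfer support, gated
by CONFIG_IMX_SDMA_OOB:

- request the SDMA interrupt with IRQF_OOB so the handler may run
  from the out-of-band stage; events which cannot be completed there
  are accumulated into pending_stat under oob_lock and replayed from
  in-band context via irq_post_inband(),

- switch virtual channel locking from spin_lock*() on vc.lock to the
  vchan_lock*() helpers,

- reject the DMA_OOB_INTERRUPT and DMA_OOB_PULSE flags from the
  slave_sg and cyclic preparation handlers when out-of-band support
  is disabled, and reject pulse mode for cyclic transfers in any
  case,

- implement the device_pulse_oob handler, re-arming and kicking a
  pulsed transfer from the out-of-band stage,

- advertise the DMA_OOB capability and keep the ipg/ahb clocks
  enabled until sdma_remove() when out-of-band support is enabled.

With this in place, a client may prepare a pulsed slave transfer
once, then retrigger it from the out-of-band stage on each period.
A minimal sketch, assuming the Dovetail dmaengine extension which
defines the DMA_OOB_* flags and a dma_pulse_oob() helper invoking
the device_pulse_oob handler (that helper name is an assumption; it
does not appear in this patch):

	struct dma_async_tx_descriptor *d;
	dma_cookie_t cookie;
	int ret;

	/* Flag the transfer for out-of-band pulsing. */
	d = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				    DMA_PREP_INTERRUPT | DMA_OOB_PULSE);
	if (!d)
		return -EIO;

	cookie = dmaengine_submit(d);
	/* Pulsed descriptors are not started from issue_pending. */
	dma_async_issue_pending(chan);

	/* From the oob stage: re-arm all BDs and start the channel. */
	ret = dma_pulse_oob(chan);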
---
kernel/drivers/dma/imx-sdma.c | 195 ++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 163 insertions(+), 32 deletions(-)
diff --git a/kernel/drivers/dma/imx-sdma.c b/kernel/drivers/dma/imx-sdma.c
index 2283dcd..3648d3c 100644
--- a/kernel/drivers/dma/imx-sdma.c
+++ b/kernel/drivers/dma/imx-sdma.c
@@ -444,6 +444,10 @@
struct sdma_buffer_descriptor *bd0;
/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
bool clk_ratio;
+#ifdef CONFIG_IMX_SDMA_OOB
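+	/*
+	 * oob_lock guards pending_stat, the interrupt events deferred
+	 * from the out-of-band stage to in-band handling.
+	 */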
+ hard_spinlock_t oob_lock;
+ u32 pending_stat;
+#endif
};
static int sdma_config_write(struct dma_chan *chan,
@@ -748,6 +752,11 @@
return container_of(t, struct sdma_desc, vd.tx);
}
+static inline bool sdma_oob_capable(void)
+{
+ return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
+}
+
static void sdma_start_desc(struct sdma_channel *sdmac)
{
struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
@@ -765,7 +774,8 @@
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
- sdma_enable_channel(sdma, sdmac->channel);
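+	/*
+	 * Pulsed out-of-band descriptors are not started here; they
+	 * are kicked later from sdma_pulse_oob().
+	 */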
+ if (!sdma_oob_capable() || !vchan_oob_pulsed(vd))
+ sdma_enable_channel(sdma, sdmac->channel);
}
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -809,9 +819,9 @@
* SDMA transaction status by the time the client tasklet is
* executed.
*/
- spin_unlock(&sdmac->vc.lock);
+ vchan_unlock(&sdmac->vc);
dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
- spin_lock(&sdmac->vc.lock);
+ vchan_lock(&sdmac->vc);
if (error)
sdmac->status = old_status;
@@ -821,20 +831,21 @@
static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
+ struct sdma_desc *desc = sdmac->desc;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->desc->chn_real_count = 0;
+ desc->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
*/
- for (i = 0; i < sdmac->desc->num_bd; i++) {
- bd = &sdmac->desc->bd[i];
+ for (i = 0; i < desc->num_bd; i++) {
+ bd = &desc->bd[i];
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
- sdmac->desc->chn_real_count += bd->mode.count;
+ desc->chn_real_count += bd->mode.count;
}
if (error)
@@ -843,36 +854,83 @@
sdmac->status = DMA_COMPLETE;
}
-static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+static unsigned long sdma_do_channels(struct sdma_engine *sdma,
+ unsigned long stat)
{
- struct sdma_engine *sdma = dev_id;
- unsigned long stat;
+ unsigned long mask = stat;
- stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
- writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
- /* channel 0 is special and not handled here, see run_channel0() */
- stat &= ~1;
-
- while (stat) {
- int channel = fls(stat) - 1;
+ while (mask) {
+ int channel = fls(mask) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
struct sdma_desc *desc;
- spin_lock(&sdmac->vc.lock);
+ vchan_lock(&sdmac->vc);
desc = sdmac->desc;
if (desc) {
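+			/*
+			 * From the oob stage, skip descriptors which
+			 * need in-band handling; their bit stays set
+			 * in stat for the in-band reentry.
+			 */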
+ if (running_oob() && !vchan_oob_handled(&desc->vd))
+ goto next;
if (sdmac->flags & IMX_DMA_SG_LOOP) {
sdma_update_channel_loop(sdmac);
} else {
mxc_sdma_handle_channel_normal(sdmac);
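+				/*
+				 * Completed from the oob stage: invoke
+				 * the callback directly and drop the
+				 * event from the in-band set.
+				 */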
+ if (running_oob()) {
+ vchan_unlock(&sdmac->vc);
+ dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+ __clear_bit(channel, &stat);
+ goto next_unlocked;
+ }
vchan_cookie_complete(&desc->vd);
sdma_start_desc(sdmac);
}
}
-
- spin_unlock(&sdmac->vc.lock);
__clear_bit(channel, &stat);
+ next:
+ vchan_unlock(&sdmac->vc);
+ next_unlocked:
+ __clear_bit(channel, &mask);
}
+
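+	/* Bits still set in stat await in-band handling. */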
+ return stat;
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+ struct sdma_engine *sdma = dev_id;
+ unsigned long stat, flags __maybe_unused;
+
+#ifdef CONFIG_IMX_SDMA_OOB
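+	/*
+	 * Two-stage handling: serve as many events as possible from
+	 * the oob stage, deferring the rest to the in-band reentry.
+	 */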
+ if (running_oob()) {
+ stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+		/*
+		 * The lock only guards against IRQ migration: a
+		 * delayed in-band event may still be running on a
+		 * remote CPU after a routing change moved the
+		 * affinity of the out-of-band handler.
+		 */
+ stat = sdma_do_channels(sdma, stat & ~1);
+ if (stat) {
+ raw_spin_lock(&sdma->oob_lock);
+ sdma->pending_stat |= stat;
+ raw_spin_unlock(&sdma->oob_lock);
+ /* Call us back from in-band context. */
+ irq_post_inband(irq);
+ }
+ return IRQ_HANDLED;
+ }
+
+ /* In-band IRQ context: stalled, but hard irqs are on. */
+ raw_spin_lock_irqsave(&sdma->oob_lock, flags);
+ stat = sdma->pending_stat;
+ sdma->pending_stat = 0;
+ raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);
+ sdma_do_channels(sdma, stat);
+#else
+ stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+ /* channel 0 is special and not handled here, see run_channel0() */
+ sdma_do_channels(sdma, stat & ~1);
+#endif
return IRQ_HANDLED;
}
@@ -1060,9 +1118,9 @@
*/
usleep_range(1000, 2000);
- spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_lock_irqsave(&sdmac->vc, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
}
@@ -1071,17 +1129,18 @@
struct sdma_channel *sdmac = to_sdma_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_lock_irqsave(&sdmac->vc, flags);
sdma_disable_channel(chan);
if (sdmac->desc) {
vchan_terminate_vdesc(&sdmac->desc->vd);
sdmac->desc = NULL;
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
schedule_work(&sdmac->terminate_worker);
+ } else {
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
}
-
- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}
@@ -1441,6 +1500,15 @@
struct scatterlist *sg;
struct sdma_desc *desc;
+ if (!sdma_oob_capable()) {
+ if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+ dev_err(sdma->dev,
+ "%s: out-of-band slave transfers disabled\n",
+ __func__);
+ return NULL;
+ }
+ }
+
sdma_config_write(chan, &sdmac->slave_config, direction);
desc = sdma_transfer_init(sdmac, direction, sg_len);
@@ -1492,7 +1560,8 @@
if (i + 1 == sg_len) {
param |= BD_INTR;
- param |= BD_LAST;
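+			/*
+			 * In pulse mode the ring is re-armed from
+			 * sdma_pulse_oob(), so the final BD must not
+			 * be marked last.
+			 */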
+ if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE))
+ param |= BD_LAST;
param &= ~BD_CONT;
}
@@ -1526,6 +1595,20 @@
struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+ if (!sdma_oob_capable()) {
+ if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+ dev_err(sdma->dev,
+ "%s: out-of-band cyclic transfers disabled\n",
+ __func__);
+ return NULL;
+ }
+ } else if (flags & DMA_OOB_PULSE) {
+ dev_err(chan->device->dev,
+ "%s: no pulse mode with out-of-band cyclic transfers\n",
+ __func__);
+ return NULL;
+ }
sdma_config_write(chan, &sdmac->slave_config, direction);
@@ -1649,7 +1732,7 @@
if (ret == DMA_COMPLETE || !txstate)
return ret;
- spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_lock_irqsave(&sdmac->vc, flags);
vd = vchan_find_desc(&sdmac->vc, cookie);
if (vd)
@@ -1667,7 +1750,7 @@
residue = 0;
}
- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
residue);
@@ -1680,11 +1763,38 @@
struct sdma_channel *sdmac = to_sdma_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_lock_irqsave(&sdmac->vc, flags);
if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
sdma_start_desc(sdmac);
- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
}
+
+#ifdef CONFIG_IMX_SDMA_OOB
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_desc *desc = sdmac->desc;
+ unsigned long flags;
+ int n, ret = -EIO;
+
+ vchan_lock_irqsave(&sdmac->vc, flags);
+ if (desc && vchan_oob_pulsed(&desc->vd)) {
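+		/* Re-arm every BD, wrap on the last, then kick the script. */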
+ for (n = 0; n < desc->num_bd - 1; n++)
+ desc->bd[n].mode.status |= BD_DONE;
+ desc->bd[n].mode.status |= BD_DONE|BD_WRAP;
+ sdma_enable_channel(sdmac->sdma, sdmac->channel);
+ ret = 0;
+ }
+ vchan_unlock_irqrestore(&sdmac->vc, flags);
+
+ return ret;
+}
+#else
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+ return -ENOTSUPP;
+}
+#endif
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
@@ -1920,6 +2030,9 @@
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
+#ifdef CONFIG_IMX_SDMA_OOB
+ raw_spin_lock_init(&sdma->oob_lock);
+#endif
return 0;
err_dma_alloc:
@@ -2035,8 +2148,9 @@
if (ret)
goto err_clk;
- ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
- sdma);
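+	/* With CONFIG_IMX_SDMA_OOB, the handler runs from the oob stage. */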
+ ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler,
+ IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0,
+ "sdma", sdma);
if (ret)
goto err_irq;
@@ -2055,6 +2169,7 @@
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask);
dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
@@ -2106,6 +2221,7 @@
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
+ sdma->dma_device.device_pulse_oob = sdma_pulse_oob;
sdma->dma_device.copy_align = 2;
dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
@@ -2160,6 +2276,16 @@
}
}
+	/*
+	 * Keep the clocks permanently enabled if we plan to use the
+	 * DMA from out-of-band context, bumping their refcount so
+	 * they stay on until sdma_remove() is called.
+	 */
+ if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
+ }
+
return 0;
err_register:
@@ -2178,6 +2304,11 @@
struct sdma_engine *sdma = platform_get_drvdata(pdev);
int i;
+ if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+ clk_disable(sdma->clk_ahb);
+ clk_disable(sdma->clk_ipg);
+ }
+
devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
--
Gitblit v1.6.2