From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] dmaengine: pl330: add interleaved DMA, PAUSED state, resets
 and debugfs

Sync the PL330 driver with newer upstream code and extend it:
- add an SPDX license tag and convert to fallthrough;/tasklet_setup()
- add a PAUSED descriptor state and report DMA_PAUSED in tx_status
- replace the src/dst interlace-size fields with a struct data_chunk and
  add device_prep_interleaved_dma() with DMA_REPEAT/DMA_LOAD_EOT support
- defer the slave configuration to pl330_config_write() at prepare time
- add optional "dma"/"dma-ocp" resets, the "arm,pl330-mcbufsz-bytes"
  property and a debugfs dump of the thread/channel mapping
---
 kernel/drivers/dma/pl330.c | 409 +++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 291 insertions(+), 118 deletions(-)

diff --git a/kernel/drivers/dma/pl330.c b/kernel/drivers/dma/pl330.c
index 21cfb81..0d14147 100644
--- a/kernel/drivers/dma/pl330.c
+++ b/kernel/drivers/dma/pl330.c
@@ -1,16 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  *	Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/init.h>
@@ -28,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
 #include <linux/bug.h>
+#include <linux/reset.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -37,16 +35,6 @@
 #define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
 #define PL330_QUIRK_PERIPH_BURST	BIT(1)
 
-
-#ifdef CONFIG_CPU_RV1126
-#undef writel
-#define writel(v, c)			\
-	do {				\
-		readl_relaxed(c);	\
-		__iowmb();		\
-		writel_relaxed(v, c);	\
-	} while (0)
-#endif
 
 enum pl330_cachectrl {
 	CCTRL0,		/* Noncacheable and nonbufferable */
@@ -268,7 +256,7 @@
 static unsigned cmd_line;
 #define PL330_DBGCMD_DUMP(off, x...)	do { \
 						printk("%x:", cmd_line); \
-						printk(x); \
+						printk(KERN_CONT x); \
 						cmd_line += off; \
 					} while (0)
 #define PL330_DBGMC_START(addr)		(cmd_line = addr)
@@ -298,7 +286,7 @@
 	u32		irq_ns;
 };
 
-/**
+/*
  * Request Configuration.
  * The PL330 core does not modify this and uses the last
  * working configuration if the request doesn't provide any.
@@ -417,6 +405,12 @@
 	 */
 	BUSY,
 	/*
+	 * Pause was called while descriptor was BUSY. Due to hardware
+	 * limitations, only termination is possible for descriptors
+	 * that have been paused.
+	 */
+	PAUSED,
+	/*
 	 * Sitting on the channel work_list but xfer done
 	 * by PL330 core
 	 */
@@ -460,8 +454,7 @@
 	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
 	dma_addr_t fifo_dma;
 	enum dma_data_direction dir;
-	unsigned int src_interlace_size;
-	unsigned int dst_interlace_size;
+	struct dma_slave_config slave_config;
 
 	/* for runtime pm tracking */
 	bool active;
@@ -470,9 +463,6 @@
 struct pl330_dmac {
 	/* DMA-Engine Device */
 	struct dma_device ddma;
-
-	/* Holds info about sg limitations */
-	struct device_dma_parameters dma_parms;
 
 	/* Pool of descriptors available for the DMAC's channels */
 	struct list_head desc_pool;
@@ -509,6 +499,9 @@
 	unsigned int num_peripherals;
 	struct dma_pl330_chan *peripherals; /* keep at end */
 	int quirks;
+
+	struct reset_control	*rstc;
+	struct reset_control	*rstc_ocp;
 };
 
 static struct pl330_of_quirks {
@@ -554,15 +547,19 @@
 	/* For cyclic capability */
 	bool cyclic;
 	size_t num_periods;
-	/* interlace size */
-	unsigned int src_interlace_size;
-	unsigned int dst_interlace_size;
+
+	/* interleaved size */
+	struct data_chunk sgl;
 };
 
 struct _xfer_spec {
 	u32 ccr;
 	struct dma_pl330_desc *desc;
 };
+
+static int pl330_config_write(struct dma_chan *chan,
+			struct dma_slave_config *slave_config,
+			enum dma_transfer_direction direction);
 
 static inline bool _queue_full(struct pl330_thread *thrd)
 {
@@ -1080,7 +1077,7 @@
 	return true;
 }
 
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
 {
 	switch (_state(thrd)) {
 	case PL330_STATE_FAULT_COMPLETING:
@@ -1088,16 +1085,16 @@
 		if (_state(thrd) == PL330_STATE_KILLING)
 			UNTIL(thrd, PL330_STATE_STOPPED)
-		/* fall through */
+		fallthrough;
 
 	case PL330_STATE_FAULTING:
 		_stop(thrd);
-		/* fall through */
+		fallthrough;
 
 	case PL330_STATE_KILLING:
 	case PL330_STATE_COMPLETING:
 		UNTIL(thrd, PL330_STATE_STOPPED)
-		/* fall through */
+		fallthrough;
 
 	case PL330_STATE_STOPPED:
 		return _trigger(thrd);
@@ -1148,7 +1145,6 @@
 
 	switch (direction) {
 	case DMA_MEM_TO_MEM:
-		/* fall through */
 	case DMA_MEM_TO_DEV:
 		off += _emit_LD(dry_run, &buf[off], cond);
 		break;
@@ -1182,7 +1178,6 @@
 
 	switch (direction) {
 	case DMA_MEM_TO_MEM:
-		/* fall through */
 	case DMA_DEV_TO_MEM:
 		off += _emit_ST(dry_run, &buf[off], cond);
 		break;
@@ -1213,7 +1208,7 @@
 		const struct _xfer_spec *pxs, int cyc,
 		enum pl330_cond cond)
 {
-	int off = 0;
+	int off = 0, i = 0, burstn = 1;
 
 	/*
 	 * do FLUSHP at beginning to clear any stale dma requests before the
@@ -1221,22 +1216,31 @@
 	 */
 	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
 		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+	if (pxs->desc->sgl.size) {
+		WARN_ON(BYTE_MOD_BURST_LEN(pxs->desc->sgl.size, pxs->ccr));
+		burstn = BYTE_TO_BURST(pxs->desc->sgl.size, pxs->ccr);
+	}
+
 	while (cyc--) {
-		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
-		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
-			pxs->desc->peri);
-		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
-			pxs->desc->peri);
+		for (i = 0; i < burstn; i++) {
+			off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+			off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
+				pxs->desc->peri);
+			off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
+				pxs->desc->peri);
+		}
+
 		switch (pxs->desc->rqtype) {
 		case DMA_DEV_TO_MEM:
-			if (pxs->desc->dst_interlace_size)
+			if (pxs->desc->sgl.dst_icg)
 				off += _emit_ADDH(dry_run, &buf[off], DST,
-					pxs->desc->dst_interlace_size);
+					pxs->desc->sgl.dst_icg);
 			break;
 		case DMA_MEM_TO_DEV:
-			if (pxs->desc->src_interlace_size)
+			if (pxs->desc->sgl.src_icg)
 				off += _emit_ADDH(dry_run, &buf[off], SRC,
-					pxs->desc->src_interlace_size);
+					pxs->desc->sgl.src_icg);
 			break;
 		default:
 			WARN_ON(1);
@@ -1258,7 +1262,6 @@
 
 	switch (pxs->desc->rqtype) {
 	case DMA_MEM_TO_DEV:
-		/* fall through */
 	case DMA_DEV_TO_MEM:
 		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
 					cond);
@@ -1293,7 +1296,7 @@
 
 	switch (pxs->desc->rqtype) {
 	case DMA_MEM_TO_DEV:
-		/* fall through */
+		fallthrough;
 	case DMA_DEV_TO_MEM:
 		/*
 		 * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
@@ -1457,8 +1460,7 @@
 		off += _emit_LPEND(dry_run, &buf[off], &lpend);
 	}
 
-	if (!pxs->desc->src_interlace_size &&
-	    !pxs->desc->dst_interlace_size) {
+	if (!pxs->desc->sgl.src_icg && !pxs->desc->sgl.dst_icg) {
 		num_dregs = BYTE_MOD_BURST_LEN(x->bytes, pxs->ccr);
 
 		if (num_dregs) {
@@ -1533,19 +1535,16 @@
 			BRST_SIZE(ccr);
 	int off = 0;
 
-	if (pxs->desc->rqtype == DMA_DEV_TO_MEM)
-		bursts = x->bytes / (BRST_SIZE(ccr) * BRST_LEN(ccr) +
-			pxs->desc->dst_interlace_size);
-	else if (pxs->desc->rqtype == DMA_MEM_TO_DEV)
-		bursts = x->bytes / (BRST_SIZE(ccr) * BRST_LEN(ccr) +
-			pxs->desc->src_interlace_size);
+	if (pxs->desc->sgl.size)
+		bursts = x->bytes / pxs->desc->sgl.size;
+
 	while (bursts) {
 		c = bursts;
 		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
 		bursts -= c;
 	}
-	if (!pxs->desc->src_interlace_size &&
-	    !pxs->desc->dst_interlace_size)
+
+	if (!pxs->desc->sgl.src_icg && !pxs->desc->sgl.dst_icg)
 		off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);
 
 	return off;
@@ -1578,12 +1577,9 @@
 	unsigned long bursts = BYTE_TO_BURST(x->bytes, ccr);
 	int off = 0;
 
-	if (pxs->desc->rqtype == DMA_DEV_TO_MEM)
-		bursts = x->bytes / (BRST_SIZE(ccr) * BRST_LEN(ccr)
-				+ pxs->desc->dst_interlace_size);
-	else if (pxs->desc->rqtype == DMA_MEM_TO_DEV)
-		bursts = x->bytes / (BRST_SIZE(ccr) * BRST_LEN(ccr)
-				+ pxs->desc->src_interlace_size);
+	if (pxs->desc->sgl.size)
+		bursts = x->bytes / pxs->desc->sgl.size;
+
 	/* Setup Loop(s) */
 	off += _loop_cyclic(pl330, dry_run, &buf[off], bursts, pxs, ev);
@@ -1767,9 +1763,9 @@
 	tasklet_schedule(&pch->task);
 }
 
-static void pl330_dotask(unsigned long data)
+static void pl330_dotask(struct tasklet_struct *t)
 {
-	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+	struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
 	unsigned long flags;
 	int i;
@@ -1898,7 +1894,7 @@
 			thrd->req[active].desc = NULL;
 			thrd->req_running = -1;
 
 			/* Get going again ASAP */
-			_start(thrd);
+			pl330_start_thread(thrd);
 
 			/* For now, just make a list of callbacks to be done */
@@ -2126,9 +2122,10 @@
 	if (ret) {
 		dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n",
 			__func__, __LINE__);
-		dma_free_coherent(pl330->ddma.dev,
+		dma_free_attrs(pl330->ddma.dev,
 				chans * pl330->mcbufsz,
-				pl330->mcode_cpu, pl330->mcode_bus);
+				pl330->mcode_cpu, pl330->mcode_bus,
+				DMA_ATTR_PRIVILEGED);
 		return ret;
 	}
@@ -2174,7 +2171,7 @@
 		return ret;
 	}
 
-	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+	tasklet_setup(&pl330->tasks, pl330_dotask);
 
 	pl330->state = INIT;
@@ -2207,9 +2204,9 @@
 	/* Free DMAC resources */
 	dmac_free_threads(pl330);
 
-	dma_free_coherent(pl330->ddma.dev,
+	dma_free_attrs(pl330->ddma.dev,
 		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
-		pl330->mcode_bus);
+		pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
 }
 
 /* forward declaration */
@@ -2238,7 +2235,7 @@
 	list_for_each_entry(desc, &pch->work_list, node) {
 
 		/* If already submitted */
-		if (desc->status == BUSY)
+		if (desc->status == BUSY || desc->status == PAUSED)
 			continue;
 
 		ret = pl330_submit_req(pch->thread, desc);
@@ -2257,9 +2254,9 @@
 	}
 }
 
-static void pl330_tasklet(unsigned long data)
+static void pl330_tasklet(struct tasklet_struct *t)
 {
-	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+	struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
 	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
 	bool power_down = false;
@@ -2299,7 +2296,7 @@
 		} else {
 			/* Make sure the PL330 Channel thread is active */
 			spin_lock(&pch->thread->dmac->lock);
-			_start(pch->thread);
+			pl330_start_thread(pch->thread);
 			spin_unlock(&pch->thread->dmac->lock);
 		}
 
@@ -2367,7 +2364,7 @@
 		return -ENOMEM;
 	}
 
-	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+	tasklet_setup(&pch->task, pl330_tasklet);
 
 	spin_unlock_irqrestore(&pl330->lock, flags);
 
@@ -2432,31 +2429,38 @@
 	return max_burst_len;
 }
 
+static int pl330_config_write(struct dma_chan *chan,
+			struct dma_slave_config *slave_config,
+			enum dma_transfer_direction direction)
+{
+	struct dma_pl330_chan *pch = to_pchan(chan);
+
+	pl330_unprep_slave_fifo(pch);
+	if (direction == DMA_MEM_TO_DEV) {
+		if (slave_config->dst_addr)
+			pch->fifo_addr = slave_config->dst_addr;
+		if (slave_config->dst_addr_width)
+			pch->burst_sz = __ffs(slave_config->dst_addr_width);
+		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
+			pch->dmac->quirks);
+	} else if (direction == DMA_DEV_TO_MEM) {
+		if (slave_config->src_addr)
+			pch->fifo_addr = slave_config->src_addr;
+		if (slave_config->src_addr_width)
+			pch->burst_sz = __ffs(slave_config->src_addr_width);
+		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
+			pch->dmac->quirks);
+	}
+
+	return 0;
+}
+
 static int pl330_config(struct dma_chan *chan,
 			struct dma_slave_config *slave_config)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 
-	pl330_unprep_slave_fifo(pch);
-	if (slave_config->direction == DMA_MEM_TO_DEV) {
-		if (slave_config->dst_addr)
-			pch->fifo_addr = slave_config->dst_addr;
-		if (slave_config->dst_addr_width)
-			pch->burst_sz = __ffs(slave_config->dst_addr_width);
-		if (slave_config->src_interlace_size)
-			pch->src_interlace_size = slave_config->src_interlace_size;
-		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
-			pch->dmac->quirks);
-	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
-		if (slave_config->src_addr)
-			pch->fifo_addr = slave_config->src_addr;
-		if (slave_config->src_addr_width)
-			pch->burst_sz = __ffs(slave_config->src_addr_width);
-		if (slave_config->dst_interlace_size)
-			pch->dst_interlace_size = slave_config->dst_interlace_size;
-		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
-			pch->dmac->quirks);
-	}
+	memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));
 
 	return 0;
 }
@@ -2467,7 +2471,6 @@
 	struct dma_pl330_desc *desc;
 	unsigned long flags;
 	struct pl330_dmac *pl330 = pch->dmac;
-	LIST_HEAD(list);
 	bool power_down = false;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
@@ -2517,6 +2520,7 @@
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct pl330_dmac *pl330 = pch->dmac;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
@@ -2526,6 +2530,10 @@
 	_stop(pch->thread);
 	spin_unlock(&pl330->lock);
 
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == BUSY)
+			desc->status = PAUSED;
+	}
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2615,7 +2623,7 @@
 	else if (running && desc == running)
 		transferred =
 			pl330_get_current_xferred_count(pch, desc);
-	else if (desc->status == BUSY)
+	else if (desc->status == BUSY || desc->status == PAUSED)
 		/*
 		 * Busy but not running means either just enqueued,
 		 * or finished and not yet marked done
@@ -2631,6 +2639,9 @@
 	switch (desc->status) {
 	case DONE:
 		ret = DMA_COMPLETE;
+		break;
+	case PAUSED:
+		ret = DMA_PAUSED;
 		break;
 	case PREP:
 	case BUSY:
@@ -2672,7 +2683,7 @@
 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
 
-	pl330_tasklet((unsigned long)pch);
+	pl330_tasklet(&pch->task);
 }
 
 /*
@@ -2776,7 +2787,7 @@
 
 	/* If the DMAC pool is empty, alloc new */
 	if (!desc) {
-		DEFINE_SPINLOCK(lock);
+		static DEFINE_SPINLOCK(lock);
 		LIST_HEAD(pool);
 
 		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
@@ -2796,6 +2807,10 @@
 	desc->cyclic = false;
 	desc->num_periods = 1;
+
+	desc->sgl.size = 0;
+	desc->sgl.src_icg = 0;
+	desc->sgl.dst_icg = 0;
 
 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
 
@@ -2862,8 +2877,8 @@
 {
 	struct dma_pl330_desc *desc = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	dma_addr_t dst;
-	dma_addr_t src;
+	dma_addr_t dst = 0;
+	dma_addr_t src = 0;
 
 	if (len % period_len != 0)
 		return NULL;
@@ -2873,6 +2888,8 @@
 			__func__, __LINE__);
 		return NULL;
 	}
+
+	pl330_config_write(chan, &pch->slave_config, direction);
 
 	if (!pl330_prep_slave_fifo(pch, direction))
 		return NULL;
@@ -2911,8 +2928,80 @@
 	desc->num_periods = len / period_len;
 	desc->txd.flags = flags;
-	desc->src_interlace_size = pch->src_interlace_size;
-	desc->dst_interlace_size = pch->dst_interlace_size;
+
+	return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *pl330_prep_interleaved_dma(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags)
+{
+	struct dma_pl330_desc *desc = NULL;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	dma_addr_t dst = 0, src = 0;
+	size_t size, src_icg, dst_icg, period_bytes, buffer_bytes, full_buffer_bytes;
+	size_t nump = 0, numf = 0;
+
+	if (!xt->numf || !xt->sgl[0].size || xt->frame_size != 1)
+		return NULL;
+
+#ifdef CONFIG_NO_GKI
+	nump = xt->nump;
+#endif
+	numf = xt->numf;
+	size = xt->sgl[0].size;
+	period_bytes = size * nump;
+	buffer_bytes = size * numf;
+
+	if (flags & DMA_PREP_REPEAT && (!nump || (numf % nump)))
+		return NULL;
+
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+
+	pl330_config_write(chan, &pch->slave_config, xt->dir);
+
+	if (!pl330_prep_slave_fifo(pch, xt->dir))
+		return NULL;
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(chan->device->dev, "Failed to get desc\n");
+		return NULL;
+	}
+
+	if (xt->dir == DMA_MEM_TO_DEV) {
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = xt->src_start;
+		dst = pch->fifo_dma;
+		full_buffer_bytes = (size + src_icg) * numf;
+	} else {
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = pch->fifo_dma;
+		dst = xt->dst_start;
+		full_buffer_bytes = (size + dst_icg) * numf;
+	}
+
+	desc->rqtype = xt->dir;
+	desc->rqcfg.brst_size = pch->burst_sz;
+	desc->rqcfg.brst_len = pch->burst_len;
+	desc->bytes_requested = full_buffer_bytes;
+	desc->sgl.size = size;
+	desc->sgl.src_icg = src_icg;
+	desc->sgl.dst_icg = dst_icg;
+	desc->txd.flags = flags;
+
+	if (flags & DMA_PREP_REPEAT) {
+		desc->cyclic = true;
+		desc->num_periods = numf / nump;
+		fill_px(&desc->px, dst, src, period_bytes);
+	} else {
+		fill_px(&desc->px, dst, src, buffer_bytes);
+	}
+
+	dev_dbg(chan->device->dev, "size: %zu, src_icg: %zu, dst_icg: %zu, nump: %zu, numf: %zu\n",
+		size, src_icg, dst_icg, nump, numf);
 
 	return &desc->txd;
 }
@@ -3004,6 +3093,8 @@
 	if (unlikely(!pch || !sgl || !sg_len))
 		return NULL;
 
+	pl330_config_write(chan, &pch->slave_config, direction);
+
 	if (!pl330_prep_slave_fifo(pch, direction))
 		return NULL;
 
@@ -3044,8 +3135,6 @@
 		desc->rqcfg.brst_len = pch->burst_len;
 		desc->rqtype = direction;
 		desc->bytes_requested = sg_dma_len(sg);
-		desc->src_interlace_size = pch->src_interlace_size;
-		desc->dst_interlace_size = pch->dst_interlace_size;
 	}
 
 	/* Return the last desc in the chain */
@@ -3068,6 +3157,55 @@
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
 
+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+	struct pl330_dmac *pl330 = s->private;
+	int chans, pchs, ch, pr;
+
+	chans = pl330->pcfg.num_chan;
+	pchs = pl330->num_peripherals;
+
+	seq_puts(s, "PL330 physical channels:\n");
+	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+	seq_puts(s, "--------\t-----\n");
+	for (ch = 0; ch < chans; ch++) {
+		struct pl330_thread *thrd = &pl330->channels[ch];
+		int found = -1;
+
+		for (pr = 0; pr < pchs; pr++) {
+			struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+			if (!pch->thread || thrd->id != pch->thread->id)
+				continue;
+
+			found = pr;
+		}
+
+		seq_printf(s, "%d\t\t", thrd->id);
+		if (found == -1)
+			seq_puts(s, "--\n");
+		else
+			seq_printf(s, "%d\n", found);
+	}
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+	debugfs_create_file(dev_name(pl330->ddma.dev),
+			    S_IFREG | 0444, NULL, pl330,
+			    &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
 /*
  * Runtime PM callbacks are provided by amba/bus.c driver.
  *
@@ -3078,12 +3216,7 @@
 {
 	struct amba_device *pcdev = to_amba_device(dev);
 
-	pm_runtime_disable(dev);
-
-	if (!pm_runtime_status_suspended(dev)) {
-		/* amba did not disable the clock */
-		amba_pclk_disable(pcdev);
-	}
+	pm_runtime_force_suspend(dev);
 	amba_pclk_unprepare(pcdev);
 
 	return 0;
@@ -3098,15 +3231,14 @@
 	if (ret)
 		return ret;
 
-	if (!pm_runtime_status_suspended(dev))
-		ret = amba_pclk_enable(pcdev);
-
-	pm_runtime_enable(dev);
+	pm_runtime_force_resume(dev);
 
 	return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
 
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
@@ -3118,6 +3250,7 @@
 	struct resource *res;
 	int i, ret, irq;
 	int num_chan;
+	int val;
 	struct device_node *np = adev->dev.of_node;
 
 	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
@@ -3132,7 +3265,12 @@
 	pd = &pl330->ddma;
 	pd->dev = &adev->dev;
 
-	pl330->mcbufsz = 0;
+	if (!device_property_read_u32(&adev->dev, "arm,pl330-mcbufsz-bytes", &val)) {
+		if ((val > 0) && (val <= PAGE_SIZE))
+			pl330->mcbufsz = val;
+
+		dev_info(&adev->dev, "mcbufsz: %d bytes\n", pl330->mcbufsz);
+	}
 
 	/* get quirk */
 	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
@@ -3145,6 +3283,29 @@
 		return PTR_ERR(pl330->base);
 
 	amba_set_drvdata(adev, pl330);
+
+	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
+	if (IS_ERR(pl330->rstc)) {
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
+	} else {
+		ret = reset_control_deassert(pl330->rstc);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
+			return ret;
+		}
+	}
+
+	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
+	if (IS_ERR(pl330->rstc_ocp)) {
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
+ "Failed to get OCP reset!\n"); + } else { + ret = reset_control_deassert(pl330->rstc_ocp); + if (ret) { + dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); + return ret; + } + } for (i = 0; i < AMBA_NR_IRQS; i++) { irq = adev->irq[i]; @@ -3209,12 +3370,16 @@ dma_cap_set(DMA_SLAVE, pd->cap_mask); dma_cap_set(DMA_CYCLIC, pd->cap_mask); dma_cap_set(DMA_PRIVATE, pd->cap_mask); + dma_cap_set(DMA_INTERLEAVE, pd->cap_mask); + dma_cap_set(DMA_REPEAT, pd->cap_mask); + dma_cap_set(DMA_LOAD_EOT, pd->cap_mask); } pd->device_alloc_chan_resources = pl330_alloc_chan_resources; pd->device_free_chan_resources = pl330_free_chan_resources; pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; + pd->device_prep_interleaved_dma = pl330_prep_interleaved_dma; pd->device_tx_status = pl330_tx_status; pd->device_prep_slave_sg = pl330_prep_slave_sg; pd->device_config = pl330_config; @@ -3242,8 +3407,6 @@ } } - adev->dev.dma_parms = &pl330->dma_parms; - /* * This is the limit for transfers with a buswidth of 1, larger * buswidths will have larger limits. @@ -3253,6 +3416,7 @@ dev_err(&adev->dev, "unable to set the seg size\n"); + init_pl330_debugfs(pl330); dev_info(&adev->dev, "Loaded driver for PL330 DMAC-%x\n", adev->periphid); dev_info(&adev->dev, @@ -3284,10 +3448,15 @@ probe_err2: pl330_del(pl330); + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); return ret; } -static int pl330_remove(struct amba_device *adev) +static void pl330_remove(struct amba_device *adev) { struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; @@ -3322,7 +3491,11 @@ pl330_del(pl330); - return 0; + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); } static const struct amba_id pl330_ids[] = { -- Gitblit v1.6.2