.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * drivers/uio/uio_dmem_genirq.c |
---|
3 | 4 | * |
---|
.. | .. |
---|
6 | 7 | * Copyright (C) 2012 Damian Hobson-Garcia |
---|
7 | 8 | * |
---|
8 | 9 | * Based on uio_pdrv_genirq.c by Magnus Damm |
---|
9 | | - * |
---|
10 | | - * This program is free software; you can redistribute it and/or modify it |
---|
11 | | - * under the terms of the GNU General Public License version 2 as published by |
---|
12 | | - * the Free Software Foundation. |
---|
13 | 10 | */ |
---|
14 | 11 | |
---|
15 | 12 | #include <linux/platform_device.h> |
---|
.. | .. |
---|
23 | 20 | #include <linux/pm_runtime.h> |
---|
24 | 21 | #include <linux/dma-mapping.h> |
---|
25 | 22 | #include <linux/slab.h> |
---|
| 23 | +#include <linux/irq.h> |
---|
26 | 24 | |
---|
27 | 25 | #include <linux/of.h> |
---|
28 | 26 | #include <linux/of_platform.h> |
---|
.. | .. |
---|
47 | 45 | { |
---|
48 | 46 | struct uio_dmem_genirq_platdata *priv = info->priv; |
---|
49 | 47 | struct uio_mem *uiomem; |
---|
50 | | - int ret = 0; |
---|
51 | 48 | int dmem_region = priv->dmem_region_start; |
---|
52 | 49 | |
---|
53 | 50 | uiomem = &priv->uioinfo->mem[priv->dmem_region_start]; |
---|
.. | .. |
---|
71 | 68 | mutex_unlock(&priv->alloc_lock); |
---|
72 | 69 | /* Wait until the Runtime PM code has woken up the device */ |
---|
73 | 70 | pm_runtime_get_sync(&priv->pdev->dev); |
---|
74 | | - return ret; |
---|
| 71 | + return 0; |
---|
75 | 72 | } |
---|
76 | 73 | |
---|
77 | 74 | static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode) |
---|
.. | .. |
---|
113 | 110 | * remember the state so we can allow user space to enable it later. |
---|
114 | 111 | */ |
---|
115 | 112 | |
---|
| 113 | + spin_lock(&priv->lock); |
---|
116 | 114 | if (!test_and_set_bit(0, &priv->flags)) |
---|
117 | 115 | disable_irq_nosync(irq); |
---|
| 116 | + spin_unlock(&priv->lock); |
---|
118 | 117 | |
---|
119 | 118 | return IRQ_HANDLED; |
---|
120 | 119 | } |
---|
.. | .. |
---|
128 | 127 | * in the interrupt controller, but keep track of the |
---|
129 | 128 | * state to prevent per-irq depth damage. |
---|
130 | 129 | * |
---|
131 | | - * Serialize this operation to support multiple tasks. |
---|
| 130 | + * Serialize this operation to support multiple tasks and concurrency |
---|
| 131 | + * with irq handler on SMP systems. |
---|
132 | 132 | */ |
---|
133 | 133 | |
---|
134 | 134 | spin_lock_irqsave(&priv->lock, flags); |
---|
135 | 135 | if (irq_on) { |
---|
136 | 136 | if (test_and_clear_bit(0, &priv->flags)) |
---|
137 | 137 | enable_irq(dev_info->irq); |
---|
138 | | - spin_unlock_irqrestore(&priv->lock, flags); |
---|
139 | 138 | } else { |
---|
140 | | - if (!test_and_set_bit(0, &priv->flags)) { |
---|
141 | | - spin_unlock_irqrestore(&priv->lock, flags); |
---|
142 | | - disable_irq(dev_info->irq); |
---|
143 | | - } |
---|
| 139 | + if (!test_and_set_bit(0, &priv->flags)) |
---|
| 140 | + disable_irq_nosync(dev_info->irq); |
---|
144 | 141 | } |
---|
| 142 | + spin_unlock_irqrestore(&priv->lock, flags); |
---|
145 | 143 | |
---|
146 | 144 | return 0; |
---|
147 | 145 | } |
---|
.. | .. |
---|
156 | 154 | int i; |
---|
157 | 155 | |
---|
158 | 156 | if (pdev->dev.of_node) { |
---|
159 | | - int irq; |
---|
160 | | - |
---|
161 | 157 | /* alloc uioinfo for one device */ |
---|
162 | 158 | uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL); |
---|
163 | 159 | if (!uioinfo) { |
---|
.. | .. |
---|
165 | 161 | dev_err(&pdev->dev, "unable to kmalloc\n"); |
---|
166 | 162 | goto bad2; |
---|
167 | 163 | } |
---|
168 | | - uioinfo->name = pdev->dev.of_node->name; |
---|
| 164 | + uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", |
---|
| 165 | + pdev->dev.of_node); |
---|
169 | 166 | uioinfo->version = "devicetree"; |
---|
170 | | - |
---|
171 | | - /* Multiple IRQs are not supported */ |
---|
172 | | - irq = platform_get_irq(pdev, 0); |
---|
173 | | - if (irq == -ENXIO) |
---|
174 | | - uioinfo->irq = UIO_IRQ_NONE; |
---|
175 | | - else |
---|
176 | | - uioinfo->irq = irq; |
---|
177 | 167 | } |
---|
178 | 168 | |
---|
179 | 169 | if (!uioinfo || !uioinfo->name || !uioinfo->version) { |
---|
.. | .. |
---|
203 | 193 | mutex_init(&priv->alloc_lock); |
---|
204 | 194 | |
---|
205 | 195 | if (!uioinfo->irq) { |
---|
| 196 | + /* Multiple IRQs are not supported */ |
---|
206 | 197 | ret = platform_get_irq(pdev, 0); |
---|
207 | | - if (ret < 0) { |
---|
208 | | - dev_err(&pdev->dev, "failed to get IRQ\n"); |
---|
| 198 | + if (ret == -ENXIO && pdev->dev.of_node) |
---|
| 199 | + ret = UIO_IRQ_NONE; |
---|
| 200 | + else if (ret < 0) |
---|
209 | 201 | goto bad1; |
---|
210 | | - } |
---|
211 | 202 | uioinfo->irq = ret; |
---|
212 | 203 | } |
---|
| 204 | + |
---|
| 205 | + if (uioinfo->irq) { |
---|
| 206 | + struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq); |
---|
| 207 | + |
---|
| 208 | + /* |
---|
| 209 | + * If a level interrupt, don't do lazy disable. Otherwise the |
---|
| 210 | + * irq will fire again since clearing of the actual cause, on |
---|
| 211 | + * device level, is done in userspace. |
---|
| 212 | + * irqd_is_level_type() isn't used since it isn't valid until |
---|
| 213 | + * irq is configured. |
---|
| 214 | + */ |
---|
| 215 | + if (irq_data && |
---|
| 216 | + irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) { |
---|
| 217 | + dev_dbg(&pdev->dev, "disable lazy unmask\n"); |
---|
| 218 | + irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY); |
---|
| 219 | + } |
---|
| 220 | + } |
---|
| 221 | + |
---|
213 | 222 | uiomem = &uioinfo->mem[0]; |
---|
214 | 223 | |
---|
215 | 224 | for (i = 0; i < pdev->num_resources; ++i) { |
---|