@@ -14,6 +14,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/sys_soc.h>
 
 #include <linux/fsl/mc.h>
 #include <soc/fsl/dpaa2-io.h>
@@ -29,6 +30,48 @@
 struct dpio_priv {
 	struct dpaa2_io *io;
 };
+
+static cpumask_var_t cpus_unused_mask;
+
+static const struct soc_device_attribute ls1088a_soc[] = {
+	{.family = "QorIQ LS1088A"},
+	{ /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+	{.family = "QorIQ LS2080A"},
+	{ /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+	{.family = "QorIQ LS2088A"},
+	{ /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+	{.family = "QorIQ LX2160A"},
+	{ /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
+	int cluster_base, cluster_size;
+
+	if (soc_device_match(ls1088a_soc)) {
+		cluster_base = 2;
+		cluster_size = 4;
+	} else if (soc_device_match(ls2080a_soc) ||
+		   soc_device_match(ls2088a_soc) ||
+		   soc_device_match(lx2160a_soc)) {
+		cluster_base = 0;
+		cluster_size = 2;
+	} else {
+		dev_err(&dpio_dev->dev, "unknown SoC version\n");
+		return -1;
+	}
+
+	return cluster_base + cpu / cluster_size;
+}
 
 static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
 {
@@ -50,11 +93,8 @@
 
 static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
 {
-	struct dpio_priv *priv;
 	int error;
 	struct fsl_mc_device_irq *irq;
-
-	priv = dev_get_drvdata(&dpio_dev->dev);
 
 	irq = dpio_dev->irqs[0];
 	error = devm_request_irq(&dpio_dev->dev,
@@ -86,7 +126,8 @@
 	struct dpio_priv *priv;
 	int err = -ENOMEM;
 	struct device *dev = &dpio_dev->dev;
-	static int next_cpu = -1;
+	int possible_next_cpu;
+	int sdest;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -106,6 +147,12 @@
 	if (err) {
 		dev_err(dev, "dpio_open() failed\n");
 		goto err_open;
+	}
+
+	err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpio_reset() failed\n");
+		goto err_reset;
 	}
 
 	err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
@@ -128,25 +175,41 @@
 	desc.dpio_id = dpio_dev->obj_desc.id;
 
 	/* get the cpu to use for the affinity hint */
-	if (next_cpu == -1)
-		next_cpu = cpumask_first(cpu_online_mask);
-	else
-		next_cpu = cpumask_next(next_cpu, cpu_online_mask);
-
-	if (!cpu_possible(next_cpu)) {
+	possible_next_cpu = cpumask_first(cpus_unused_mask);
+	if (possible_next_cpu >= nr_cpu_ids) {
 		dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
 		err = -ERANGE;
 		goto err_allocate_irqs;
 	}
-	desc.cpu = next_cpu;
+	desc.cpu = possible_next_cpu;
+	cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
 
-	/*
-	 * Set the CENA regs to be the cache inhibited area of the portal to
-	 * avoid coherency issues if a user migrates to another core.
-	 */
-	desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
-				       resource_size(&dpio_dev->regions[1]),
-				       MEMREMAP_WC);
+	sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+	if (sdest >= 0) {
+		err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+						    dpio_dev->mc_handle,
+						    sdest);
+		if (err)
+			dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+				desc.cpu);
+	}
+
+	if (dpio_dev->obj_desc.region_count < 3) {
+		/* No support for DDR backed portals, use classic mapping */
+		/*
+		 * Set the CENA regs to be the cache inhibited area of the
+		 * portal to avoid coherency issues if a user migrates to
+		 * another core.
+		 */
+		desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
					resource_size(&dpio_dev->regions[1]),
					MEMREMAP_WC);
+	} else {
+		desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
					resource_size(&dpio_dev->regions[2]),
					MEMREMAP_WB);
+	}
+
 	if (IS_ERR(desc.regs_cena)) {
 		dev_err(dev, "devm_memremap failed\n");
 		err = PTR_ERR(desc.regs_cena);
@@ -167,22 +230,21 @@
 		goto err_allocate_irqs;
 	}
 
-	err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
-	if (err)
-		goto err_register_dpio_irq;
-
-	priv->io = dpaa2_io_create(&desc);
+	priv->io = dpaa2_io_create(&desc, dev);
 	if (!priv->io) {
 		dev_err(dev, "dpaa2_io_create failed\n");
 		err = -ENOMEM;
 		goto err_dpaa2_io_create;
 	}
 
+	err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+	if (err)
+		goto err_register_dpio_irq;
+
	dev_info(dev, "probed\n");
 	dev_dbg(dev, " receives_notifications = %d\n",
 		desc.receives_notifications);
 	dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-	fsl_mc_portal_free(dpio_dev->mc_io);
 
 	return 0;
 
@@ -193,6 +255,7 @@
 err_allocate_irqs:
 	dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
 err_get_attr:
+err_reset:
 	dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
 err_open:
 	fsl_mc_portal_free(dpio_dev->mc_io);
@@ -211,20 +274,17 @@
 {
 	struct device *dev;
 	struct dpio_priv *priv;
-	int err;
+	int err = 0, cpu;
 
 	dev = &dpio_dev->dev;
 	priv = dev_get_drvdata(dev);
+	cpu = dpaa2_io_get_cpu(priv->io);
 
 	dpaa2_io_down(priv->io);
 
 	dpio_teardown_irqs(dpio_dev);
 
-	err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
-	if (err) {
-		dev_err(dev, "MC portal allocation failed\n");
-		goto err_mcportal;
-	}
+	cpumask_set_cpu(cpu, cpus_unused_mask);
 
 	err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
 			&dpio_dev->mc_handle);
@@ -243,7 +303,7 @@
 
 err_open:
 	fsl_mc_portal_free(dpio_dev->mc_io);
-err_mcportal:
+
 	return err;
 }
 
@@ -267,11 +327,16 @@
 
 static int dpio_driver_init(void)
 {
+	if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(cpus_unused_mask, cpu_online_mask);
+
 	return fsl_mc_driver_register(&dpaa2_dpio_driver);
 }
 
 static void dpio_driver_exit(void)
 {
+	free_cpumask_var(cpus_unused_mask);
 	fsl_mc_driver_unregister(&dpaa2_dpio_driver);
 }
 module_init(dpio_driver_init);