.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | | - * This program is free software; you can redistribute it and/or modify it |
---|
3 | | - * under the terms of the GNU General Public License version 2 as published |
---|
4 | | - * by the Free Software Foundation. |
---|
5 | 3 | * |
---|
6 | 4 | * Copyright (C) 2010 John Crispin <john@phrozen.org> |
---|
7 | 5 | * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> |
---|
.. | .. |
---|
22 | 20 | #include <irq.h> |
---|
23 | 21 | |
---|
/* register definitions - internal irqs (offsets within one IM bank) */
#define LTQ_ICU_ISR		0x0000	/* interrupt status */
#define LTQ_ICU_IER		0x0008	/* interrupt enable */
#define LTQ_ICU_IOSR		0x0010	/* masked interrupt status */
#define LTQ_ICU_IRSR		0x0018	/* interrupt request set */
#define LTQ_ICU_IMR		0x0020	/* interrupt mode (edge/level) */

/* stride between two consecutive IM register banks */
#define LTQ_ICU_IM_SIZE		0x28
---|
32 | 30 | |
---|
33 | 31 | /* register definitions - external irqs */ |
---|
34 | 32 | #define LTQ_EIU_EXIN_C 0x0000 |
---|
.. | .. |
---|
48 | 46 | */ |
---|
49 | 47 | #define LTQ_ICU_EBU_IRQ 22 |
---|
50 | 48 | |
---|
/*
 * Per-VPE ICU accessors: each VPE has its own register block
 * (ltq_icu_membase[vpe]) containing MAX_IM banks of LTQ_ICU_IM_SIZE each.
 * All macro arguments are parenthesized so expression arguments
 * (e.g. "a + b" for the module index) expand correctly.
 */
#define ltq_icu_w32(vpe, m, x, y) \
	ltq_w32((x), ltq_icu_membase[(vpe)] + (m) * LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x) \
	ltq_r32(ltq_icu_membase[(vpe)] + (m) * LTQ_ICU_IM_SIZE + (x))
---|
53 | 54 | |
---|
/* EIU (external interrupt unit) register accessors */
#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8
---|
63 | 60 | |
---|
64 | 61 | static int exin_avail; |
---|
65 | 62 | static u32 ltq_eiu_irq[MAX_EIU]; |
---|
66 | | -static void __iomem *ltq_icu_membase[MAX_IM]; |
---|
| 63 | +static void __iomem *ltq_icu_membase[NR_CPUS]; |
---|
67 | 64 | static void __iomem *ltq_eiu_membase; |
---|
68 | 65 | static struct irq_domain *ltq_domain; |
---|
| 66 | +static DEFINE_SPINLOCK(ltq_eiu_lock); |
---|
| 67 | +static DEFINE_RAW_SPINLOCK(ltq_icu_lock); |
---|
69 | 68 | static int ltq_perfcount_irq; |
---|
70 | 69 | |
---|
71 | 70 | int ltq_eiu_get_irq(int exin) |
---|
.. | .. |
---|
77 | 76 | |
---|
78 | 77 | void ltq_disable_irq(struct irq_data *d) |
---|
79 | 78 | { |
---|
80 | | - u32 ier = LTQ_ICU_IM0_IER; |
---|
81 | | - int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
82 | | - int im = offset / INT_NUM_IM_OFFSET; |
---|
| 79 | + unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
| 80 | + unsigned long im = offset / INT_NUM_IM_OFFSET; |
---|
| 81 | + unsigned long flags; |
---|
| 82 | + int vpe; |
---|
83 | 83 | |
---|
84 | 84 | offset %= INT_NUM_IM_OFFSET; |
---|
85 | | - ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier); |
---|
| 85 | + |
---|
| 86 | + raw_spin_lock_irqsave(<q_icu_lock, flags); |
---|
| 87 | + for_each_present_cpu(vpe) { |
---|
| 88 | + ltq_icu_w32(vpe, im, |
---|
| 89 | + ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset), |
---|
| 90 | + LTQ_ICU_IER); |
---|
| 91 | + } |
---|
| 92 | + raw_spin_unlock_irqrestore(<q_icu_lock, flags); |
---|
86 | 93 | } |
---|
87 | 94 | |
---|
88 | 95 | void ltq_mask_and_ack_irq(struct irq_data *d) |
---|
89 | 96 | { |
---|
90 | | - u32 ier = LTQ_ICU_IM0_IER; |
---|
91 | | - u32 isr = LTQ_ICU_IM0_ISR; |
---|
92 | | - int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
93 | | - int im = offset / INT_NUM_IM_OFFSET; |
---|
| 97 | + unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
| 98 | + unsigned long im = offset / INT_NUM_IM_OFFSET; |
---|
| 99 | + unsigned long flags; |
---|
| 100 | + int vpe; |
---|
94 | 101 | |
---|
95 | 102 | offset %= INT_NUM_IM_OFFSET; |
---|
96 | | - ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier); |
---|
97 | | - ltq_icu_w32(im, BIT(offset), isr); |
---|
| 103 | + |
---|
| 104 | + raw_spin_lock_irqsave(<q_icu_lock, flags); |
---|
| 105 | + for_each_present_cpu(vpe) { |
---|
| 106 | + ltq_icu_w32(vpe, im, |
---|
| 107 | + ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset), |
---|
| 108 | + LTQ_ICU_IER); |
---|
| 109 | + ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR); |
---|
| 110 | + } |
---|
| 111 | + raw_spin_unlock_irqrestore(<q_icu_lock, flags); |
---|
98 | 112 | } |
---|
99 | 113 | |
---|
100 | 114 | static void ltq_ack_irq(struct irq_data *d) |
---|
101 | 115 | { |
---|
102 | | - u32 isr = LTQ_ICU_IM0_ISR; |
---|
103 | | - int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
104 | | - int im = offset / INT_NUM_IM_OFFSET; |
---|
| 116 | + unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
| 117 | + unsigned long im = offset / INT_NUM_IM_OFFSET; |
---|
| 118 | + unsigned long flags; |
---|
| 119 | + int vpe; |
---|
105 | 120 | |
---|
106 | 121 | offset %= INT_NUM_IM_OFFSET; |
---|
107 | | - ltq_icu_w32(im, BIT(offset), isr); |
---|
| 122 | + |
---|
| 123 | + raw_spin_lock_irqsave(<q_icu_lock, flags); |
---|
| 124 | + for_each_present_cpu(vpe) { |
---|
| 125 | + ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR); |
---|
| 126 | + } |
---|
| 127 | + raw_spin_unlock_irqrestore(<q_icu_lock, flags); |
---|
108 | 128 | } |
---|
109 | 129 | |
---|
110 | 130 | void ltq_enable_irq(struct irq_data *d) |
---|
111 | 131 | { |
---|
112 | | - u32 ier = LTQ_ICU_IM0_IER; |
---|
113 | | - int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
114 | | - int im = offset / INT_NUM_IM_OFFSET; |
---|
| 132 | + unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
---|
| 133 | + unsigned long im = offset / INT_NUM_IM_OFFSET; |
---|
| 134 | + unsigned long flags; |
---|
| 135 | + int vpe; |
---|
115 | 136 | |
---|
116 | 137 | offset %= INT_NUM_IM_OFFSET; |
---|
117 | | - ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier); |
---|
| 138 | + |
---|
| 139 | + vpe = cpumask_first(irq_data_get_effective_affinity_mask(d)); |
---|
| 140 | + |
---|
| 141 | + /* This shouldn't be even possible, maybe during CPU hotplug spam */ |
---|
| 142 | + if (unlikely(vpe >= nr_cpu_ids)) |
---|
| 143 | + vpe = smp_processor_id(); |
---|
| 144 | + |
---|
| 145 | + raw_spin_lock_irqsave(<q_icu_lock, flags); |
---|
| 146 | + |
---|
| 147 | + ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset), |
---|
| 148 | + LTQ_ICU_IER); |
---|
| 149 | + |
---|
| 150 | + raw_spin_unlock_irqrestore(<q_icu_lock, flags); |
---|
118 | 151 | } |
---|
119 | 152 | |
---|
120 | 153 | static int ltq_eiu_settype(struct irq_data *d, unsigned int type) |
---|
121 | 154 | { |
---|
122 | 155 | int i; |
---|
| 156 | + unsigned long flags; |
---|
123 | 157 | |
---|
124 | 158 | for (i = 0; i < exin_avail; i++) { |
---|
125 | 159 | if (d->hwirq == ltq_eiu_irq[i]) { |
---|
.. | .. |
---|
156 | 190 | if (edge) |
---|
157 | 191 | irq_set_handler(d->hwirq, handle_edge_irq); |
---|
158 | 192 | |
---|
| 193 | + spin_lock_irqsave(<q_eiu_lock, flags); |
---|
159 | 194 | ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) & |
---|
160 | 195 | (~(7 << (i * 4)))) | (val << (i * 4)), |
---|
161 | 196 | LTQ_EIU_EXIN_C); |
---|
| 197 | + spin_unlock_irqrestore(<q_eiu_lock, flags); |
---|
162 | 198 | } |
---|
163 | 199 | } |
---|
164 | 200 | |
---|
.. | .. |
---|
202 | 238 | } |
---|
203 | 239 | } |
---|
204 | 240 | |
---|
#if defined(CONFIG_SMP)
/*
 * Record the new affinity for an ICU interrupt. Only the effective
 * mask is updated here; the hardware routing takes effect on the next
 * enable (ltq_enable_irq picks the first CPU of the effective mask).
 */
static int ltq_icu_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	struct cpumask tmask;

	/* reject masks that contain no online CPU */
	if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
		return -EINVAL;

	irq_data_update_effective_affinity(d, &tmask);

	return IRQ_SET_MASK_OK;
}
#endif
---|
| 255 | + |
---|
205 | 256 | static struct irq_chip ltq_irq_type = { |
---|
206 | 257 | .name = "icu", |
---|
207 | 258 | .irq_enable = ltq_enable_irq, |
---|
.. | .. |
---|
210 | 261 | .irq_ack = ltq_ack_irq, |
---|
211 | 262 | .irq_mask = ltq_disable_irq, |
---|
212 | 263 | .irq_mask_ack = ltq_mask_and_ack_irq, |
---|
| 264 | +#if defined(CONFIG_SMP) |
---|
| 265 | + .irq_set_affinity = ltq_icu_irq_set_affinity, |
---|
| 266 | +#endif |
---|
213 | 267 | }; |
---|
214 | 268 | |
---|
215 | 269 | static struct irq_chip ltq_eiu_type = { |
---|
.. | .. |
---|
223 | 277 | .irq_mask = ltq_disable_irq, |
---|
224 | 278 | .irq_mask_ack = ltq_mask_and_ack_irq, |
---|
225 | 279 | .irq_set_type = ltq_eiu_settype, |
---|
| 280 | +#if defined(CONFIG_SMP) |
---|
| 281 | + .irq_set_affinity = ltq_icu_irq_set_affinity, |
---|
| 282 | +#endif |
---|
226 | 283 | }; |
---|
227 | 284 | |
---|
228 | 285 | static void ltq_hw_irq_handler(struct irq_desc *desc) |
---|
229 | 286 | { |
---|
230 | | - int module = irq_desc_get_irq(desc) - 2; |
---|
| 287 | + unsigned int module = irq_desc_get_irq(desc) - 2; |
---|
231 | 288 | u32 irq; |
---|
232 | | - int hwirq; |
---|
| 289 | + irq_hw_number_t hwirq; |
---|
| 290 | + int vpe = smp_processor_id(); |
---|
233 | 291 | |
---|
234 | | - irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); |
---|
| 292 | + irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR); |
---|
235 | 293 | if (irq == 0) |
---|
236 | 294 | return; |
---|
237 | 295 | |
---|
.. | .. |
---|
252 | 310 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) |
---|
253 | 311 | { |
---|
254 | 312 | struct irq_chip *chip = <q_irq_type; |
---|
| 313 | + struct irq_data *data; |
---|
255 | 314 | int i; |
---|
256 | 315 | |
---|
257 | 316 | if (hw < MIPS_CPU_IRQ_CASCADE) |
---|
.. | .. |
---|
260 | 319 | for (i = 0; i < exin_avail; i++) |
---|
261 | 320 | if (hw == ltq_eiu_irq[i]) |
---|
262 | 321 | chip = <q_eiu_type; |
---|
| 322 | + |
---|
| 323 | + data = irq_get_irq_data(irq); |
---|
| 324 | + |
---|
| 325 | + irq_data_update_effective_affinity(data, cpumask_of(0)); |
---|
263 | 326 | |
---|
264 | 327 | irq_set_chip_and_handler(irq, chip, handle_level_irq); |
---|
265 | 328 | |
---|
.. | .. |
---|
275 | 338 | { |
---|
276 | 339 | struct device_node *eiu_node; |
---|
277 | 340 | struct resource res; |
---|
278 | | - int i, ret; |
---|
| 341 | + int i, ret, vpe; |
---|
279 | 342 | |
---|
280 | | - for (i = 0; i < MAX_IM; i++) { |
---|
281 | | - if (of_address_to_resource(node, i, &res)) |
---|
282 | | - panic("Failed to get icu memory range"); |
---|
| 343 | + /* load register regions of available ICUs */ |
---|
| 344 | + for_each_possible_cpu(vpe) { |
---|
| 345 | + if (of_address_to_resource(node, vpe, &res)) |
---|
| 346 | + panic("Failed to get icu%i memory range", vpe); |
---|
283 | 347 | |
---|
284 | 348 | if (!request_mem_region(res.start, resource_size(&res), |
---|
285 | 349 | res.name)) |
---|
286 | | - pr_err("Failed to request icu memory"); |
---|
| 350 | + pr_err("Failed to request icu%i memory\n", vpe); |
---|
287 | 351 | |
---|
288 | | - ltq_icu_membase[i] = ioremap_nocache(res.start, |
---|
| 352 | + ltq_icu_membase[vpe] = ioremap(res.start, |
---|
289 | 353 | resource_size(&res)); |
---|
290 | | - if (!ltq_icu_membase[i]) |
---|
291 | | - panic("Failed to remap icu memory"); |
---|
| 354 | + |
---|
| 355 | + if (!ltq_icu_membase[vpe]) |
---|
| 356 | + panic("Failed to remap icu%i memory", vpe); |
---|
292 | 357 | } |
---|
293 | 358 | |
---|
294 | 359 | /* turn off all irqs by default */ |
---|
295 | | - for (i = 0; i < MAX_IM; i++) { |
---|
296 | | - /* make sure all irqs are turned off by default */ |
---|
297 | | - ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER); |
---|
298 | | - /* clear all possibly pending interrupts */ |
---|
299 | | - ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR); |
---|
| 360 | + for_each_possible_cpu(vpe) { |
---|
| 361 | + for (i = 0; i < MAX_IM; i++) { |
---|
| 362 | + /* make sure all irqs are turned off by default */ |
---|
| 363 | + ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER); |
---|
| 364 | + |
---|
| 365 | + /* clear all possibly pending interrupts */ |
---|
| 366 | + ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR); |
---|
| 367 | + ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR); |
---|
| 368 | + |
---|
| 369 | + /* clear resend */ |
---|
| 370 | + ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR); |
---|
| 371 | + } |
---|
300 | 372 | } |
---|
301 | 373 | |
---|
302 | 374 | mips_cpu_irq_init(); |
---|
.. | .. |
---|
310 | 382 | |
---|
311 | 383 | /* tell oprofile which irq to use */ |
---|
312 | 384 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
---|
313 | | - |
---|
314 | | - /* |
---|
315 | | - * if the timer irq is not one of the mips irqs we need to |
---|
316 | | - * create a mapping |
---|
317 | | - */ |
---|
318 | | - if (MIPS_CPU_TIMER_IRQ != 7) |
---|
319 | | - irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); |
---|
320 | 385 | |
---|
321 | 386 | /* the external interrupts are optional and xway only */ |
---|
322 | 387 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); |
---|
.. | .. |
---|
337 | 402 | res.name)) |
---|
338 | 403 | pr_err("Failed to request eiu memory"); |
---|
339 | 404 | |
---|
340 | | - ltq_eiu_membase = ioremap_nocache(res.start, |
---|
| 405 | + ltq_eiu_membase = ioremap(res.start, |
---|
341 | 406 | resource_size(&res)); |
---|
342 | 407 | if (!ltq_eiu_membase) |
---|
343 | 408 | panic("Failed to remap eiu memory"); |
---|
.. | .. |
---|
354 | 419 | |
---|
355 | 420 | unsigned int get_c0_compare_int(void) |
---|
356 | 421 | { |
---|
357 | | - return MIPS_CPU_TIMER_IRQ; |
---|
| 422 | + return CP0_LEGACY_COMPARE_IRQ; |
---|
358 | 423 | } |
---|
359 | 424 | |
---|
360 | | -static struct of_device_id __initdata of_irq_ids[] = { |
---|
| 425 | +static const struct of_device_id of_irq_ids[] __initconst = { |
---|
361 | 426 | { .compatible = "lantiq,icu", .data = icu_of_init }, |
---|
362 | 427 | {}, |
---|
363 | 428 | }; |
---|