forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-16 50a212ec906f7524620675f0c57357691c26c81f
kernel/arch/mips/lantiq/irq.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
  *
  * Copyright (C) 2010 John Crispin <john@phrozen.org>
  * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
@@ -22,13 +20,13 @@
 #include <irq.h>
 
 /* register definitions - internal irqs */
-#define LTQ_ICU_IM0_ISR		0x0000
-#define LTQ_ICU_IM0_IER		0x0008
-#define LTQ_ICU_IM0_IOSR	0x0010
-#define LTQ_ICU_IM0_IRSR	0x0018
-#define LTQ_ICU_IM0_IMR		0x0020
-#define LTQ_ICU_IM1_ISR		0x0028
-#define LTQ_ICU_OFFSET		(LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
+#define LTQ_ICU_ISR		0x0000
+#define LTQ_ICU_IER		0x0008
+#define LTQ_ICU_IOSR		0x0010
+#define LTQ_ICU_IRSR		0x0018
+#define LTQ_ICU_IMR		0x0020
+
+#define LTQ_ICU_IM_SIZE		0x28
 
 /* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C		0x0000
@@ -48,24 +46,25 @@
  */
 #define LTQ_ICU_EBU_IRQ		22
 
-#define ltq_icu_w32(m, x, y)	ltq_w32((x), ltq_icu_membase[m] + (y))
-#define ltq_icu_r32(m, x)	ltq_r32(ltq_icu_membase[m] + (x))
+#define ltq_icu_w32(vpe, m, x, y)	\
+	ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
+
+#define ltq_icu_r32(vpe, m, x)		\
+	ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
 
 #define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))
-
-/* our 2 ipi interrupts for VSMP */
-#define MIPS_CPU_IPI_RESCHED_IRQ	0
-#define MIPS_CPU_IPI_CALL_IRQ		1
 
 /* we have a cascade of 8 irqs */
 #define MIPS_CPU_IRQ_CASCADE		8
 
 static int exin_avail;
 static u32 ltq_eiu_irq[MAX_EIU];
-static void __iomem *ltq_icu_membase[MAX_IM];
+static void __iomem *ltq_icu_membase[NR_CPUS];
 static void __iomem *ltq_eiu_membase;
 static struct irq_domain *ltq_domain;
+static DEFINE_SPINLOCK(ltq_eiu_lock);
+static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
 static int ltq_perfcount_irq;
 
 int ltq_eiu_get_irq(int exin)
@@ -77,49 +76,84 @@
 
 void ltq_disable_irq(struct irq_data *d)
 {
-	u32 ier = LTQ_ICU_IM0_IER;
-	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-	int im = offset / INT_NUM_IM_OFFSET;
+	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	unsigned long im = offset / INT_NUM_IM_OFFSET;
+	unsigned long flags;
+	int vpe;
 
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
+
+	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+	for_each_present_cpu(vpe) {
+		ltq_icu_w32(vpe, im,
+			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+			    LTQ_ICU_IER);
+	}
+	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
 {
-	u32 ier = LTQ_ICU_IM0_IER;
-	u32 isr = LTQ_ICU_IM0_ISR;
-	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-	int im = offset / INT_NUM_IM_OFFSET;
+	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	unsigned long im = offset / INT_NUM_IM_OFFSET;
+	unsigned long flags;
+	int vpe;
 
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
-	ltq_icu_w32(im, BIT(offset), isr);
+
+	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+	for_each_present_cpu(vpe) {
+		ltq_icu_w32(vpe, im,
+			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+			    LTQ_ICU_IER);
+		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+	}
+	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
-	u32 isr = LTQ_ICU_IM0_ISR;
-	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-	int im = offset / INT_NUM_IM_OFFSET;
+	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	unsigned long im = offset / INT_NUM_IM_OFFSET;
+	unsigned long flags;
+	int vpe;
 
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(im, BIT(offset), isr);
+
+	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+	for_each_present_cpu(vpe) {
+		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+	}
+	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
-	u32 ier = LTQ_ICU_IM0_IER;
-	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-	int im = offset / INT_NUM_IM_OFFSET;
+	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	unsigned long im = offset / INT_NUM_IM_OFFSET;
+	unsigned long flags;
+	int vpe;
 
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
+
+	vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
+
+	/* This shouldn't be even possible, maybe during CPU hotplug spam */
+	if (unlikely(vpe >= nr_cpu_ids))
+		vpe = smp_processor_id();
+
+	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+
+	ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
+		    LTQ_ICU_IER);
+
+	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
 {
 	int i;
+	unsigned long flags;
 
 	for (i = 0; i < exin_avail; i++) {
 		if (d->hwirq == ltq_eiu_irq[i]) {
@@ -156,9 +190,11 @@
 			if (edge)
 				irq_set_handler(d->hwirq, handle_edge_irq);
 
+			spin_lock_irqsave(&ltq_eiu_lock, flags);
 			ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
 				    (~(7 << (i * 4)))) | (val << (i * 4)),
 				    LTQ_EIU_EXIN_C);
+			spin_unlock_irqrestore(&ltq_eiu_lock, flags);
 		}
 	}
 
@@ -202,6 +238,21 @@
 	}
 }
 
+#if defined(CONFIG_SMP)
+static int ltq_icu_irq_set_affinity(struct irq_data *d,
+				    const struct cpumask *cpumask, bool force)
+{
+	struct cpumask tmask;
+
+	if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
+		return -EINVAL;
+
+	irq_data_update_effective_affinity(d, &tmask);
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
 static struct irq_chip ltq_irq_type = {
 	.name = "icu",
 	.irq_enable = ltq_enable_irq,
@@ -210,6 +261,9 @@
 	.irq_ack = ltq_ack_irq,
 	.irq_mask = ltq_disable_irq,
 	.irq_mask_ack = ltq_mask_and_ack_irq,
+#if defined(CONFIG_SMP)
+	.irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
 };
 
 static struct irq_chip ltq_eiu_type = {
@@ -223,15 +277,19 @@
 	.irq_mask = ltq_disable_irq,
 	.irq_mask_ack = ltq_mask_and_ack_irq,
 	.irq_set_type = ltq_eiu_settype,
+#if defined(CONFIG_SMP)
+	.irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
 };
 
 static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
-	int module = irq_desc_get_irq(desc) - 2;
+	unsigned int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
-	int hwirq;
+	irq_hw_number_t hwirq;
+	int vpe = smp_processor_id();
 
-	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
+	irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
 	if (irq == 0)
 		return;
 
@@ -252,6 +310,7 @@
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
+	struct irq_data *data;
 	int i;
 
 	if (hw < MIPS_CPU_IRQ_CASCADE)
@@ -260,6 +319,10 @@
 	for (i = 0; i < exin_avail; i++)
 		if (hw == ltq_eiu_irq[i])
 			chip = &ltq_eiu_type;
+
+	data = irq_get_irq_data(irq);
+
+	irq_data_update_effective_affinity(data, cpumask_of(0));
 
 	irq_set_chip_and_handler(irq, chip, handle_level_irq);
 
@@ -275,28 +338,37 @@
 {
 	struct device_node *eiu_node;
 	struct resource res;
-	int i, ret;
+	int i, ret, vpe;
 
-	for (i = 0; i < MAX_IM; i++) {
-		if (of_address_to_resource(node, i, &res))
-			panic("Failed to get icu memory range");
+	/* load register regions of available ICUs */
+	for_each_possible_cpu(vpe) {
+		if (of_address_to_resource(node, vpe, &res))
+			panic("Failed to get icu%i memory range", vpe);
 
 		if (!request_mem_region(res.start, resource_size(&res),
 					res.name))
-			pr_err("Failed to request icu memory");
+			pr_err("Failed to request icu%i memory\n", vpe);
 
-		ltq_icu_membase[i] = ioremap_nocache(res.start,
+		ltq_icu_membase[vpe] = ioremap(res.start,
 					resource_size(&res));
-		if (!ltq_icu_membase[i])
-			panic("Failed to remap icu memory");
+
+		if (!ltq_icu_membase[vpe])
+			panic("Failed to remap icu%i memory", vpe);
 	}
 
 	/* turn off all irqs by default */
-	for (i = 0; i < MAX_IM; i++) {
-		/* make sure all irqs are turned off by default */
-		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
-		/* clear all possibly pending interrupts */
-		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
+	for_each_possible_cpu(vpe) {
+		for (i = 0; i < MAX_IM; i++) {
+			/* make sure all irqs are turned off by default */
+			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
+
+			/* clear all possibly pending interrupts */
+			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
+			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
+
+			/* clear resend */
+			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
+		}
 	}
 
 	mips_cpu_irq_init();
@@ -310,13 +382,6 @@
 
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
-
-	/*
-	 * if the timer irq is not one of the mips irqs we need to
-	 * create a mapping
-	 */
-	if (MIPS_CPU_TIMER_IRQ != 7)
-		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
 
 	/* the external interrupts are optional and xway only */
 	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
@@ -337,7 +402,7 @@
 					res.name))
 		pr_err("Failed to request eiu memory");
 
-	ltq_eiu_membase = ioremap_nocache(res.start,
+	ltq_eiu_membase = ioremap(res.start,
 					resource_size(&res));
 	if (!ltq_eiu_membase)
 		panic("Failed to remap eiu memory");
@@ -354,10 +419,10 @@
 
 unsigned int get_c0_compare_int(void)
 {
-	return MIPS_CPU_TIMER_IRQ;
+	return CP0_LEGACY_COMPARE_IRQ;
 }
 
-static struct of_device_id __initdata of_irq_ids[] = {
+static const struct of_device_id of_irq_ids[] __initconst = {
 	{ .compatible = "lantiq,icu", .data = icu_of_init },
 	{},
 };