2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/irqchip/irq-sifive-plic.c
@@ -4,10 +4,12 @@
  * Copyright (C) 2018 Christoph Hellwig
  */
 #define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -15,6 +17,7 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
+#include <asm/smp.h>
 
 /*
  * This driver implements a version of the RISC-V PLIC with the actual layout
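
The next hunk replaces the global plic_regs pointer and the driver-wide toggle lock with per-context state (struct plic_priv / struct plic_handler), so each hart context addresses and locks its own enable words. For reference, the enable-word arithmetic that the reworked plic_toggle() performs can be sketched as a minimal stand-alone C model; plain memory stands in for MMIO, and enable_words is a hypothetical substitute for one context's enable array, so this is an illustration rather than driver code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one context's MMIO enable array:
 * one 32-bit word per 32 interrupt sources, one bit per source. */
static uint32_t enable_words[32];

/* Same word/bit arithmetic as the reworked plic_toggle(). */
static void toggle(unsigned int hwirq, int enable)
{
        uint32_t *reg = &enable_words[hwirq / 32];
        uint32_t hwirq_mask = 1u << (hwirq % 32);

        if (enable)
                *reg |= hwirq_mask;   /* models writel(readl(reg) | mask, reg) */
        else
                *reg &= ~hwirq_mask;  /* models writel(readl(reg) & ~mask, reg) */
}

int main(void)
{
        toggle(33, 1);  /* source 33 lands in word 1, bit 1 */
        printf("word 1 = %#x\n", (unsigned)enable_words[1]);  /* 0x2 */
        toggle(33, 0);
        printf("word 1 = %#x\n", (unsigned)enable_words[1]);  /* 0 */
        return 0;
}

In the driver the same read-modify-write happens through readl()/writel() under handler->enable_lock, because the enable words are shared MMIO registers that cannot be assumed to support atomic operations.
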
@@ -54,91 +57,169 @@
 #define CONTEXT_THRESHOLD 0x00
 #define CONTEXT_CLAIM 0x04
 
-static void __iomem *plic_regs;
+#define PLIC_DISABLE_THRESHOLD 0x7
+#define PLIC_ENABLE_THRESHOLD 0
+
+struct plic_priv {
+        struct cpumask lmask;
+        struct irq_domain *irqdomain;
+        void __iomem *regs;
+};
 
 struct plic_handler {
         bool present;
-        int ctxid;
+        void __iomem *hart_base;
+        /*
+         * Protect mask operations on the registers given that we can't
+         * assume atomic memory operations work on them.
+         */
+        raw_spinlock_t enable_lock;
+        void __iomem *enable_base;
+        struct plic_priv *priv;
 };
+static int plic_parent_irq;
+static bool plic_cpuhp_setup_done;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
+static inline void plic_toggle(struct plic_handler *handler,
+                               int hwirq, int enable)
 {
-        return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-        return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
-{
-        u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+        u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
         u32 hwirq_mask = 1 << (hwirq % 32);
 
-        raw_spin_lock(&plic_toggle_lock);
+        raw_spin_lock(&handler->enable_lock);
         if (enable)
                 writel(readl(reg) | hwirq_mask, reg);
         else
                 writel(readl(reg) & ~hwirq_mask, reg);
-        raw_spin_unlock(&plic_toggle_lock);
+        raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+                                   struct irq_data *d, int enable)
 {
         int cpu;
+        struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
-        writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-        for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+        writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+        for_each_cpu(cpu, mask) {
                 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
-                if (handler->present)
-                        plic_toggle(handler->ctxid, d->hwirq, enable);
+                if (handler->present &&
+                    cpumask_test_cpu(cpu, &handler->priv->lmask))
+                        plic_toggle(handler, d->hwirq, enable);
         }
 }
 
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
 {
-        plic_irq_toggle(d, 1);
+        struct cpumask amask;
+        unsigned int cpu;
+        struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+        cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+        cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+                              &amask);
+        if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+                return;
+        plic_irq_toggle(cpumask_of(cpu), d, 1);
 }
 
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
 {
-        plic_irq_toggle(d, 0);
+        struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+        plic_irq_toggle(&priv->lmask, d, 0);
+}
+
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+                             const struct cpumask *mask_val, bool force)
+{
+        unsigned int cpu;
+        struct cpumask amask;
+        struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+        cpumask_and(&amask, &priv->lmask, mask_val);
+
+        if (force)
+                cpu = cpumask_first(&amask);
+        else
+                cpu = cpumask_any_and(&amask, cpu_online_mask);
+
+        if (cpu >= nr_cpu_ids)
+                return -EINVAL;
+
+        plic_irq_toggle(&priv->lmask, d, 0);
+        plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
+
+        irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+        return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+        if (irqd_irq_masked(d)) {
+                plic_irq_unmask(d);
+                writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+                plic_irq_mask(d);
+        } else {
+                writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+        }
 }
 
 static struct irq_chip plic_chip = {
         .name = "SiFive PLIC",
-        /*
-         * There is no need to mask/unmask PLIC interrupts. They are "masked"
-         * by reading claim and "unmasked" when writing it back.
-         */
-        .irq_enable = plic_irq_enable,
-        .irq_disable = plic_irq_disable,
+        .irq_mask = plic_irq_mask,
+        .irq_unmask = plic_irq_unmask,
+        .irq_eoi = plic_irq_eoi,
+#ifdef CONFIG_SMP
+        .irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                               irq_hw_number_t hwirq)
 {
-        irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
-        irq_set_chip_data(irq, NULL);
+        struct plic_priv *priv = d->host_data;
+
+        irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+                            handle_fasteoi_irq, NULL, NULL);
         irq_set_noprobe(irq);
+        irq_set_affinity(irq, &priv->lmask);
+        return 0;
+}
+
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                                 unsigned int nr_irqs, void *arg)
+{
+        int i, ret;
+        irq_hw_number_t hwirq;
+        unsigned int type;
+        struct irq_fwspec *fwspec = arg;
+
+        ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+        if (ret)
+                return ret;
+
+        for (i = 0; i < nr_irqs; i++) {
+                ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+                if (ret)
+                        return ret;
+        }
+
         return 0;
 }
 
 static const struct irq_domain_ops plic_irqdomain_ops = {
-        .map = plic_irqdomain_map,
-        .xlate = irq_domain_xlate_onecell,
+        .translate = irq_domain_translate_onecell,
+        .alloc = plic_irq_domain_alloc,
+        .free = irq_domain_free_irqs_top,
 };
-
-static struct irq_domain *plic_irqdomain;
 
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
@@ -146,113 +227,174 @@
  * that source ID back to the same claim register. This automatically enables
  * and disables the interrupt, so there's nothing else to do.
  */
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
 {
         struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-        void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+        struct irq_chip *chip = irq_desc_get_chip(desc);
+        void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
         irq_hw_number_t hwirq;
 
         WARN_ON_ONCE(!handler->present);
 
-        csr_clear(sie, SIE_SEIE);
+        chained_irq_enter(chip, desc);
+
         while ((hwirq = readl(claim))) {
-                int irq = irq_find_mapping(plic_irqdomain, hwirq);
+                int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
 
                 if (unlikely(irq <= 0))
                         pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
                                         hwirq);
                 else
                         generic_handle_irq(irq);
-                writel(hwirq, claim);
         }
-        csr_set(sie, SIE_SEIE);
+
+        chained_irq_exit(chip, desc);
 }
 
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
 {
-        for (; node; node = node->parent) {
-                if (of_device_is_compatible(node, "riscv"))
-                        return riscv_of_processor_hart(node);
-        }
+        /* priority must be > threshold to trigger an interrupt */
+        writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
 
-        return -1;
+static int plic_dying_cpu(unsigned int cpu)
+{
+        if (plic_parent_irq)
+                disable_percpu_irq(plic_parent_irq);
+
+        return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+        if (plic_parent_irq)
+                enable_percpu_irq(plic_parent_irq,
+                                  irq_get_trigger_type(plic_parent_irq));
+        else
+                pr_warn("cpu%d: parent irq not available\n", cpu);
+        plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+        return 0;
 }
 
 static int __init plic_init(struct device_node *node,
                 struct device_node *parent)
 {
-        int error = 0, nr_handlers, nr_mapped = 0, i;
+        int error = 0, nr_contexts, nr_handlers = 0, i;
         u32 nr_irqs;
+        struct plic_priv *priv;
+        struct plic_handler *handler;
 
-        if (plic_regs) {
-                pr_warn("PLIC already present.\n");
-                return -ENXIO;
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (!priv)
+                return -ENOMEM;
+
+        priv->regs = of_iomap(node, 0);
+        if (WARN_ON(!priv->regs)) {
+                error = -EIO;
+                goto out_free_priv;
         }
-
-        plic_regs = of_iomap(node, 0);
-        if (WARN_ON(!plic_regs))
-                return -EIO;
 
         error = -EINVAL;
         of_property_read_u32(node, "riscv,ndev", &nr_irqs);
         if (WARN_ON(!nr_irqs))
                 goto out_iounmap;
 
-        nr_handlers = of_irq_count(node);
-        if (WARN_ON(!nr_handlers))
-                goto out_iounmap;
-        if (WARN_ON(nr_handlers < num_possible_cpus()))
+        nr_contexts = of_irq_count(node);
+        if (WARN_ON(!nr_contexts))
                 goto out_iounmap;
 
         error = -ENOMEM;
-        plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-                        &plic_irqdomain_ops, NULL);
-        if (WARN_ON(!plic_irqdomain))
+        priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+                        &plic_irqdomain_ops, priv);
+        if (WARN_ON(!priv->irqdomain))
                 goto out_iounmap;
 
-        for (i = 0; i < nr_handlers; i++) {
+        for (i = 0; i < nr_contexts; i++) {
                 struct of_phandle_args parent;
-                struct plic_handler *handler;
                 irq_hw_number_t hwirq;
-                int cpu;
+                int cpu, hartid;
 
                 if (of_irq_parse_one(node, i, &parent)) {
                         pr_err("failed to parse parent for context %d.\n", i);
                         continue;
                 }
 
-                /* skip context holes */
-                if (parent.args[0] == -1)
+                /*
+                 * Skip contexts other than external interrupts for our
+                 * privilege level.
+                 */
+                if (parent.args[0] != RV_IRQ_EXT)
                         continue;
 
-                cpu = plic_find_hart_id(parent.np);
-                if (cpu < 0) {
+                hartid = riscv_of_parent_hartid(parent.np);
+                if (hartid < 0) {
                         pr_warn("failed to parse hart ID for context %d.\n", i);
                         continue;
                 }
 
-                handler = per_cpu_ptr(&plic_handlers, cpu);
-                handler->present = true;
-                handler->ctxid = i;
+                cpu = riscv_hartid_to_cpuid(hartid);
+                if (cpu < 0) {
+                        pr_warn("Invalid cpuid for context %d\n", i);
+                        continue;
+                }
 
-                /* priority must be > threshold to trigger an interrupt */
-                writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+                /* Find parent domain and register chained handler */
+                if (!plic_parent_irq && irq_find_host(parent.np)) {
+                        plic_parent_irq = irq_of_parse_and_map(node, i);
+                        if (plic_parent_irq)
+                                irq_set_chained_handler(plic_parent_irq,
+                                                        plic_handle_irq);
+                }
+
+                /*
+                 * When running in M-mode we need to ignore the S-mode handler.
+                 * Here we assume it always comes later, but that might be a
+                 * little fragile.
+                 */
+                handler = per_cpu_ptr(&plic_handlers, cpu);
+                if (handler->present) {
+                        pr_warn("handler already present for context %d.\n", i);
+                        plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+                        goto done;
+                }
+
+                cpumask_set_cpu(cpu, &priv->lmask);
+                handler->present = true;
+                handler->hart_base =
+                        priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+                raw_spin_lock_init(&handler->enable_lock);
+                handler->enable_base =
+                        priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+                handler->priv = priv;
+done:
                 for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-                        plic_toggle(i, hwirq, 0);
-                nr_mapped++;
+                        plic_toggle(handler, hwirq, 0);
+                nr_handlers++;
         }
 
-        pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-                nr_irqs, nr_mapped, nr_handlers);
-        set_handle_irq(plic_handle_irq);
+        /*
+         * We can have multiple PLIC instances so setup cpuhp state only
+         * when context handler for current/boot CPU is present.
+         */
+        handler = this_cpu_ptr(&plic_handlers);
+        if (handler->present && !plic_cpuhp_setup_done) {
+                cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+                                  "irqchip/sifive/plic:starting",
+                                  plic_starting_cpu, plic_dying_cpu);
+                plic_cpuhp_setup_done = true;
+        }
+
+        pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+                " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
         return 0;
 
 out_iounmap:
-        iounmap(plic_regs);
+        iounmap(priv->regs);
+out_free_priv:
+        kfree(priv);
         return error;
 }
 
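
To make the claim/complete handshake concrete, here is a minimal stand-alone C model of the protocol described in the comment above plic_handle_irq(); the pending[] array is a hypothetical substitute for the hardware's pending/priority logic, so this is an illustration rather than driver code. Reading the claim register hands out a pending source ID and claims it; writing the ID back completes it. Note that with the switch to handle_fasteoi_irq, the completing write has moved out of the drain loop and into plic_irq_eoi():

#include <stdint.h>
#include <stdio.h>

#define NR_SOURCES 8

/* Toy pending bits for source IDs 1..NR_SOURCES; ID 0 means "none". */
static int pending[NR_SOURCES + 1];

/* Models readl(claim): return one pending source and mark it claimed
 * (real hardware picks the highest-priority source; we pick the lowest ID). */
static uint32_t claim_read(void)
{
        int id;

        for (id = 1; id <= NR_SOURCES; id++) {
                if (pending[id]) {
                        pending[id] = 0;
                        return (uint32_t)id;
                }
        }
        return 0;
}

/* Models writel(hwirq, claim): completing the interrupt re-arms the source. */
static void claim_complete(uint32_t id)
{
        printf("completed source %u\n", (unsigned)id);
}

int main(void)
{
        uint32_t hwirq;

        pending[3] = pending[5] = 1;

        /* Mirrors the drain loop in plic_handle_irq(): claim until 0 is read. */
        while ((hwirq = claim_read())) {
                printf("handling source %u\n", (unsigned)hwirq);
                claim_complete(hwirq);  /* done by plic_irq_eoi() in the driver */
        }
        return 0;
}
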