...
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
...
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
...
#define CONTEXT_THRESHOLD	0x00
#define CONTEXT_CLAIM		0x04

-static void __iomem *plic_regs;
+#define	PLIC_DISABLE_THRESHOLD		0x7
+#define	PLIC_ENABLE_THRESHOLD		0
+
+struct plic_priv {
+	struct cpumask lmask;
+	struct irq_domain *irqdomain;
+	void __iomem *regs;
+};

struct plic_handler {
	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
+	struct plic_priv	*priv;
};
+static int plic_parent_irq;
+static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

-static inline void __iomem *plic_hart_offset(int ctxid)
+static inline void plic_toggle(struct plic_handler *handler,
+			       int hwirq, int enable)
{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
-{
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
}

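The enable bank for each context packs one enable bit per interrupt source, 32 sources to a 32-bit word, and plic_toggle() read-modify-writes the word holding hwirq under the per-handler enable_lock. A standalone sketch of the same word/bit arithmetic, using a plain in-memory array instead of MMIO and a hypothetical bound on the number of sources:

/* Sketch of the enable-bit arithmetic used by plic_toggle(); not kernel
 * code, the enable bank is modeled as an array rather than registers.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_IRQS 1024	/* hypothetical upper bound for "riscv,ndev" */

static uint32_t enable_words[MAX_IRQS / 32];

static void toggle(int hwirq, int enable)
{
	uint32_t *word = &enable_words[hwirq / 32];	/* which 32-bit word */
	uint32_t mask = 1U << (hwirq % 32);		/* which bit inside it */

	if (enable)
		*word |= mask;
	else
		*word &= ~mask;
}

int main(void)
{
	toggle(33, 1);	/* source 33 lands in word 1, bit 1 */
	printf("word 1 = 0x%08x\n", enable_words[1]);	/* prints 0x00000002 */
	return 0;
}
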
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+				   struct irq_data *d, int enable)
{
	int cpu;
+	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

-		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+		if (handler->present &&
+		    cpumask_test_cpu(cpu, &handler->priv->lmask))
+			plic_toggle(handler, d->hwirq, enable);
	}
}

-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
{
-	plic_irq_toggle(d, 1);
+	struct cpumask amask;
+	unsigned int cpu;
+	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+			      &amask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
{
-	plic_irq_toggle(d, 0);
+	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+	plic_irq_toggle(&priv->lmask, d, 0);
+}
+
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+	struct cpumask amask;
+	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+	cpumask_and(&amask, &priv->lmask, mask_val);
+
+	if (force)
+		cpu = cpumask_first(&amask);
+	else
+		cpu = cpumask_any_and(&amask, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	plic_irq_toggle(&priv->lmask, d, 0);
+	plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
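plic_set_affinity() narrows whatever mask is requested down to a single hart served by this PLIC: it disables the source on every context in lmask, re-enables it on the chosen CPU, and reports that one CPU as the effective affinity. A hedged sketch of the consuming side, with a hypothetical driver pinning its interrupt to one CPU; irq_set_affinity_hint() is the generic kernel API, the irq number and helper name are illustrative:

/* Hypothetical consumer: steer an already-requested interrupt to one CPU.
 * The PLIC's .irq_set_affinity will reduce the request to a single hart.
 */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int pin_irq_to_cpu(unsigned int my_irq, unsigned int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;

	/* Record the hint and let the core apply it via the irqchip. */
	return irq_set_affinity_hint(my_irq, cpumask_of(cpu));
}
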
+static void plic_irq_eoi(struct irq_data *d)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	if (irqd_irq_masked(d)) {
+		plic_irq_unmask(d);
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+		plic_irq_mask(d);
+	} else {
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+	}
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
-	/*
-	 * There is no need to mask/unmask PLIC interrupts.  They are "masked"
-	 * by reading claim and "unmasked" when writing it back.
-	 */
-	.irq_enable	= plic_irq_enable,
-	.irq_disable	= plic_irq_disable,
+	.irq_mask	= plic_irq_mask,
+	.irq_unmask	= plic_irq_unmask,
+	.irq_eoi	= plic_irq_eoi,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
-	irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
-	irq_set_chip_data(irq, NULL);
+	struct plic_priv *priv = d->host_data;
+
+	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
+	irq_set_affinity(irq, &priv->lmask);
+	return 0;
+}
+
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	struct irq_fwspec *fwspec = arg;
+
+	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++) {
+		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
+
	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
-	.map		= plic_irqdomain_map,
-	.xlate		= irq_domain_xlate_onecell,
+	.translate	= irq_domain_translate_onecell,
+	.alloc		= plic_irq_domain_alloc,
+	.free		= irq_domain_free_irqs_top,
};
-
-static struct irq_domain *plic_irqdomain;

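With the domain switched from .map/.xlate to .translate/.alloc/.free, each one-cell "interrupts" specifier is translated by irq_domain_translate_onecell() and allocated through plic_irq_domain_alloc(); consumers keep using the usual request path. A hedged sketch of that consuming side (the driver name, handler, and interrupt index are illustrative, not from this patch):

/* Hypothetical platform driver whose DT node lists one PLIC interrupt.
 * platform_get_irq() resolves the specifier through the domain above and
 * returns a Linux virq ready for request_irq().
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
				"plic-demo", pdev);
}
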
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
...
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

-	csr_clear(sie, SIE_SEIE);
+	chained_irq_enter(chip, desc);
+
	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
-		writel(hwirq, claim);
	}
-	csr_set(sie, SIE_SEIE);
+
+	chained_irq_exit(chip, desc);
}

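The comment above spells out the claim/complete protocol: reading CONTEXT_CLAIM returns the ID of the highest-priority pending source and disables it, and writing the same ID back re-enables it. With the move to handle_fasteoi_irq, the completing write leaves the loop in plic_handle_irq() and happens in plic_irq_eoi() instead. A standalone sketch of the raw register sequence for one hart context, with assumed MMIO helpers rather than kernel code:

/* Minimal sketch of the PLIC claim/complete protocol for one context.
 * mmio_read32/mmio_write32/dispatch are assumed helpers, not kernel APIs.
 */
#include <stdint.h>

#define CONTEXT_CLAIM	0x04	/* same offset as in the driver */

extern uint32_t mmio_read32(uintptr_t addr);
extern void mmio_write32(uintptr_t addr, uint32_t val);
extern void dispatch(uint32_t hwirq);

static void handle_one_context(uintptr_t hart_base)
{
	uint32_t hwirq;

	/* Claim: returns the highest-priority pending source, or 0 if none. */
	while ((hwirq = mmio_read32(hart_base + CONTEXT_CLAIM)) != 0) {
		dispatch(hwirq);
		/* Complete: writing the ID back re-enables that source. */
		mmio_write32(hart_base + CONTEXT_CLAIM, hwirq);
	}
}
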
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
-	for (; node; node = node->parent) {
-		if (of_device_is_compatible(node, "riscv"))
-			return riscv_of_processor_hart(node);
-	}
+	/* priority must be > threshold to trigger an interrupt */
+	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}

-	return -1;
+static int plic_dying_cpu(unsigned int cpu)
+{
+	if (plic_parent_irq)
+		disable_percpu_irq(plic_parent_irq);
+
+	return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	if (plic_parent_irq)
+		enable_percpu_irq(plic_parent_irq,
+				  irq_get_trigger_type(plic_parent_irq));
+	else
+		pr_warn("cpu%d: parent irq not available\n", cpu);
+	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+	return 0;
}

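plic_set_threshold() programs the per-context priority threshold: a source is delivered only when its priority is strictly greater than the threshold, so plic_starting_cpu() opens a context with PLIC_ENABLE_THRESHOLD (0) while the duplicate-context path in plic_init() below parks one with PLIC_DISABLE_THRESHOLD (7), above the priority of 1 this driver writes for enabled sources. A tiny illustration of that gating rule, with illustrative values:

/* Gating rule the two threshold constants rely on. */
#include <stdbool.h>
#include <stdint.h>

#define PLIC_DISABLE_THRESHOLD	0x7
#define PLIC_ENABLE_THRESHOLD	0

static bool source_can_interrupt(uint32_t priority, uint32_t threshold)
{
	return priority > threshold;
}

/* With the driver programming priority 1 for enabled sources:
 * source_can_interrupt(1, PLIC_ENABLE_THRESHOLD)  -> true  (threshold 0)
 * source_can_interrupt(1, PLIC_DISABLE_THRESHOLD) -> false (threshold 7)
 */
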
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
-	int error = 0, nr_handlers, nr_mapped = 0, i;
+	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
+	struct plic_priv *priv;
+	struct plic_handler *handler;

-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regs = of_iomap(node, 0);
+	if (WARN_ON(!priv->regs)) {
+		error = -EIO;
+		goto out_free_priv;
	}
-
-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
-		return -EIO;

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
-		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	nr_contexts = of_irq_count(node);
+	if (WARN_ON(!nr_contexts))
		goto out_iounmap;

	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
+	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+			&plic_irqdomain_ops, priv);
+	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

-	for (i = 0; i < nr_handlers; i++) {
+	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
-		struct plic_handler *handler;
		irq_hw_number_t hwirq;
-		int cpu;
+		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

-		/* skip context holes */
-		if (parent.args[0] == -1)
+		/*
+		 * Skip contexts other than external interrupts for our
+		 * privilege level.
+		 */
+		if (parent.args[0] != RV_IRQ_EXT)
			continue;

-		cpu = plic_find_hart_id(parent.np);
-		if (cpu < 0) {
+		hartid = riscv_of_parent_hartid(parent.np);
+		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

-		handler = per_cpu_ptr(&plic_handlers, cpu);
-		handler->present = true;
-		handler->ctxid = i;
+		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("Invalid cpuid for context %d\n", i);
+			continue;
+		}

-		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		/* Find parent domain and register chained handler */
+		if (!plic_parent_irq && irq_find_host(parent.np)) {
+			plic_parent_irq = irq_of_parse_and_map(node, i);
+			if (plic_parent_irq)
+				irq_set_chained_handler(plic_parent_irq,
+							plic_handle_irq);
+		}
+
+		/*
+		 * When running in M-mode we need to ignore the S-mode handler.
+		 * Here we assume it always comes later, but that might be a
+		 * little fragile.
+		 */
+		handler = per_cpu_ptr(&plic_handlers, cpu);
+		if (handler->present) {
+			pr_warn("handler already present for context %d.\n", i);
+			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+			goto done;
+		}
+
+		cpumask_set_cpu(cpu, &priv->lmask);
+		handler->present = true;
+		handler->hart_base =
+			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+		handler->priv = priv;
+done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
-		nr_mapped++;
+			plic_toggle(handler, hwirq, 0);
+		nr_handlers++;
	}

-	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
-	set_handle_irq(plic_handle_irq);
+	/*
+	 * We can have multiple PLIC instances so setup cpuhp state only
+	 * when context handler for current/boot CPU is present.
+	 */
+	handler = this_cpu_ptr(&plic_handlers);
+	if (handler->present && !plic_cpuhp_setup_done) {
+		cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+				  "irqchip/sifive/plic:starting",
+				  plic_starting_cpu, plic_dying_cpu);
+		plic_cpuhp_setup_done = true;
+	}
+
+	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
	return 0;

out_iounmap:
-	iounmap(plic_regs);
+	iounmap(priv->regs);
+out_free_priv:
+	kfree(priv);
	return error;
}