forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/irqchip/irq-gic-v3.c
....@@ -1,18 +1,7 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
34 * Author: Marc Zyngier <marc.zyngier@arm.com>
4
- *
5
- * This program is free software; you can redistribute it and/or modify
6
- * it under the terms of the GNU General Public License version 2 as
7
- * published by the Free Software Foundation.
8
- *
9
- * This program is distributed in the hope that it will be useful,
10
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
- * GNU General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
165 */
176
187 #define pr_fmt(fmt) "GICv3: " fmt
....@@ -27,8 +16,11 @@
2716 #include <linux/of_address.h>
2817 #include <linux/of_irq.h>
2918 #include <linux/percpu.h>
19
+#include <linux/refcount.h>
3020 #include <linux/slab.h>
21
+#include <linux/syscore_ops.h>
3122 #include <linux/wakeup_reason.h>
23
+#include <trace/hooks/gic_v3.h>
3224
3325
3426 #include <linux/irqchip.h>
....@@ -41,7 +33,16 @@
4133 #include <asm/smp_plat.h>
4234 #include <asm/virt.h>
4335
36
+#include <trace/hooks/gic.h>
37
+
4438 #include "irq-gic-common.h"
39
+
40
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
41
+
42
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
43
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
44
+
45
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
4546
4647 struct redist_region {
4748 void __iomem *redist_base;
....@@ -49,21 +50,71 @@
4950 bool single_redist;
5051 };
5152
52
-struct gic_chip_data {
53
- struct fwnode_handle *fwnode;
54
- void __iomem *dist_base;
55
- struct redist_region *redist_regions;
56
- struct rdists rdists;
57
- struct irq_domain *domain;
58
- u64 redist_stride;
59
- u32 nr_redist_regions;
60
- bool has_rss;
61
- unsigned int irq_nr;
62
- struct partition_desc *ppi_descs[16];
63
-};
64
-
6553 static struct gic_chip_data gic_data __read_mostly;
6654 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
55
+
56
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
57
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
58
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
59
+
60
+/*
61
+ * The behaviours of RPR and PMR registers differ depending on the value of
62
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
63
+ * distributor and redistributors depends on whether security is enabled in the
64
+ * GIC.
65
+ *
66
+ * When security is enabled, non-secure priority values from the (re)distributor
67
+ * are presented to the GIC CPUIF as follow:
68
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
69
+ *
70
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
71
+ * EL1 are subject to a similar operation thus matching the priorities presented
72
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
73
+ * these values are unchanged by the GIC.
74
+ *
75
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
76
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
77
+ * priorities.
78
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
79
+ * interrupt.
80
+ */
81
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
82
+
83
+/*
84
+ * Global static key controlling whether an update to PMR allowing more
85
+ * interrupts requires to be propagated to the redistributor (DSB SY).
86
+ * And this needs to be exported for modules to be able to enable
87
+ * interrupts...
88
+ */
89
+DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
90
+EXPORT_SYMBOL(gic_pmr_sync);
91
+
92
+DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
93
+EXPORT_SYMBOL(gic_nonsecure_priorities);
94
+
95
+/*
96
+ * When the Non-secure world has access to group 0 interrupts (as a
97
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
98
+ * return the Distributor's view of the interrupt priority.
99
+ *
100
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
101
+ * written by software is moved to the Non-secure range by the Distributor.
102
+ *
103
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
104
+ * we need to shift down the priority programmed by software to match it
105
+ * against the value returned by ICC_RPR_EL1.
106
+ */
107
+#define GICD_INT_RPR_PRI(priority) \
108
+ ({ \
109
+ u32 __priority = (priority); \
110
+ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
111
+ __priority = 0x80 | (__priority >> 1); \
112
+ \
113
+ __priority; \
114
+ })
115
+
116
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
117
+static refcount_t *ppi_nmi_refs;
67118
68119 static struct gic_kvm_info gic_v3_kvm_info;
69120 static DEFINE_PER_CPU(bool, has_rss);
....@@ -76,32 +127,82 @@
76127 /* Our default, arbitrary priority value. Linux only uses one anyway. */
77128 #define DEFAULT_PMR_VALUE 0xf0
78129
130
+enum gic_intid_range {
131
+ SGI_RANGE,
132
+ PPI_RANGE,
133
+ SPI_RANGE,
134
+ EPPI_RANGE,
135
+ ESPI_RANGE,
136
+ LPI_RANGE,
137
+ __INVALID_RANGE__
138
+};
139
+
140
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
141
+{
142
+ switch (hwirq) {
143
+ case 0 ... 15:
144
+ return SGI_RANGE;
145
+ case 16 ... 31:
146
+ return PPI_RANGE;
147
+ case 32 ... 1019:
148
+ return SPI_RANGE;
149
+ case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
150
+ return EPPI_RANGE;
151
+ case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
152
+ return ESPI_RANGE;
153
+ case 8192 ... GENMASK(23, 0):
154
+ return LPI_RANGE;
155
+ default:
156
+ return __INVALID_RANGE__;
157
+ }
158
+}
159
+
160
+static enum gic_intid_range get_intid_range(struct irq_data *d)
161
+{
162
+ return __get_intid_range(d->hwirq);
163
+}
164
+
79165 static inline unsigned int gic_irq(struct irq_data *d)
80166 {
81167 return d->hwirq;
82168 }
83169
84
-static inline int gic_irq_in_rdist(struct irq_data *d)
170
+static inline bool gic_irq_in_rdist(struct irq_data *d)
85171 {
86
- return gic_irq(d) < 32;
172
+ switch (get_intid_range(d)) {
173
+ case SGI_RANGE:
174
+ case PPI_RANGE:
175
+ case EPPI_RANGE:
176
+ return true;
177
+ default:
178
+ return false;
179
+ }
87180 }
88181
89182 static inline void __iomem *gic_dist_base(struct irq_data *d)
90183 {
91
- if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
184
+ switch (get_intid_range(d)) {
185
+ case SGI_RANGE:
186
+ case PPI_RANGE:
187
+ case EPPI_RANGE:
188
+ /* SGI+PPI -> SGI_base for this CPU */
92189 return gic_data_rdist_sgi_base();
93190
94
- if (d->hwirq <= 1023) /* SPI -> dist_base */
191
+ case SPI_RANGE:
192
+ case ESPI_RANGE:
193
+ /* SPI -> dist_base */
95194 return gic_data.dist_base;
96195
97
- return NULL;
196
+ default:
197
+ return NULL;
198
+ }
98199 }
99200
100
-static void gic_do_wait_for_rwp(void __iomem *base)
201
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
101202 {
102203 u32 count = 1000000; /* 1s! */
103204
104
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
205
+ while (readl_relaxed(base + GICD_CTLR) & bit) {
105206 count--;
106207 if (!count) {
107208 pr_err_ratelimited("RWP timeout, gone fishing\n");
....@@ -109,19 +210,19 @@
109210 }
110211 cpu_relax();
111212 udelay(1);
112
- };
213
+ }
113214 }
114215
115216 /* Wait for completion of a distributor change */
116217 static void gic_dist_wait_for_rwp(void)
117218 {
118
- gic_do_wait_for_rwp(gic_data.dist_base);
219
+ gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
119220 }
120221
121222 /* Wait for completion of a redistributor change */
122223 static void gic_redist_wait_for_rwp(void)
123224 {
124
- gic_do_wait_for_rwp(gic_data_rdist_rd_base());
225
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
125226 }
126227
127228 #ifdef CONFIG_ARM64
....@@ -140,6 +241,9 @@
140241 void __iomem *rbase;
141242 u32 count = 1000000; /* 1s! */
142243 u32 val;
244
+
245
+ if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
246
+ return;
143247
144248 rbase = gic_data_rdist_rd_base();
145249
....@@ -163,7 +267,7 @@
163267 break;
164268 cpu_relax();
165269 udelay(1);
166
- };
270
+ }
167271 if (!count)
168272 pr_err_ratelimited("redistributor failed to %s...\n",
169273 enable ? "wakeup" : "sleep");
....@@ -172,24 +276,80 @@
172276 /*
173277 * Routines to disable, enable, EOI and route interrupts
174278 */
279
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
280
+{
281
+ switch (get_intid_range(d)) {
282
+ case SGI_RANGE:
283
+ case PPI_RANGE:
284
+ case SPI_RANGE:
285
+ *index = d->hwirq;
286
+ return offset;
287
+ case EPPI_RANGE:
288
+ /*
289
+ * Contrary to the ESPI range, the EPPI range is contiguous
290
+ * to the PPI range in the registers, so let's adjust the
291
+ * displacement accordingly. Consistency is overrated.
292
+ */
293
+ *index = d->hwirq - EPPI_BASE_INTID + 32;
294
+ return offset;
295
+ case ESPI_RANGE:
296
+ *index = d->hwirq - ESPI_BASE_INTID;
297
+ switch (offset) {
298
+ case GICD_ISENABLER:
299
+ return GICD_ISENABLERnE;
300
+ case GICD_ICENABLER:
301
+ return GICD_ICENABLERnE;
302
+ case GICD_ISPENDR:
303
+ return GICD_ISPENDRnE;
304
+ case GICD_ICPENDR:
305
+ return GICD_ICPENDRnE;
306
+ case GICD_ISACTIVER:
307
+ return GICD_ISACTIVERnE;
308
+ case GICD_ICACTIVER:
309
+ return GICD_ICACTIVERnE;
310
+ case GICD_IPRIORITYR:
311
+ return GICD_IPRIORITYRnE;
312
+ case GICD_ICFGR:
313
+ return GICD_ICFGRnE;
314
+ case GICD_IROUTER:
315
+ return GICD_IROUTERnE;
316
+ default:
317
+ break;
318
+ }
319
+ break;
320
+ default:
321
+ break;
322
+ }
323
+
324
+ WARN_ON(1);
325
+ *index = d->hwirq;
326
+ return offset;
327
+}
328
+
175329 static int gic_peek_irq(struct irq_data *d, u32 offset)
176330 {
177
- u32 mask = 1 << (gic_irq(d) % 32);
178331 void __iomem *base;
332
+ u32 index, mask;
333
+
334
+ offset = convert_offset_index(d, offset, &index);
335
+ mask = 1 << (index % 32);
179336
180337 if (gic_irq_in_rdist(d))
181338 base = gic_data_rdist_sgi_base();
182339 else
183340 base = gic_data.dist_base;
184341
185
- return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
342
+ return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
186343 }
187344
188345 static void gic_poke_irq(struct irq_data *d, u32 offset)
189346 {
190
- u32 mask = 1 << (gic_irq(d) % 32);
191347 void (*rwp_wait)(void);
192348 void __iomem *base;
349
+ u32 index, mask;
350
+
351
+ offset = convert_offset_index(d, offset, &index);
352
+ mask = 1 << (index % 32);
193353
194354 if (gic_irq_in_rdist(d)) {
195355 base = gic_data_rdist_sgi_base();
....@@ -199,7 +359,7 @@
199359 rwp_wait = gic_dist_wait_for_rwp;
200360 }
201361
202
- writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
362
+ writel_relaxed(mask, base + offset + (index / 32) * 4);
203363 rwp_wait();
204364 }
205365
....@@ -228,21 +388,18 @@
228388 gic_poke_irq(d, GICD_ISENABLER);
229389 }
230390
231
-#ifdef CONFIG_ARCH_ROCKCHIP
232
-static int gic_retrigger(struct irq_data *d)
391
+static inline bool gic_supports_nmi(void)
233392 {
234
- gic_poke_irq(d, GICD_ISPENDR);
235
- /* the genirq layer expects 0 if we can't retrigger in hardware */
236
- return 0;
393
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
394
+ static_branch_likely(&supports_pseudo_nmis);
237395 }
238
-#endif
239396
240397 static int gic_irq_set_irqchip_state(struct irq_data *d,
241398 enum irqchip_irq_state which, bool val)
242399 {
243400 u32 reg;
244401
245
- if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
402
+ if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
246403 return -EINVAL;
247404
248405 switch (which) {
....@@ -269,7 +426,7 @@
269426 static int gic_irq_get_irqchip_state(struct irq_data *d,
270427 enum irqchip_irq_state which, bool *val)
271428 {
272
- if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
429
+ if (d->hwirq >= 8192) /* PPI/SPI only */
273430 return -EINVAL;
274431
275432 switch (which) {
....@@ -292,6 +449,98 @@
292449 return 0;
293450 }
294451
452
+static void gic_irq_set_prio(struct irq_data *d, u8 prio)
453
+{
454
+ void __iomem *base = gic_dist_base(d);
455
+ u32 offset, index;
456
+
457
+ offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
458
+
459
+ writeb_relaxed(prio, base + offset + index);
460
+}
461
+
462
+static u32 gic_get_ppi_index(struct irq_data *d)
463
+{
464
+ switch (get_intid_range(d)) {
465
+ case PPI_RANGE:
466
+ return d->hwirq - 16;
467
+ case EPPI_RANGE:
468
+ return d->hwirq - EPPI_BASE_INTID + 16;
469
+ default:
470
+ unreachable();
471
+ }
472
+}
473
+
474
+static int gic_irq_nmi_setup(struct irq_data *d)
475
+{
476
+ struct irq_desc *desc = irq_to_desc(d->irq);
477
+
478
+ if (!gic_supports_nmi())
479
+ return -EINVAL;
480
+
481
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
482
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
483
+ return -EINVAL;
484
+ }
485
+
486
+ /*
487
+ * A secondary irq_chip should be in charge of LPI request,
488
+ * it should not be possible to get there
489
+ */
490
+ if (WARN_ON(gic_irq(d) >= 8192))
491
+ return -EINVAL;
492
+
493
+ /* desc lock should already be held */
494
+ if (gic_irq_in_rdist(d)) {
495
+ u32 idx = gic_get_ppi_index(d);
496
+
497
+ /* Setting up PPI as NMI, only switch handler for first NMI */
498
+ if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
499
+ refcount_set(&ppi_nmi_refs[idx], 1);
500
+ desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
501
+ }
502
+ } else {
503
+ desc->handle_irq = handle_fasteoi_nmi;
504
+ }
505
+
506
+ gic_irq_set_prio(d, GICD_INT_NMI_PRI);
507
+
508
+ return 0;
509
+}
510
+
511
+static void gic_irq_nmi_teardown(struct irq_data *d)
512
+{
513
+ struct irq_desc *desc = irq_to_desc(d->irq);
514
+
515
+ if (WARN_ON(!gic_supports_nmi()))
516
+ return;
517
+
518
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
519
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
520
+ return;
521
+ }
522
+
523
+ /*
524
+ * A secondary irq_chip should be in charge of LPI request,
525
+ * it should not be possible to get there
526
+ */
527
+ if (WARN_ON(gic_irq(d) >= 8192))
528
+ return;
529
+
530
+ /* desc lock should already be held */
531
+ if (gic_irq_in_rdist(d)) {
532
+ u32 idx = gic_get_ppi_index(d);
533
+
534
+ /* Tearing down NMI, only switch handler for last NMI */
535
+ if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
536
+ desc->handle_irq = handle_percpu_devid_irq;
537
+ } else {
538
+ desc->handle_irq = handle_fasteoi_irq;
539
+ }
540
+
541
+ gic_irq_set_prio(d, GICD_INT_DEF_PRI);
542
+}
543
+
295544 static void gic_eoi_irq(struct irq_data *d)
296545 {
297546 gic_write_eoir(gic_irq(d));
....@@ -310,17 +559,22 @@
310559
311560 static int gic_set_type(struct irq_data *d, unsigned int type)
312561 {
562
+ enum gic_intid_range range;
313563 unsigned int irq = gic_irq(d);
314564 void (*rwp_wait)(void);
315565 void __iomem *base;
566
+ u32 offset, index;
567
+ int ret;
568
+
569
+ range = get_intid_range(d);
316570
317571 /* Interrupt configuration for SGIs can't be changed */
318
- if (irq < 16)
319
- return -EINVAL;
572
+ if (range == SGI_RANGE)
573
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
320574
321575 /* SPIs have restrictions on the supported types */
322
- if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
323
- type != IRQ_TYPE_EDGE_RISING)
576
+ if ((range == SPI_RANGE || range == ESPI_RANGE) &&
577
+ type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
324578 return -EINVAL;
325579
326580 if (gic_irq_in_rdist(d)) {
....@@ -331,11 +585,23 @@
331585 rwp_wait = gic_dist_wait_for_rwp;
332586 }
333587
334
- return gic_configure_irq(irq, type, base, rwp_wait);
588
+ offset = convert_offset_index(d, GICD_ICFGR, &index);
589
+
590
+ ret = gic_configure_irq(index, type, base + offset, rwp_wait);
591
+ if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
592
+ /* Misconfigured PPIs are usually not fatal */
593
+ pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
594
+ ret = 0;
595
+ }
596
+
597
+ return ret;
335598 }
336599
337600 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
338601 {
602
+ if (get_intid_range(d) == SGI_RANGE)
603
+ return -EINVAL;
604
+
339605 if (vcpu)
340606 irqd_set_forwarded_to_vcpu(d);
341607 else
....@@ -355,54 +621,143 @@
355621 return aff;
356622 }
357623
624
+static void gic_deactivate_unhandled(u32 irqnr)
625
+{
626
+ if (static_branch_likely(&supports_deactivate_key)) {
627
+ if (irqnr < 8192)
628
+ gic_write_dir(irqnr);
629
+ } else {
630
+ gic_write_eoir(irqnr);
631
+ }
632
+}
633
+
634
+static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
635
+{
636
+ bool irqs_enabled = interrupts_enabled(regs);
637
+ int err;
638
+
639
+ if (irqs_enabled)
640
+ nmi_enter();
641
+
642
+ if (static_branch_likely(&supports_deactivate_key))
643
+ gic_write_eoir(irqnr);
644
+ /*
645
+ * Leave the PSR.I bit set to prevent other NMIs to be
646
+ * received while handling this one.
647
+ * PSR.I will be restored when we ERET to the
648
+ * interrupted context.
649
+ */
650
+ err = handle_domain_nmi(gic_data.domain, irqnr, regs);
651
+ if (err)
652
+ gic_deactivate_unhandled(irqnr);
653
+
654
+ if (irqs_enabled)
655
+ nmi_exit();
656
+}
657
+
658
+static u32 do_read_iar(struct pt_regs *regs)
659
+{
660
+ u32 iar;
661
+
662
+ if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
663
+ u64 pmr;
664
+
665
+ /*
666
+ * We were in a context with IRQs disabled. However, the
667
+ * entry code has set PMR to a value that allows any
668
+ * interrupt to be acknowledged, and not just NMIs. This can
669
+ * lead to surprising effects if the NMI has been retired in
670
+ * the meantime, and that there is an IRQ pending. The IRQ
671
+ * would then be taken in NMI context, something that nobody
672
+ * wants to debug twice.
673
+ *
674
+ * Until we sort this, drop PMR again to a level that will
675
+ * actually only allow NMIs before reading IAR, and then
676
+ * restore it to what it was.
677
+ */
678
+ pmr = gic_read_pmr();
679
+ gic_pmr_mask_irqs();
680
+ isb();
681
+
682
+ iar = gic_read_iar();
683
+
684
+ gic_write_pmr(pmr);
685
+ } else {
686
+ iar = gic_read_iar();
687
+ }
688
+
689
+ return iar;
690
+}
691
+
358692 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
359693 {
360694 u32 irqnr;
361695
362
- do {
363
- irqnr = gic_read_iar();
696
+ irqnr = do_read_iar(regs);
364697
365
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
366
- int err;
698
+ /* Check for special IDs first */
699
+ if ((irqnr >= 1020 && irqnr <= 1023))
700
+ return;
367701
368
- if (static_branch_likely(&supports_deactivate_key))
369
- gic_write_eoir(irqnr);
370
- else
371
- isb();
702
+ if (gic_supports_nmi() &&
703
+ unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
704
+ gic_handle_nmi(irqnr, regs);
705
+ return;
706
+ }
372707
373
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
374
- if (err) {
375
- WARN_ONCE(true, "Unexpected interrupt received!\n");
376
- log_abnormal_wakeup_reason(
377
- "unexpected HW IRQ %u", irqnr);
378
- if (static_branch_likely(&supports_deactivate_key)) {
379
- if (irqnr < 8192)
380
- gic_write_dir(irqnr);
381
- } else {
382
- gic_write_eoir(irqnr);
383
- }
384
- }
385
- continue;
386
- }
387
- if (irqnr < 16) {
388
- gic_write_eoir(irqnr);
389
- if (static_branch_likely(&supports_deactivate_key))
390
- gic_write_dir(irqnr);
391
-#ifdef CONFIG_SMP
392
- /*
393
- * Unlike GICv2, we don't need an smp_rmb() here.
394
- * The control dependency from gic_read_iar to
395
- * the ISB in gic_write_eoir is enough to ensure
396
- * that any shared data read by handle_IPI will
397
- * be read after the ACK.
398
- */
399
- handle_IPI(irqnr, regs);
400
-#else
401
- WARN_ONCE(true, "Unexpected SGI received!\n");
402
-#endif
403
- continue;
404
- }
405
- } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
708
+ if (gic_prio_masking_enabled()) {
709
+ gic_pmr_mask_irqs();
710
+ gic_arch_enable_irqs();
711
+ }
712
+
713
+ if (static_branch_likely(&supports_deactivate_key))
714
+ gic_write_eoir(irqnr);
715
+ else
716
+ isb();
717
+
718
+ if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
719
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
720
+ log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
721
+ gic_deactivate_unhandled(irqnr);
722
+ }
723
+}
724
+
725
+static u32 gic_get_pribits(void)
726
+{
727
+ u32 pribits;
728
+
729
+ pribits = gic_read_ctlr();
730
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
731
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
732
+ pribits++;
733
+
734
+ return pribits;
735
+}
736
+
737
+static bool gic_has_group0(void)
738
+{
739
+ u32 val;
740
+ u32 old_pmr;
741
+
742
+ old_pmr = gic_read_pmr();
743
+
744
+ /*
745
+ * Let's find out if Group0 is under control of EL3 or not by
746
+ * setting the highest possible, non-zero priority in PMR.
747
+ *
748
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
749
+ * order for the CPU interface to set bit 7, and keep the
750
+ * actual priority in the non-secure range. In the process, it
751
+ * loses the least significant bit and the actual priority
752
+ * becomes 0x80. Reading it back returns 0, indicating that
753
+ * we don't have access to Group0.
754
+ */
755
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
756
+ val = gic_read_pmr();
757
+
758
+ gic_write_pmr(old_pmr);
759
+
760
+ return val != 0;
406761 }
407762
408763 static void __init gic_dist_init(void)
....@@ -410,6 +765,7 @@
410765 unsigned int i;
411766 u64 affinity;
412767 void __iomem *base = gic_data.dist_base;
768
+ u32 val;
413769
414770 /* Disable the distributor */
415771 writel_relaxed(0, base + GICD_CTLR);
....@@ -421,22 +777,46 @@
421777 * do the right thing if the kernel is running in secure mode,
422778 * but that's not the intended use case anyway.
423779 */
424
- for (i = 32; i < gic_data.irq_nr; i += 32)
780
+ for (i = 32; i < GIC_LINE_NR; i += 32)
425781 writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
426782
427
- gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
783
+ /* Extended SPI range, not handled by the GICv2/GICv3 common code */
784
+ for (i = 0; i < GIC_ESPI_NR; i += 32) {
785
+ writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
786
+ writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
787
+ }
788
+
789
+ for (i = 0; i < GIC_ESPI_NR; i += 32)
790
+ writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
791
+
792
+ for (i = 0; i < GIC_ESPI_NR; i += 16)
793
+ writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
794
+
795
+ for (i = 0; i < GIC_ESPI_NR; i += 4)
796
+ writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
797
+
798
+ /* Now do the common stuff, and wait for the distributor to drain */
799
+ gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
800
+
801
+ val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
802
+ if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
803
+ pr_info("Enabling SGIs without active state\n");
804
+ val |= GICD_CTLR_nASSGIreq;
805
+ }
428806
429807 /* Enable distributor with ARE, Group1 */
430
- writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
431
- base + GICD_CTLR);
808
+ writel_relaxed(val, base + GICD_CTLR);
432809
433810 /*
434811 * Set all global interrupts to the boot CPU only. ARE must be
435812 * enabled.
436813 */
437814 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
438
- for (i = 32; i < gic_data.irq_nr; i++)
815
+ for (i = 32; i < GIC_LINE_NR; i++)
439816 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
817
+
818
+ for (i = 0; i < GIC_ESPI_NR; i++)
819
+ gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
440820 }
441821
442822 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
....@@ -496,6 +876,7 @@
496876 typer = gic_read_typer(ptr + GICR_TYPER);
497877 if ((typer >> 32) == aff) {
498878 u64 offset = ptr - region->redist_base;
879
+ raw_spin_lock_init(&gic_data_rdist()->rd_lock);
499880 gic_data_rdist_rd_base() = ptr;
500881 gic_data_rdist()->phys_base = region->phys_base + offset;
501882
....@@ -522,22 +903,65 @@
522903 return -ENODEV;
523904 }
524905
525
-static int __gic_update_vlpi_properties(struct redist_region *region,
526
- void __iomem *ptr)
906
+static int __gic_update_rdist_properties(struct redist_region *region,
907
+ void __iomem *ptr)
527908 {
528909 u64 typer = gic_read_typer(ptr + GICR_TYPER);
910
+
911
+ /* Boot-time cleanup */
912
+ if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
913
+ u64 val;
914
+
915
+ /* Deactivate any present vPE */
916
+ val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
917
+ if (val & GICR_VPENDBASER_Valid)
918
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
919
+ ptr + SZ_128K + GICR_VPENDBASER);
920
+
921
+ /* Mark the VPE table as invalid */
922
+ val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
923
+ val &= ~GICR_VPROPBASER_4_1_VALID;
924
+ gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
925
+ }
926
+
529927 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
530
- gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
928
+
929
+ /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
930
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
931
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
932
+ gic_data.rdists.has_rvpeid);
933
+ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
934
+
935
+ /* Detect non-sensical configurations */
936
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
937
+ gic_data.rdists.has_direct_lpi = false;
938
+ gic_data.rdists.has_vlpis = false;
939
+ gic_data.rdists.has_rvpeid = false;
940
+ }
941
+
942
+ gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
531943
532944 return 1;
533945 }
534946
535
-static void gic_update_vlpi_properties(void)
947
+static void gic_update_rdist_properties(void)
536948 {
537
- gic_iterate_rdists(__gic_update_vlpi_properties);
538
- pr_info("%sVLPI support, %sdirect LPI support\n",
539
- !gic_data.rdists.has_vlpis ? "no " : "",
540
- !gic_data.rdists.has_direct_lpi ? "no " : "");
949
+ gic_data.ppi_nr = UINT_MAX;
950
+ gic_iterate_rdists(__gic_update_rdist_properties);
951
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
952
+ gic_data.ppi_nr = 0;
953
+ pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
954
+ if (gic_data.rdists.has_vlpis)
955
+ pr_info("GICv4 features: %s%s%s\n",
956
+ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
957
+ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
958
+ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
959
+}
960
+
961
+/* Check whether it's single security state view */
962
+static inline bool gic_dist_security_disabled(void)
963
+{
964
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
541965 }
542966
543967 static void gic_cpu_sys_reg_init(void)
....@@ -546,7 +970,7 @@
546970 u64 mpidr = cpu_logical_map(cpu);
547971 u64 need_rss = MPIDR_RS(mpidr);
548972 bool group0;
549
- u32 val, pribits;
973
+ u32 pribits;
550974
551975 /*
552976 * Need to check that the SRE bit has actually been set. If
....@@ -558,28 +982,28 @@
558982 if (!gic_enable_sre())
559983 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
560984
561
- pribits = gic_read_ctlr();
562
- pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
563
- pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
564
- pribits++;
985
+ pribits = gic_get_pribits();
565986
566
- /*
567
- * Let's find out if Group0 is under control of EL3 or not by
568
- * setting the highest possible, non-zero priority in PMR.
569
- *
570
- * If SCR_EL3.FIQ is set, the priority gets shifted down in
571
- * order for the CPU interface to set bit 7, and keep the
572
- * actual priority in the non-secure range. In the process, it
573
- * looses the least significant bit and the actual priority
574
- * becomes 0x80. Reading it back returns 0, indicating that
575
- * we're don't have access to Group0.
576
- */
577
- write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
578
- val = read_gicreg(ICC_PMR_EL1);
579
- group0 = val != 0;
987
+ group0 = gic_has_group0();
580988
581989 /* Set priority mask register */
582
- write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
990
+ if (!gic_prio_masking_enabled()) {
991
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
992
+ } else if (gic_supports_nmi()) {
993
+ /*
994
+ * Mismatch configuration with boot CPU, the system is likely
995
+ * to die as interrupt masking will not work properly on all
996
+ * CPUs
997
+ *
998
+ * The boot CPU calls this function before enabling NMI support,
999
+ * and as a result we'll never see this warning in the boot path
1000
+ * for that CPU.
1001
+ */
1002
+ if (static_branch_unlikely(&gic_nonsecure_priorities))
1003
+ WARN_ON(!group0 || gic_dist_security_disabled());
1004
+ else
1005
+ WARN_ON(group0 && !gic_dist_security_disabled());
1006
+ }
5831007
5841008 /*
5851009 * Some firmwares hand over to the kernel with the BPR changed from
....@@ -604,8 +1028,10 @@
6041028 case 7:
6051029 write_gicreg(0, ICC_AP0R3_EL1);
6061030 write_gicreg(0, ICC_AP0R2_EL1);
1031
+ fallthrough;
6071032 case 6:
6081033 write_gicreg(0, ICC_AP0R1_EL1);
1034
+ fallthrough;
6091035 case 5:
6101036 case 4:
6111037 write_gicreg(0, ICC_AP0R0_EL1);
....@@ -619,8 +1045,10 @@
6191045 case 7:
6201046 write_gicreg(0, ICC_AP1R3_EL1);
6211047 write_gicreg(0, ICC_AP1R2_EL1);
1048
+ fallthrough;
6221049 case 6:
6231050 write_gicreg(0, ICC_AP1R1_EL1);
1051
+ fallthrough;
6241052 case 5:
6251053 case 4:
6261054 write_gicreg(0, ICC_AP1R0_EL1);
....@@ -666,12 +1094,15 @@
6661094
6671095 static int gic_dist_supports_lpis(void)
6681096 {
669
- return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
1097
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1098
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1099
+ !gicv3_nolpi);
6701100 }
6711101
6721102 static void gic_cpu_init(void)
6731103 {
6741104 void __iomem *rbase;
1105
+ int i;
6751106
6761107 /* Register ourselves with the rest of the world */
6771108 if (gic_populate_rdist())
....@@ -679,16 +1110,18 @@
6791110
6801111 gic_enable_redist(true);
6811112
1113
+ WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1114
+ !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1115
+ "Distributor has extended ranges, but CPU%d doesn't\n",
1116
+ smp_processor_id());
1117
+
6821118 rbase = gic_data_rdist_sgi_base();
6831119
6841120 /* Configure SGIs/PPIs as non-secure Group-1 */
685
- writel_relaxed(~0, rbase + GICR_IGROUPR0);
1121
+ for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1122
+ writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
6861123
687
- gic_cpu_config(rbase, gic_redist_wait_for_rwp);
688
-
689
- /* Give LPIs a spin */
690
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
691
- its_cpu_init();
1124
+ gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
6921125
6931126 /* initialise system registers */
6941127 gic_cpu_sys_reg_init();
....@@ -702,6 +1135,10 @@
7021135 static int gic_starting_cpu(unsigned int cpu)
7031136 {
7041137 gic_cpu_init();
1138
+
1139
+ if (gic_dist_supports_lpis())
1140
+ its_cpu_init();
1141
+
7051142 return 0;
7061143 }
7071144
....@@ -751,11 +1188,11 @@
7511188 gic_write_sgi1r(val);
7521189 }
7531190
754
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
1191
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
7551192 {
7561193 int cpu;
7571194
758
- if (WARN_ON(irq >= 16))
1195
+ if (WARN_ON(d->hwirq >= 16))
7591196 return;
7601197
7611198 /*
....@@ -769,25 +1206,40 @@
7691206 u16 tlist;
7701207
7711208 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
772
- gic_send_sgi(cluster_id, tlist, irq);
1209
+ gic_send_sgi(cluster_id, tlist, d->hwirq);
7731210 }
7741211
7751212 /* Force the above writes to ICC_SGI1R_EL1 to be executed */
7761213 isb();
7771214 }
7781215
779
-static void gic_smp_init(void)
1216
+static void __init gic_smp_init(void)
7801217 {
781
- set_smp_cross_call(gic_raise_softirq);
1218
+ struct irq_fwspec sgi_fwspec = {
1219
+ .fwnode = gic_data.fwnode,
1220
+ .param_count = 1,
1221
+ };
1222
+ int base_sgi;
1223
+
7821224 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
7831225 "irqchip/arm/gicv3:starting",
7841226 gic_starting_cpu, NULL);
1227
+
1228
+ /* Register all 8 non-secure SGIs */
1229
+ base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
1230
+ NUMA_NO_NODE, &sgi_fwspec,
1231
+ false, NULL);
1232
+ if (WARN_ON(base_sgi <= 0))
1233
+ return;
1234
+
1235
+ set_smp_ipi_range(base_sgi, 8);
7851236 }
7861237
7871238 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
7881239 bool force)
7891240 {
7901241 unsigned int cpu;
1242
+ u32 offset, index;
7911243 void __iomem *reg;
7921244 int enabled;
7931245 u64 val;
....@@ -808,9 +1260,11 @@
8081260 if (enabled)
8091261 gic_mask_irq(d);
8101262
811
- reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
1263
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
1264
+ reg = gic_dist_base(d) + offset + (index * 8);
8121265 val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
8131266
1267
+ trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d));
8141268 gic_write_irouter(val, reg);
8151269
8161270 /*
....@@ -828,16 +1282,16 @@
8281282 }
8291283 #else
8301284 #define gic_set_affinity NULL
1285
+#define gic_ipi_send_mask NULL
8311286 #define gic_smp_init() do { } while(0)
8321287 #endif
8331288
834
-#ifdef CONFIG_CPU_PM
835
-/* Check whether it's single security state view */
836
-static bool gic_dist_security_disabled(void)
1289
+static int gic_retrigger(struct irq_data *data)
8371290 {
838
- return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
1291
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
8391292 }
8401293
1294
+#ifdef CONFIG_CPU_PM
8411295 static int gic_cpu_pm_notifier(struct notifier_block *self,
8421296 unsigned long cmd, void *v)
8431297 {
....@@ -865,18 +1319,41 @@
8651319 static inline void gic_cpu_pm_init(void) { }
8661320 #endif /* CONFIG_CPU_PM */
8671321
1322
+#ifdef CONFIG_PM
1323
+void gic_resume(void)
1324
+{
1325
+ trace_android_vh_gic_resume(&gic_data);
1326
+}
1327
+EXPORT_SYMBOL_GPL(gic_resume);
1328
+
1329
+static struct syscore_ops gic_syscore_ops = {
1330
+ .resume = gic_resume,
1331
+};
1332
+
1333
+static void gic_syscore_init(void)
1334
+{
1335
+ register_syscore_ops(&gic_syscore_ops);
1336
+}
1337
+
1338
+#else
1339
+static inline void gic_syscore_init(void) { }
1340
+void gic_resume(void) { }
1341
+#endif
1342
+
1343
+
8681344 static struct irq_chip gic_chip = {
8691345 .name = "GICv3",
8701346 .irq_mask = gic_mask_irq,
8711347 .irq_unmask = gic_unmask_irq,
8721348 .irq_eoi = gic_eoi_irq,
8731349 .irq_set_type = gic_set_type,
874
-#ifdef CONFIG_ARCH_ROCKCHIP
875
- .irq_retrigger = gic_retrigger,
876
-#endif
8771350 .irq_set_affinity = gic_set_affinity,
1351
+ .irq_retrigger = gic_retrigger,
8781352 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
8791353 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1354
+ .irq_nmi_setup = gic_irq_nmi_setup,
1355
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
1356
+ .ipi_send_mask = gic_ipi_send_mask,
8801357 .flags = IRQCHIP_SET_TYPE_MASKED |
8811358 IRQCHIP_SKIP_SET_WAKE |
8821359 IRQCHIP_MASK_ON_SUSPEND,
....@@ -887,71 +1364,79 @@
8871364 .irq_mask = gic_eoimode1_mask_irq,
8881365 .irq_unmask = gic_unmask_irq,
8891366 .irq_eoi = gic_eoimode1_eoi_irq,
890
-#ifdef CONFIG_ARCH_ROCKCHIP
891
- .irq_retrigger = gic_retrigger,
892
-#endif
8931367 .irq_set_type = gic_set_type,
8941368 .irq_set_affinity = gic_set_affinity,
1369
+ .irq_retrigger = gic_retrigger,
8951370 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
8961371 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
8971372 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
1373
+ .irq_nmi_setup = gic_irq_nmi_setup,
1374
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
1375
+ .ipi_send_mask = gic_ipi_send_mask,
8981376 .flags = IRQCHIP_SET_TYPE_MASKED |
8991377 IRQCHIP_SKIP_SET_WAKE |
9001378 IRQCHIP_MASK_ON_SUSPEND,
9011379 };
9021380
903
-#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
904
-
9051381 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
9061382 irq_hw_number_t hw)
9071383 {
9081384 struct irq_chip *chip = &gic_chip;
1385
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
9091386
9101387 if (static_branch_likely(&supports_deactivate_key))
9111388 chip = &gic_eoimode1_chip;
9121389
913
- /* SGIs are private to the core kernel */
914
- if (hw < 16)
915
- return -EPERM;
916
- /* Nothing here */
917
- if (hw >= gic_data.irq_nr && hw < 8192)
918
- return -EPERM;
919
- /* Off limits */
920
- if (hw >= GIC_ID_NR)
921
- return -EPERM;
1390
+ switch (__get_intid_range(hw)) {
1391
+ case SGI_RANGE:
1392
+ irq_set_percpu_devid(irq);
1393
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
1394
+ handle_percpu_devid_fasteoi_ipi,
1395
+ NULL, NULL);
1396
+ break;
9221397
923
- /* PPIs */
924
- if (hw < 32) {
1398
+ case PPI_RANGE:
1399
+ case EPPI_RANGE:
9251400 irq_set_percpu_devid(irq);
9261401 irq_domain_set_info(d, irq, hw, chip, d->host_data,
9271402 handle_percpu_devid_irq, NULL, NULL);
928
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
929
- }
930
- /* SPIs */
931
- if (hw >= 32 && hw < gic_data.irq_nr) {
1403
+ break;
1404
+
1405
+ case SPI_RANGE:
1406
+ case ESPI_RANGE:
9321407 irq_domain_set_info(d, irq, hw, chip, d->host_data,
9331408 handle_fasteoi_irq, NULL, NULL);
9341409 irq_set_probe(irq);
935
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
936
- }
937
- /* LPIs */
938
- if (hw >= 8192 && hw < GIC_ID_NR) {
1410
+ irqd_set_single_target(irqd);
1411
+ break;
1412
+
1413
+ case LPI_RANGE:
9391414 if (!gic_dist_supports_lpis())
9401415 return -EPERM;
9411416 irq_domain_set_info(d, irq, hw, chip, d->host_data,
9421417 handle_fasteoi_irq, NULL, NULL);
1418
+ break;
1419
+
1420
+ default:
1421
+ return -EPERM;
9431422 }
9441423
1424
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
1425
+ irqd_set_handle_enforce_irqctx(irqd);
9451426 return 0;
9461427 }
947
-
948
-#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
9491428
9501429 static int gic_irq_domain_translate(struct irq_domain *d,
9511430 struct irq_fwspec *fwspec,
9521431 unsigned long *hwirq,
9531432 unsigned int *type)
9541433 {
1434
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1435
+ *hwirq = fwspec->param[0];
1436
+ *type = IRQ_TYPE_EDGE_RISING;
1437
+ return 0;
1438
+ }
1439
+
9551440 if (is_of_node(fwspec->fwnode)) {
9561441 if (fwspec->param_count < 3)
9571442 return -EINVAL;
....@@ -961,11 +1446,23 @@
9611446 *hwirq = fwspec->param[1] + 32;
9621447 break;
9631448 case 1: /* PPI */
964
- case GIC_IRQ_TYPE_PARTITION:
9651449 *hwirq = fwspec->param[1] + 16;
1450
+ break;
1451
+ case 2: /* ESPI */
1452
+ *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1453
+ break;
1454
+ case 3: /* EPPI */
1455
+ *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
9661456 break;
9671457 case GIC_IRQ_TYPE_LPI: /* LPI */
9681458 *hwirq = fwspec->param[1];
1459
+ break;
1460
+ case GIC_IRQ_TYPE_PARTITION:
1461
+ *hwirq = fwspec->param[1];
1462
+ if (fwspec->param[1] >= 16)
1463
+ *hwirq += EPPI_BASE_INTID - 16;
1464
+ else
1465
+ *hwirq += 16;
9691466 break;
9701467 default:
9711468 return -EINVAL;
....@@ -985,6 +1482,12 @@
9851482 if (is_fwnode_irqchip(fwspec->fwnode)) {
9861483 if(fwspec->param_count != 2)
9871484 return -EINVAL;
1485
+
1486
+ if (fwspec->param[0] < 16) {
1487
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
1488
+ fwspec->param[0]);
1489
+ return -EINVAL;
1490
+ }
9881491
9891492 *hwirq = fwspec->param[0];
9901493 *type = fwspec->param[1];
....@@ -1046,7 +1549,8 @@
10461549 * then we need to match the partition domain.
10471550 */
10481551 if (fwspec->param_count >= 4 &&
1049
- fwspec->param[0] == 1 && fwspec->param[3] != 0)
1552
+ fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
1553
+ gic_data.ppi_descs)
10501554 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
10511555
10521556 return d == gic_data.domain;
....@@ -1066,6 +1570,9 @@
10661570 {
10671571 struct device_node *np;
10681572 int ret;
1573
+
1574
+ if (!gic_data.ppi_descs)
1575
+ return -ENOMEM;
10691576
10701577 np = of_find_node_by_phandle(fwspec->param[3]);
10711578 if (WARN_ON(!np))
....@@ -1087,6 +1594,142 @@
10871594 .select = gic_irq_domain_select,
10881595 };
10891596
1597
+static bool gic_enable_quirk_msm8996(void *data)
1598
+{
1599
+ struct gic_chip_data *d = data;
1600
+
1601
+ d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1602
+
1603
+ return true;
1604
+}
1605
+
1606
+static bool gic_enable_quirk_cavium_38539(void *data)
1607
+{
1608
+ struct gic_chip_data *d = data;
1609
+
1610
+ d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1611
+
1612
+ return true;
1613
+}
1614
+
1615
+static bool gic_enable_quirk_hip06_07(void *data)
1616
+{
1617
+ struct gic_chip_data *d = data;
1618
+
1619
+ /*
1620
+ * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1621
+ * not being an actual ARM implementation). The saving grace is
1622
+ * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1623
+ * HIP07 doesn't even have a proper IIDR, and still pretends to
1624
+ * have ESPI. In both cases, put them right.
1625
+ */
1626
+ if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1627
+ /* Zero both ESPI and the RES0 field next to it... */
1628
+ d->rdists.gicd_typer &= ~GENMASK(9, 8);
1629
+ return true;
1630
+ }
1631
+
1632
+ return false;
1633
+}
1634
+
1635
+static const struct gic_quirk gic_quirks[] = {
1636
+ {
1637
+ .desc = "GICv3: Qualcomm MSM8996 broken firmware",
1638
+ .compatible = "qcom,msm8996-gic-v3",
1639
+ .init = gic_enable_quirk_msm8996,
1640
+ },
1641
+ {
1642
+ .desc = "GICv3: HIP06 erratum 161010803",
1643
+ .iidr = 0x0204043b,
1644
+ .mask = 0xffffffff,
1645
+ .init = gic_enable_quirk_hip06_07,
1646
+ },
1647
+ {
1648
+ .desc = "GICv3: HIP07 erratum 161010803",
1649
+ .iidr = 0x00000000,
1650
+ .mask = 0xffffffff,
1651
+ .init = gic_enable_quirk_hip06_07,
1652
+ },
1653
+ {
1654
+ /*
1655
+ * Reserved register accesses generate a Synchronous
1656
+ * External Abort. This erratum applies to:
1657
+ * - ThunderX: CN88xx
1658
+ * - OCTEON TX: CN83xx, CN81xx
1659
+ * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1660
+ */
1661
+ .desc = "GICv3: Cavium erratum 38539",
1662
+ .iidr = 0xa000034c,
1663
+ .mask = 0xe8f00fff,
1664
+ .init = gic_enable_quirk_cavium_38539,
1665
+ },
1666
+ {
1667
+ }
1668
+};
1669
+
1670
+static void gic_enable_nmi_support(void)
1671
+{
1672
+ int i;
1673
+
1674
+ if (!gic_prio_masking_enabled())
1675
+ return;
1676
+
1677
+ ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1678
+ if (!ppi_nmi_refs)
1679
+ return;
1680
+
1681
+ for (i = 0; i < gic_data.ppi_nr; i++)
1682
+ refcount_set(&ppi_nmi_refs[i], 0);
1683
+
1684
+ /*
1685
+ * Linux itself doesn't use 1:N distribution, so has no need to
1686
+ * set PMHE. The only reason to have it set is if EL3 requires it
1687
+ * (and we can't change it).
1688
+ */
1689
+ if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
1690
+ static_branch_enable(&gic_pmr_sync);
1691
+
1692
+ pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1693
+ static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
1694
+
1695
+ /*
1696
+ * How priority values are used by the GIC depends on two things:
1697
+ * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
1698
+ * and if Group 0 interrupts can be delivered to Linux in the non-secure
1699
+ * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
1700
+ * the ICC_PMR_EL1 register and the priority that software assigns to
1701
+ * interrupts:
1702
+ *
1703
+ * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1704
+ * -----------------------------------------------------------
1705
+ * 1 | - | unchanged | unchanged
1706
+ * -----------------------------------------------------------
1707
+ * 0 | 1 | non-secure | non-secure
1708
+ * -----------------------------------------------------------
1709
+ * 0 | 0 | unchanged | non-secure
1710
+ *
1711
+ * where non-secure means that the value is right-shifted by one and the
1712
+ * MSB bit set, to make it fit in the non-secure priority range.
1713
+ *
1714
+ * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1715
+ * are both either modified or unchanged, we can use the same set of
1716
+ * priorities.
1717
+ *
1718
+ * In the last case, where only the interrupt priorities are modified to
1719
+ * be in the non-secure range, we use a different PMR value to mask IRQs
1720
+ * and the rest of the values that we use remain unchanged.
1721
+ */
1722
+ if (gic_has_group0() && !gic_dist_security_disabled())
1723
+ static_branch_enable(&gic_nonsecure_priorities);
1724
+
1725
+ static_branch_enable(&supports_pseudo_nmis);
1726
+
1727
+ if (static_branch_likely(&supports_deactivate_key))
1728
+ gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1729
+ else
1730
+ gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1731
+}
1732
+
10901733 static int __init gic_init_bases(void __iomem *dist_base,
10911734 struct redist_region *rdist_regs,
10921735 u32 nr_redist_regions,
....@@ -1094,7 +1737,6 @@
10941737 struct fwnode_handle *handle)
10951738 {
10961739 u32 typer;
1097
- int gic_irqs;
10981740 int err;
10991741
11001742 if (!is_hyp_mode_available())
....@@ -1111,26 +1753,37 @@
11111753
11121754 /*
11131755 * Find out how many interrupts are supported.
1114
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
11151756 */
11161757 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
11171758 gic_data.rdists.gicd_typer = typer;
1118
- gic_irqs = GICD_TYPER_IRQS(typer);
1119
- if (gic_irqs > 1020)
1120
- gic_irqs = 1020;
1121
- gic_data.irq_nr = gic_irqs;
1759
+
1760
+ gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1761
+ gic_quirks, &gic_data);
1762
+
1763
+ pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1764
+ pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
1765
+
1766
+ /*
1767
+ * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
1768
+ * architecture spec (which says that reserved registers are RES0).
1769
+ */
1770
+ if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1771
+ gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
11221772
11231773 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
11241774 &gic_data);
1125
- irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
11261775 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1776
+ gic_data.rdists.has_rvpeid = true;
11271777 gic_data.rdists.has_vlpis = true;
11281778 gic_data.rdists.has_direct_lpi = true;
1779
+ gic_data.rdists.has_vpend_valid_dirty = true;
11291780
11301781 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
11311782 err = -ENOMEM;
11321783 goto out_free;
11331784 }
1785
+
1786
+ irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
11341787
11351788 gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
11361789 pr_info("Distributor has %sRange Selector support\n",
....@@ -1144,15 +1797,23 @@
11441797
11451798 set_handle_irq(gic_handle_irq);
11461799
1147
- gic_update_vlpi_properties();
1800
+ gic_update_rdist_properties();
11481801
1149
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
1150
- its_init(handle, &gic_data.rdists, gic_data.domain);
1151
-
1152
- gic_smp_init();
11531802 gic_dist_init();
11541803 gic_cpu_init();
1804
+ gic_smp_init();
11551805 gic_cpu_pm_init();
1806
+ gic_syscore_init();
1807
+
1808
+ if (gic_dist_supports_lpis()) {
1809
+ its_init(handle, &gic_data.rdists, gic_data.domain);
1810
+ its_cpu_init();
1811
+ } else {
1812
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1813
+ gicv2m_init(handle, gic_data.domain);
1814
+ }
1815
+
1816
+ gic_enable_nmi_support();
11561817
11571818 return 0;
11581819
....@@ -1185,6 +1846,10 @@
11851846 if (!parts_node)
11861847 return;
11871848
1849
+ gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1850
+ if (!gic_data.ppi_descs)
1851
+ goto out_put_node;
1852
+
11881853 nr_parts = of_get_child_count(parts_node);
11891854
11901855 if (!nr_parts)
....@@ -1202,8 +1867,8 @@
12021867
12031868 part->partition_id = of_node_to_fwnode(child_part);
12041869
1205
- pr_info("GIC: PPI partition %s[%d] { ",
1206
- child_part->name, part_idx);
1870
+ pr_info("GIC: PPI partition %pOFn[%d] { ",
1871
+ child_part, part_idx);
12071872
12081873 n = of_property_count_elems_of_size(child_part, "affinity",
12091874 sizeof(u32));
....@@ -1224,19 +1889,22 @@
12241889 continue;
12251890
12261891 cpu = of_cpu_node_to_id(cpu_node);
1227
- if (WARN_ON(cpu < 0))
1892
+ if (WARN_ON(cpu < 0)) {
1893
+ of_node_put(cpu_node);
12281894 continue;
1895
+ }
12291896
12301897 pr_cont("%pOF[%d] ", cpu_node, cpu);
12311898
12321899 cpumask_set_cpu(cpu, &part->mask);
1900
+ of_node_put(cpu_node);
12331901 }
12341902
12351903 pr_cont("}\n");
12361904 part_idx++;
12371905 }
12381906
1239
- for (i = 0; i < 16; i++) {
1907
+ for (i = 0; i < gic_data.ppi_nr; i++) {
12401908 unsigned int irq;
12411909 struct partition_desc *desc;
12421910 struct irq_fwspec ppi_fwspec = {
....@@ -1286,10 +1954,11 @@
12861954 gic_v3_kvm_info.vcpu = r;
12871955
12881956 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1957
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
12891958 gic_set_kvm_info(&gic_v3_kvm_info);
12901959 }
12911960
1292
-static int __init gicv3_of_init(struct device_node *node, struct device_node *parent)
1961
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
12931962 {
12941963 void __iomem *dist_base;
12951964 struct redist_region *rdist_regs;
....@@ -1336,6 +2005,8 @@
13362005 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
13372006 redist_stride = 0;
13382007
2008
+ gic_enable_of_quirks(node, gic_quirks, &gic_data);
2009
+
13392010 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
13402011 redist_stride, &node->fwnode);
13412012 if (err)
....@@ -1357,7 +2028,7 @@
13572028 return err;
13582029 }
13592030
1360
-IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gicv3_of_init);
2031
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
13612032
13622033 #ifdef CONFIG_ACPI
13632034 static struct
....@@ -1384,7 +2055,7 @@
13842055 }
13852056
13862057 static int __init
1387
-gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
2058
+gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
13882059 const unsigned long end)
13892060 {
13902061 struct acpi_madt_generic_redistributor *redist =
....@@ -1402,7 +2073,7 @@
14022073 }
14032074
14042075 static int __init
1405
-gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
2076
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
14062077 const unsigned long end)
14072078 {
14082079 struct acpi_madt_generic_interrupt *gicc =
....@@ -1444,14 +2115,14 @@
14442115 return -ENODEV;
14452116 }
14462117
1447
-static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
2118
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
14482119 const unsigned long end)
14492120 {
14502121 /* Subtable presence means that redist exists, that's it */
14512122 return 0;
14522123 }
14532124
1454
-static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
2125
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
14552126 const unsigned long end)
14562127 {
14572128 struct acpi_madt_generic_interrupt *gicc =
....@@ -1521,7 +2192,7 @@
15212192 return true;
15222193 }
15232194
1524
-static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
2195
+static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
15252196 const unsigned long end)
15262197 {
15272198 struct acpi_madt_generic_interrupt *gicc =
....@@ -1599,11 +2270,12 @@
15992270 }
16002271
16012272 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2273
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
16022274 gic_set_kvm_info(&gic_v3_kvm_info);
16032275 }
16042276
16052277 static int __init
1606
-gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
2278
+gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
16072279 {
16082280 struct acpi_madt_generic_distributor *dist;
16092281 struct fwnode_handle *domain_handle;
....@@ -1637,7 +2309,7 @@
16372309 if (err)
16382310 goto out_redist_unmap;
16392311
1640
- domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
2312
+ domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
16412313 if (!domain_handle) {
16422314 err = -ENOMEM;
16432315 goto out_redist_unmap;