2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
--- a/kernel/drivers/irqchip/irq-gic-v3-its.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-its.c
@@ -268,13 +268,23 @@
 	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 }
 
+static struct irq_chip its_vpe_irq_chip;
+
 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
 {
-	struct its_vlpi_map *map = get_vlpi_map(d);
+	struct its_vpe *vpe = NULL;
 	int cpu;
 
-	if (map) {
-		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	if (d->chip == &its_vpe_irq_chip) {
+		vpe = irq_data_get_irq_chip_data(d);
+	} else {
+		struct its_vlpi_map *map = get_vlpi_map(d);
+		if (map)
+			vpe = map->vpe;
+	}
+
+	if (vpe) {
+		cpu = vpe_to_cpuid_lock(vpe, flags);
 	} else {
 		/* Physical LPIs are already locked via the irq_desc lock */
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -288,10 +298,18 @@
 
 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-	struct its_vlpi_map *map = get_vlpi_map(d);
+	struct its_vpe *vpe = NULL;
 
-	if (map)
-		vpe_to_cpuid_unlock(map->vpe, flags);
+	if (d->chip == &its_vpe_irq_chip) {
+		vpe = irq_data_get_irq_chip_data(d);
+	} else {
+		struct its_vlpi_map *map = get_vlpi_map(d);
+		if (map)
+			vpe = map->vpe;
+	}
+
+	if (vpe)
+		vpe_to_cpuid_unlock(vpe, flags);
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
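
[Aside, not part of the diff] The two hunks above repeat the same vPE lookup. A minimal sketch of how that lookup could be factored into a shared helper, assuming only the definitions visible above; the name irq_data_to_vpe() is hypothetical:

/* Hypothetical helper (sketch): resolve the vPE behind an irq_data, if any */
static struct its_vpe *irq_data_to_vpe(struct irq_data *d)
{
	struct its_vlpi_map *map;

	/* A vPE interrupt carries its struct its_vpe as chip data */
	if (d->chip == &its_vpe_irq_chip)
		return irq_data_get_irq_chip_data(d);

	/* A vLPI records its target vPE in its mapping, if one exists */
	map = get_vlpi_map(d);
	return map ? map->vpe : NULL;
}
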
@@ -1423,13 +1441,28 @@
 		cpu_relax();
 }
 
+static void __direct_lpi_inv(struct irq_data *d, u64 val)
+{
+	void __iomem *rdbase;
+	unsigned long flags;
+	int cpu;
+
+	/* Target the redistributor this LPI is currently routed to */
+	cpu = irq_to_cpuid_lock(d, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+	gic_write_lpir(val, rdbase + GICR_INVLPIR);
+	wait_for_syncr(rdbase);
+
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	irq_to_cpuid_unlock(d, flags);
+}
+
 static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
-	void __iomem *rdbase;
-	unsigned long flags;
 	u64 val;
-	int cpu;
 
 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
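
[Aside, not part of the diff] The new __direct_lpi_inv() nests the per-redistributor rd_lock inside whatever lock irq_to_cpuid_lock() takes: the vPE lock for vPE-backed interrupts, or the irq_desc lock the caller already holds for physical LPIs. For reference, the vpe_to_cpuid_lock()/vpe_to_cpuid_unlock() pair it relies on reads approximately as follows in the upstream kernel (quoted for context; this tree may differ):

/* Pin the vPE's target CPU: it cannot change while vpe_lock is held */
static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}
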
@@ -1443,15 +1476,7 @@
 		val = d->hwirq;
 	}
 
-	/* Target the redistributor this LPI is currently routed to */
-	cpu = irq_to_cpuid_lock(d, &flags);
-	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
-	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
-	gic_write_lpir(val, rdbase + GICR_INVLPIR);
-
-	wait_for_syncr(rdbase);
-	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
-	irq_to_cpuid_unlock(d, flags);
+	__direct_lpi_inv(d, val);
 }
 
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
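
[Aside, not part of the diff] The val passed to __direct_lpi_inv() is only a bare INTID for physical LPIs. In the if (map) branch elided above, the upstream kernel encodes the GICv4.1 virtual form roughly as follows (quoted for reference only; these lines are not touched by this diff and the vendor tree may differ):

	/* vLPI invalidation: encode vPE ID and virtual INTID into GICR_INVLPIR */
	val  = GICR_INVLPIR_V;
	val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
	val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
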
@@ -1493,7 +1518,7 @@
 	 *
 	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
 	 * value or to 1023, depending on the enable bit. But that
-	 * would be issueing a mapping for an /existing/ DevID+EventID
+	 * would be issuing a mapping for an /existing/ DevID+EventID
 	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
 	 * to the /same/ vPE, using this opportunity to adjust the
 	 * doorbell. Mouahahahaha. We loves it, Precious.
@@ -3172,7 +3197,7 @@
 
 	/*
 	 * It's possible for CPU to receive VLPIs before it is
-	 * sheduled as a vPE, especially for the first CPU, and the
+	 * scheduled as a vPE, especially for the first CPU, and the
 	 * VLPI with INTID larger than 2^(IDbits+1) will be considered
 	 * as out of range and dropped by GIC.
 	 * So we initialize IDbits to known value to avoid VLPI drop.
@@ -3694,7 +3719,7 @@
 
 	/*
 	 * If all interrupts have been freed, start mopping the
-	 * floor. This is conditionned on the device not being shared.
+	 * floor. This is conditioned on the device not being shared.
 	 */
 	if (!its_dev->shared &&
 	    bitmap_empty(its_dev->event_map.lpi_map,
@@ -3999,18 +4024,10 @@
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 
-	if (gic_rdists->has_direct_lpi) {
-		void __iomem *rdbase;
-
-		/* Target the redistributor this VPE is currently known on */
-		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
-		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
-		wait_for_syncr(rdbase);
-		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-	} else {
+	if (gic_rdists->has_direct_lpi)
+		__direct_lpi_inv(d, d->parent_data->hwirq);
+	else
 		its_vpe_send_cmd(vpe, its_send_inv);
-	}
 }
 
 static void its_vpe_mask_irq(struct irq_data *d)
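
[Aside, not part of the diff] Besides removing duplication, this hunk fixes a subtle race: the old body read vpe->col_idx with nothing preventing a concurrent vPE affinity change from moving the vPE to another redistributor mid-sequence. Routing through __direct_lpi_inv() resolves the CPU via irq_to_cpuid_lock(), which the its_vpe_irq_chip branch added earlier makes take the vPE lock. An abridged before/after sketch (rd_lock handling omitted):

	/* Before: col_idx sampled with no lock held against affinity changes */
	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;

	/* After: the target CPU stays pinned under vpe_lock for the sequence */
	cpu = irq_to_cpuid_lock(d, &flags);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);
	irq_to_cpuid_unlock(d, flags);
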
@@ -4272,7 +4289,7 @@
 {
 	/*
 	 * There is no notion of affinity for virtual SGIs, at least
-	 * not on the host (since they can only be targetting a vPE).
+	 * not on the host (since they can only be targeting a vPE).
 	 * Tell the kernel we've done whatever it asked for.
 	 */
 	irq_data_update_effective_affinity(d, mask_val);
@@ -4317,7 +4334,7 @@
 	/*
 	 * Locking galore! We can race against two different events:
 	 *
-	 * - Concurent vPE affinity change: we must make sure it cannot
+	 * - Concurrent vPE affinity change: we must make sure it cannot
 	 *   happen, or we'll talk to the wrong redistributor. This is
 	 *   identical to what happens with vLPIs.
 	 *