@@ -268,13 +268,23 @@
     raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 }
 
+static struct irq_chip its_vpe_irq_chip;
+
 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
 {
-    struct its_vlpi_map *map = get_vlpi_map(d);
+    struct its_vpe *vpe = NULL;
     int cpu;
 
-    if (map) {
-        cpu = vpe_to_cpuid_lock(map->vpe, flags);
+    if (d->chip == &its_vpe_irq_chip) {
+        vpe = irq_data_get_irq_chip_data(d);
+    } else {
+        struct its_vlpi_map *map = get_vlpi_map(d);
+        if (map)
+            vpe = map->vpe;
+    }
+
+    if (vpe) {
+        cpu = vpe_to_cpuid_lock(vpe, flags);
     } else {
         /* Physical LPIs are already locked via the irq_desc lock */
         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
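This hunk is from the GICv3 ITS driver (drivers/irqchip/irq-gic-v3-its.c). irq_to_cpuid_lock() now resolves a vPE from two kinds of irq_data: for a vPE doorbell interrupt, d->chip is its_vpe_irq_chip (hence the new forward declaration) and chip_data is the its_vpe itself; for anything else, the vPE, if there is one, hangs off the VLPI map. A minimal standalone sketch of that dispatch pattern follows; all the stub types and the irq_to_vpe() helper name are hypothetical, only the branch logic mirrors the hunk:

```c
#include <stddef.h>

/* Hypothetical stand-ins for the kernel types, just enough for the
 * sketch to compile on its own. */
struct irq_chip { const char *name; };
struct its_vpe { int vpe_id; };
struct its_vlpi_map { struct its_vpe *vpe; };
struct irq_data { const struct irq_chip *chip; void *chip_data; };

static const struct irq_chip its_vpe_irq_chip = { .name = "vpe" };

/* For a vPE doorbell, chip_data is the its_vpe itself */
static void *irq_data_get_irq_chip_data(struct irq_data *d)
{
    return d->chip_data;
}

/* Stub: returns the VLPI map, or NULL for a purely physical LPI */
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
    (void)d;
    return NULL;
}

/* The dispatch pattern the hunk introduces: identify the owner of the
 * irq_data by comparing chip pointers, then fetch the vPE accordingly. */
static struct its_vpe *irq_to_vpe(struct irq_data *d)
{
    struct its_vpe *vpe = NULL;

    if (d->chip == &its_vpe_irq_chip) {
        vpe = irq_data_get_irq_chip_data(d);
    } else {
        struct its_vlpi_map *map = get_vlpi_map(d);

        if (map)
            vpe = map->vpe;
    }

    return vpe;
}
```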
@@ -288,10 +298,18 @@
 
 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-    struct its_vlpi_map *map = get_vlpi_map(d);
+    struct its_vpe *vpe = NULL;
 
-    if (map)
-        vpe_to_cpuid_unlock(map->vpe, flags);
+    if (d->chip == &its_vpe_irq_chip) {
+        vpe = irq_data_get_irq_chip_data(d);
+    } else {
+        struct its_vlpi_map *map = get_vlpi_map(d);
+        if (map)
+            vpe = map->vpe;
+    }
+
+    if (vpe)
+        vpe_to_cpuid_unlock(vpe, flags);
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
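The unlock side duplicates the same resolution deliberately: if it resolved the vPE any differently from irq_to_cpuid_lock(), a vpe_lock taken on the lock side could be left held. The contract callers must observe (a fragment, not standalone; __direct_lpi_inv() in the next hunk is the in-tree user of exactly this pairing):

```c
cpu = irq_to_cpuid_lock(d, &flags);  /* may take vpe->vpe_lock, irqs off */
/* ... access the redistributor that 'cpu' names ... */
irq_to_cpuid_unlock(d, flags);       /* resolves the same vPE and unlocks */
```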
@@ -1423,13 +1441,28 @@
         cpu_relax();
 }
 
+static void __direct_lpi_inv(struct irq_data *d, u64 val)
+{
+    void __iomem *rdbase;
+    unsigned long flags;
+    int cpu;
+
+    /* Target the redistributor this LPI is currently routed to */
+    cpu = irq_to_cpuid_lock(d, &flags);
+    raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+
+    rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+    gic_write_lpir(val, rdbase + GICR_INVLPIR);
+    wait_for_syncr(rdbase);
+
+    raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+    irq_to_cpuid_unlock(d, flags);
+}
+
 static void direct_lpi_inv(struct irq_data *d)
 {
     struct its_vlpi_map *map = get_vlpi_map(d);
-    void __iomem *rdbase;
-    unsigned long flags;
     u64 val;
-    int cpu;
 
     if (map) {
         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
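Note the lock ordering the new helper pins down: the vPE residency lock (taken inside irq_to_cpuid_lock(), with interrupts disabled via flags) is the outer lock, and the per-redistributor rd_lock nests inside it. The same sequence, condensed and annotated:

```c
cpu = irq_to_cpuid_lock(d, &flags);                /* outer: vpe_lock if d maps to a vPE */
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);  /* inner: serialize GICR_INVLPIR users */

rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVLPIR);        /* direct invalidate, no ITS command queue */
wait_for_syncr(rdbase);                            /* poll GICR_SYNCR until the RD is done */

raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
irq_to_cpuid_unlock(d, flags);
```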
@@ -1443,15 +1476,7 @@
         val = d->hwirq;
     }
 
-    /* Target the redistributor this LPI is currently routed to */
-    cpu = irq_to_cpuid_lock(d, &flags);
-    raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
-    rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
-    gic_write_lpir(val, rdbase + GICR_INVLPIR);
-
-    wait_for_syncr(rdbase);
-    raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
-    irq_to_cpuid_unlock(d, flags);
+    __direct_lpi_inv(d, val);
 }
 
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
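With the redistributor access hoisted into the helper, direct_lpi_inv() is reduced to computing the invalidation value and delegating. A skeleton of the resulting function (the mapped-case computation stays elided here, as it is in the hunk above):

```c
static void direct_lpi_inv(struct irq_data *d)
{
    struct its_vlpi_map *map = get_vlpi_map(d);
    u64 val;

    if (map) {
        /* ... elided in the diff: build val for the VLPI case ... */
    } else {
        /* Physical LPI: the hwirq number itself is the value */
        val = d->hwirq;
    }

    __direct_lpi_inv(d, val);
}
```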
@@ -1493,7 +1518,7 @@
      *
      * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
      * value or to 1023, depending on the enable bit. But that
-     * would be issueing a mapping for an /existing/ DevID+EventID
+     * would be issuing a mapping for an /existing/ DevID+EventID
      * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
      * to the /same/ vPE, using this opportunity to adjust the
      * doorbell. Mouahahahaha. We loves it, Precious.
@@ -3172,7 +3197,7 @@
 
     /*
      * It's possible for CPU to receive VLPIs before it is
-     * sheduled as a vPE, especially for the first CPU, and the
+     * scheduled as a vPE, especially for the first CPU, and the
      * VLPI with INTID larger than 2^(IDbits+1) will be considered
      * as out of range and dropped by GIC.
      * So we initialize IDbits to known value to avoid VLPI drop.
@@ -3694,7 +3719,7 @@
 
     /*
      * If all interrupts have been freed, start mopping the
-     * floor. This is conditionned on the device not being shared.
+     * floor. This is conditioned on the device not being shared.
      */
     if (!its_dev->shared &&
         bitmap_empty(its_dev->event_map.lpi_map,
@@ -3999,18 +4024,10 @@
 {
     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 
-    if (gic_rdists->has_direct_lpi) {
-        void __iomem *rdbase;
-
-        /* Target the redistributor this VPE is currently known on */
-        raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-        rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
-        gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
-        wait_for_syncr(rdbase);
-        raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-    } else {
+    if (gic_rdists->has_direct_lpi)
+        __direct_lpi_inv(d, d->parent_data->hwirq);
+    else
         its_vpe_send_cmd(vpe, its_send_inv);
-    }
 }
 
 static void its_vpe_mask_irq(struct irq_data *d)
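This hunk appears to be what the irq_to_cpuid_lock() change at the top exists for, and why its_vpe_irq_chip needed a forward declaration. The old code picked the redistributor from vpe->col_idx with only rd_lock held, leaving a window for a concurrent vPE affinity change, which is the very race the "Locking galore!" comment below describes. Routing through __direct_lpi_inv() sends the lookup through irq_to_cpuid_lock(), which recognizes the vPE irqchip and takes the vPE lock first. The resulting function, reconstructed from the hunk with explanatory comments added:

```c
static void its_vpe_send_inv(struct irq_data *d)
{
    struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

    /*
     * d->chip is its_vpe_irq_chip here, so irq_to_cpuid_lock() inside
     * the helper resolves the vPE from chip_data and pins its location
     * with vpe_to_cpuid_lock() before the redistributor is touched.
     * The value written to GICR_INVLPIR is the vPE's doorbell LPI,
     * i.e. the hwirq of the parent irq_data.
     */
    if (gic_rdists->has_direct_lpi)
        __direct_lpi_inv(d, d->parent_data->hwirq);
    else
        its_vpe_send_cmd(vpe, its_send_inv);
}
```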
@@ -4272,7 +4289,7 @@
 {
     /*
      * There is no notion of affinity for virtual SGIs, at least
-     * not on the host (since they can only be targetting a vPE).
+     * not on the host (since they can only be targeting a vPE).
      * Tell the kernel we've done whatever it asked for.
      */
     irq_data_update_effective_affinity(d, mask_val);
@@ -4317,7 +4334,7 @@
     /*
      * Locking galore! We can race against two different events:
      *
-     * - Concurent vPE affinity change: we must make sure it cannot
+     * - Concurrent vPE affinity change: we must make sure it cannot
      *   happen, or we'll talk to the wrong redistributor. This is
      *   identical to what happens with vLPIs.
      *