.. | .. |
268 | 268 | raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); |
269 | 269 | } |
270 | 270 | |
| 271 | +static struct irq_chip its_vpe_irq_chip; |
| 272 | + |
271 | 273 | static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) |
272 | 274 | { |
273 | | - struct its_vlpi_map *map = get_vlpi_map(d); |
| 275 | + struct its_vpe *vpe = NULL; |
274 | 276 | int cpu; |
275 | 277 | |
276 | | - if (map) { |
277 | | - cpu = vpe_to_cpuid_lock(map->vpe, flags); |
| 278 | + if (d->chip == &its_vpe_irq_chip) { |
| 279 | + vpe = irq_data_get_irq_chip_data(d); |
| 280 | + } else { |
| 281 | + struct its_vlpi_map *map = get_vlpi_map(d); |
| 282 | + if (map) |
| 283 | + vpe = map->vpe; |
| 284 | + } |
| 285 | + |
| 286 | + if (vpe) { |
| 287 | + cpu = vpe_to_cpuid_lock(vpe, flags); |
278 | 288 | } else { |
279 | 289 | /* Physical LPIs are already locked via the irq_desc lock */ |
280 | 290 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
.. | .. |
288 | 298 | |
289 | 299 | static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) |
290 | 300 | { |
291 | | - struct its_vlpi_map *map = get_vlpi_map(d); |
| 301 | + struct its_vpe *vpe = NULL; |
292 | 302 | |
293 | | - if (map) |
294 | | - vpe_to_cpuid_unlock(map->vpe, flags); |
| 303 | + if (d->chip == &its_vpe_irq_chip) { |
| 304 | + vpe = irq_data_get_irq_chip_data(d); |
| 305 | + } else { |
| 306 | + struct its_vlpi_map *map = get_vlpi_map(d); |
| 307 | + if (map) |
| 308 | + vpe = map->vpe; |
| 309 | + } |
| 310 | + |
| 311 | + if (vpe) |
| 312 | + vpe_to_cpuid_unlock(vpe, flags); |
295 | 313 | } |
296 | 314 | |
297 | 315 | static struct its_collection *valid_col(struct its_collection *col) |
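
The two hunks above hinge on one fact: for a vPE doorbell interrupt the irq_chip data is the struct its_vpe itself (see its_vpe_send_inv further down), while for an LPI/vLPI it is a struct its_device, so get_vlpi_map() must not be applied blindly. A minimal stand-alone model of that dispatch-on-chip-identity pattern (types stubbed out, names hypothetical, not kernel code):

#include <stdio.h>

/* Stand-ins for the kernel types involved (illustrative only). */
struct irq_chip { const char *name; };
struct its_vpe  { int col_idx; };
struct irq_data { const struct irq_chip *chip; void *chip_data; };

static const struct irq_chip vpe_chip = { .name = "ITS-VPE" };

/* The chip identity tells us how to interpret chip_data. */
static struct its_vpe *data_to_vpe(const struct irq_data *d)
{
	if (d->chip == &vpe_chip)
		return d->chip_data;	/* doorbell: chip_data is the vPE */
	return NULL;			/* LPI/vLPI: would go through the vLPI map */
}

int main(void)
{
	struct its_vpe vpe = { .col_idx = 3 };
	struct irq_data d = { .chip = &vpe_chip, .chip_data = &vpe };

	printf("resolved vPE column: %d\n", data_to_vpe(&d)->col_idx);
	return 0;
}
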
.. | .. |
1423 | 1441 | cpu_relax(); |
1424 | 1442 | } |
1425 | 1443 | |
| 1444 | +static void __direct_lpi_inv(struct irq_data *d, u64 val) |
| 1445 | +{ |
| 1446 | + void __iomem *rdbase; |
| 1447 | + unsigned long flags; |
| 1448 | + int cpu; |
| 1449 | + |
| 1450 | + /* Target the redistributor this LPI is currently routed to */ |
| 1451 | + cpu = irq_to_cpuid_lock(d, &flags); |
| 1452 | + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); |
| 1453 | + |
| 1454 | + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; |
| 1455 | + gic_write_lpir(val, rdbase + GICR_INVLPIR); |
| 1456 | + wait_for_syncr(rdbase); |
| 1457 | + |
| 1458 | + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); |
| 1459 | + irq_to_cpuid_unlock(d, flags); |
| 1460 | +} |
| 1461 | + |
1426 | 1462 | static void direct_lpi_inv(struct irq_data *d) |
1427 | 1463 | { |
1428 | 1464 | struct its_vlpi_map *map = get_vlpi_map(d); |
1429 | | - void __iomem *rdbase; |
1430 | | - unsigned long flags; |
1431 | 1465 | u64 val; |
1432 | | - int cpu; |
1433 | 1466 | |
1434 | 1467 | if (map) { |
1435 | 1468 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
.. | .. |
1443 | 1476 | val = d->hwirq; |
1444 | 1477 | } |
1445 | 1478 | |
1446 | | - /* Target the redistributor this LPI is currently routed to */ |
1447 | | - cpu = irq_to_cpuid_lock(d, &flags); |
1448 | | - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); |
1449 | | - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; |
1450 | | - gic_write_lpir(val, rdbase + GICR_INVLPIR); |
1451 | | - |
1452 | | - wait_for_syncr(rdbase); |
1453 | | - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); |
1454 | | - irq_to_cpuid_unlock(d, flags); |
| 1479 | + __direct_lpi_inv(d, val); |
1455 | 1480 | } |
1456 | 1481 | |
1457 | 1482 | static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) |
.. | .. |
1493 | 1518 | * |
1494 | 1519 | * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI |
1495 | 1520 | * value or to 1023, depending on the enable bit. But that |
1496 | | - * would be issueing a mapping for an /existing/ DevID+EventID |
| 1521 | + * would be issuing a mapping for an /existing/ DevID+EventID |
1497 | 1522 | * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI |
1498 | 1523 | * to the /same/ vPE, using this opportunity to adjust the |
1499 | 1524 | * doorbell. Mouahahahaha. We loves it, Precious. |
.. | .. |
2168 | 2193 | { |
2169 | 2194 | struct page *prop_page; |
2170 | 2195 | |
2171 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) |
| 2196 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 2197 | + of_machine_is_compatible("rockchip,rk3567") || |
| 2198 | + of_machine_is_compatible("rockchip,rk3566")) |
2172 | 2199 | gfp_flags |= GFP_DMA32; |
2173 | 2200 | prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); |
2174 | 2201 | if (!prop_page) |
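
This rk3568/rk3567/rk3566 test recurs in every allocation path the patch touches (property table, ITS tables, pending tables, ITTs, command queue). A hypothetical helper, not part of this diff, would keep the quirk in one place; of_machine_is_compatible() is the only kernel API it relies on:

/* Hypothetical consolidation of the repeated checks; not in this patch. */
static bool its_need_dma32_alloc(void)
{
	return of_machine_is_compatible("rockchip,rk3568") ||
	       of_machine_is_compatible("rockchip,rk3567") ||
	       of_machine_is_compatible("rockchip,rk3566");
}

Each call site would then reduce to "if (its_need_dma32_alloc()) gfp_flags |= GFP_DMA32;".
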
.. | .. |
2306 | 2333 | } |
2307 | 2334 | |
2308 | 2335 | gfp_flags = GFP_KERNEL | __GFP_ZERO; |
2309 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) |
| 2336 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 2337 | + of_machine_is_compatible("rockchip,rk3567") || |
| 2338 | + of_machine_is_compatible("rockchip,rk3566")) |
2310 | 2339 | gfp_flags |= GFP_DMA32; |
2311 | 2340 | page = alloc_pages_node(its->numa_node, gfp_flags, order); |
2312 | 2341 | if (!page) |
.. | .. |
2357 | 2386 | |
2358 | 2387 | if (IS_ENABLED(CONFIG_NO_GKI) && |
2359 | 2388 | (of_machine_is_compatible("rockchip,rk3568") || |
| 2389 | + of_machine_is_compatible("rockchip,rk3567") || |
2360 | 2390 | of_machine_is_compatible("rockchip,rk3566") || |
2361 | 2391 | of_machine_is_compatible("rockchip,rk3588"))) { |
2362 | 2392 | if (tmp & GITS_BASER_SHAREABILITY_MASK) |
.. | .. |
2947 | 2977 | { |
2948 | 2978 | struct page *pend_page; |
2949 | 2979 | |
2950 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) |
| 2980 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 2981 | + of_machine_is_compatible("rockchip,rk3567") || |
| 2982 | + of_machine_is_compatible("rockchip,rk3566")) |
2951 | 2983 | gfp_flags |= GFP_DMA32; |
2952 | 2984 | pend_page = alloc_pages(gfp_flags | __GFP_ZERO, |
2953 | 2985 | get_order(LPI_PENDBASE_SZ)); |
.. | .. |
3108 | 3140 | |
3109 | 3141 | if (IS_ENABLED(CONFIG_NO_GKI) && |
3110 | 3142 | (of_machine_is_compatible("rockchip,rk3568") || |
| 3143 | + of_machine_is_compatible("rockchip,rk3567") || |
3111 | 3144 | of_machine_is_compatible("rockchip,rk3566") || |
3112 | 3145 | of_machine_is_compatible("rockchip,rk3588"))) |
3113 | 3146 | tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; |
.. | .. |
3138 | 3171 | |
3139 | 3172 | if (IS_ENABLED(CONFIG_NO_GKI) && |
3140 | 3173 | (of_machine_is_compatible("rockchip,rk3568") || |
| 3174 | + of_machine_is_compatible("rockchip,rk3567") || |
3141 | 3175 | of_machine_is_compatible("rockchip,rk3566") || |
3142 | 3176 | of_machine_is_compatible("rockchip,rk3588"))) |
3143 | 3177 | tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; |
.. | .. |
3163 | 3197 | |
3164 | 3198 | /* |
3165 | 3199 | * It's possible for CPU to receive VLPIs before it is |
3166 | | - * sheduled as a vPE, especially for the first CPU, and the |
| 3200 | + * scheduled as a vPE, especially for the first CPU, and the |
3167 | 3201 | * VLPI with INTID larger than 2^(IDbits+1) will be considered |
3168 | 3202 | * as out of range and dropped by GIC. |
3169 | 3203 | * So we initialize IDbits to known value to avoid VLPI drop. |
.. | .. |
3306 | 3340 | if (!table[idx]) { |
3307 | 3341 | gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; |
3308 | 3342 | |
3309 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) |
| 3343 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 3344 | + of_machine_is_compatible("rockchip,rk3567") || |
| 3345 | + of_machine_is_compatible("rockchip,rk3566")) |
3310 | 3346 | gfp_flags |= GFP_DMA32; |
3311 | 3347 | page = alloc_pages_node(its->numa_node, gfp_flags, |
3312 | 3348 | get_order(baser->psz)); |
.. | .. |
3414 | 3450 | sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); |
3415 | 3451 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
3416 | 3452 | gfp_flags = GFP_KERNEL; |
3417 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) { |
| 3453 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 3454 | + of_machine_is_compatible("rockchip,rk3567") || |
| 3455 | + of_machine_is_compatible("rockchip,rk3566")) { |
3418 | 3456 | gfp_flags |= GFP_DMA32; |
3419 | 3457 | itt = (void *)__get_free_pages(gfp_flags, get_order(sz)); |
3420 | 3458 | } else { |
.. | .. |
3436 | 3474 | kfree(dev); |
3437 | 3475 | |
3438 | 3476 | if (of_machine_is_compatible("rockchip,rk3568") || |
| 3477 | + of_machine_is_compatible("rockchip,rk3567") || |
3439 | 3478 | of_machine_is_compatible("rockchip,rk3566")) |
3440 | 3479 | free_pages((unsigned long)itt, get_order(sz)); |
3441 | 3480 | else |
.. | .. |
3480 | 3519 | kfree(its_dev->event_map.col_map); |
3481 | 3520 | |
3482 | 3521 | if (of_machine_is_compatible("rockchip,rk3568") || |
| 3522 | + of_machine_is_compatible("rockchip,rk3567") || |
3483 | 3523 | of_machine_is_compatible("rockchip,rk3566")) |
3484 | 3524 | free_pages((unsigned long)its_dev->itt, get_order(its_dev->itt_sz)); |
3485 | 3525 | else |
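
The two free paths above have to mirror the allocation choice made further up: an ITT that came from __get_free_pages() (the GFP_DMA32 case) must be released with free_pages(), while a kmalloc-family buffer would go back through kfree(). A schematic of that pairing, assuming kernel context and a hypothetical needs_dma32() predicate:

/* Schematic only: allocator and deallocator keyed off the same test. */
static void *itt_alloc(size_t sz, unsigned int order)
{
	if (needs_dma32())	/* hypothetical predicate */
		return (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	return kzalloc(sz, GFP_KERNEL);
}

static void itt_free(void *itt, unsigned int order)
{
	if (needs_dma32())
		free_pages((unsigned long)itt, order);
	else
		kfree(itt);
}
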
.. | .. |
3679 | 3719 | |
3680 | 3720 | /* |
3681 | 3721 | * If all interrupts have been freed, start mopping the |
3682 | | - * floor. This is conditionned on the device not being shared. |
| 3722 | + * floor. This is conditioned on the device not being shared. |
3683 | 3723 | */ |
3684 | 3724 | if (!its_dev->shared && |
3685 | 3725 | bitmap_empty(its_dev->event_map.lpi_map, |
.. | .. |
3984 | 4024 | { |
3985 | 4025 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
3986 | 4026 | |
3987 | | - if (gic_rdists->has_direct_lpi) { |
3988 | | - void __iomem *rdbase; |
3989 | | - |
3990 | | - /* Target the redistributor this VPE is currently known on */ |
3991 | | - raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
3992 | | - rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; |
3993 | | - gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); |
3994 | | - wait_for_syncr(rdbase); |
3995 | | - raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
3996 | | - } else { |
| 4027 | + if (gic_rdists->has_direct_lpi) |
| 4028 | + __direct_lpi_inv(d, d->parent_data->hwirq); |
| 4029 | + else |
3997 | 4030 | its_vpe_send_cmd(vpe, its_send_inv); |
3998 | | - } |
3999 | 4031 | } |
4000 | 4032 | |
4001 | 4033 | static void its_vpe_mask_irq(struct irq_data *d) |
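
its_vpe_send_inv() can hand the work to __direct_lpi_inv() precisely because irq_to_cpuid_lock() now recognises the vPE irq_chip (first hunks of this diff) and resolves the target CPU under the vPE lock, instead of reading vpe->col_idx without that lock as the removed lines did. A condensed view of the resulting path, drawn only from the hunks above:

/*
 * its_vpe_send_inv(d)                          d is the vPE doorbell
 *   __direct_lpi_inv(d, d->parent_data->hwirq)
 *     cpu = irq_to_cpuid_lock(d, &flags)       d->chip == &its_vpe_irq_chip,
 *                                              so the vPE path is taken
 *     raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock)
 *     gic_write_lpir(val, rdbase + GICR_INVLPIR)
 *     wait_for_syncr(rdbase)
 *     raw_spin_unlock(...) / irq_to_cpuid_unlock(d, flags)
 */
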
.. | .. |
4257 | 4289 | { |
4258 | 4290 | /* |
4259 | 4291 | * There is no notion of affinity for virtual SGIs, at least |
4260 | | - * not on the host (since they can only be targetting a vPE). |
| 4292 | + * not on the host (since they can only be targeting a vPE). |
4261 | 4293 | * Tell the kernel we've done whatever it asked for. |
4262 | 4294 | */ |
4263 | 4295 | irq_data_update_effective_affinity(d, mask_val); |
.. | .. |
4302 | 4334 | /* |
4303 | 4335 | * Locking galore! We can race against two different events: |
4304 | 4336 | * |
4305 | | - * - Concurent vPE affinity change: we must make sure it cannot |
| 4337 | + * - Concurrent vPE affinity change: we must make sure it cannot |
4306 | 4338 | * happen, or we'll talk to the wrong redistributor. This is |
4307 | 4339 | * identical to what happens with vLPIs. |
4308 | 4340 | * |
.. | .. |
5085 | 5117 | its->numa_node = numa_node; |
5086 | 5118 | |
5087 | 5119 | gfp_flags = GFP_KERNEL | __GFP_ZERO; |
5088 | | - if (of_machine_is_compatible("rockchip,rk3568") || of_machine_is_compatible("rockchip,rk3566")) |
| 5120 | + if (of_machine_is_compatible("rockchip,rk3568") || |
| 5121 | + of_machine_is_compatible("rockchip,rk3567") || |
| 5122 | + of_machine_is_compatible("rockchip,rk3566")) |
5089 | 5123 | gfp_flags |= GFP_DMA32; |
5090 | 5124 | page = alloc_pages_node(its->numa_node, gfp_flags, |
5091 | 5125 | get_order(ITS_CMD_QUEUE_SZ)); |
.. | .. |
5120 | 5154 | |
5121 | 5155 | if (IS_ENABLED(CONFIG_NO_GKI) && |
5122 | 5156 | (of_machine_is_compatible("rockchip,rk3568") || |
| 5157 | + of_machine_is_compatible("rockchip,rk3567") || |
5123 | 5158 | of_machine_is_compatible("rockchip,rk3566") || |
5124 | 5159 | of_machine_is_compatible("rockchip,rk3588"))) |
5125 | 5160 | tmp &= ~GITS_CBASER_SHAREABILITY_MASK; |
---|