@@ -48,7 +48,7 @@
 
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
 
-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
 static int gic_shared_intrs;
 static unsigned int gic_cpu_pin;
@@ -209,7 +209,7 @@
 
 	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	switch (type & IRQ_TYPE_SENSE_MASK) {
 	case IRQ_TYPE_EDGE_FALLING:
 		pol = GIC_POL_FALLING_EDGE;
@@ -249,7 +249,7 @@
 	else
 		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -267,7 +267,7 @@
 		return -EINVAL;
 
 	/* Assumption : cpumask refers to a single CPU */
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
 	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@@ -278,7 +278,7 @@
 		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return IRQ_SET_MASK_OK;
 }
@@ -356,12 +356,12 @@
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = false;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_rmask(BIT(intr));
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@@ -374,32 +374,43 @@
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = true;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_smask(BIT(intr));
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
 {
-	struct gic_all_vpes_chip_data *cd;
-	unsigned int intr;
+	static const unsigned int local_intrs[] = {
+		GIC_LOCAL_INT_TIMER,
+		GIC_LOCAL_INT_PERFCTR,
+		GIC_LOCAL_INT_FDC,
+	};
+	unsigned long flags;
+	int i;
 
-	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
-	cd = irq_data_get_irq_chip_data(d);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 
-	write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
-	if (cd->mask)
-		write_gic_vl_smask(BIT(intr));
+	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+		unsigned int intr = local_intrs[i];
+		struct gic_all_vpes_chip_data *cd;
+
+		cd = &gic_all_vpes_chip_data[intr];
+		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+		if (cd->mask)
+			write_gic_vl_smask(BIT(intr));
+	}
+
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static struct irq_chip gic_all_vpes_local_irq_controller = {
 	.name = "MIPS GIC Local",
 	.irq_mask = gic_mask_local_irq_all_vpes,
 	.irq_unmask = gic_unmask_local_irq_all_vpes,
-	.irq_cpu_online = gic_all_vpes_irq_cpu_online,
 };
 
 static void __gic_irq_dispatch(void)
@@ -423,11 +434,11 @@
 
 	data = irq_get_irq_data(virq);
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
 	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
 	irq_data_update_effective_affinity(data, cpumask_of(cpu));
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -480,6 +491,10 @@
 	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
 	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
 
+	/*
+	 * If adding support for more per-cpu interrupts, keep the
+	 * array in gic_all_vpes_irq_cpu_online() in sync.
+	 */
 	switch (intr) {
 	case GIC_LOCAL_INT_TIMER:
 		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@@ -518,12 +533,12 @@
 	if (!gic_local_irq_is_routable(intr))
 		return -EPERM;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -710,8 +725,8 @@
 	/* Clear all local IRQ masks (ie. disable all local interrupts) */
 	write_gic_vl_rmask(~0);
 
-	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
-	irq_cpu_online();
+	/* Enable desired interrupts */
+	gic_all_vpes_irq_cpu_online();
 
 	return 0;
 }
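A note on the locking pattern the diff switches to: on PREEMPT_RT a regular spinlock_t becomes a sleeping lock, which must not be taken from contexts that run with interrupts hard-disabled (such as these irqchip callbacks), whereas raw_spinlock_t always stays a true spinning lock. The minimal sketch below, with hypothetical names that are not part of the patch, only illustrates that pattern:

#include <linux/spinlock.h>

/* Hypothetical lock mirroring the gic_lock conversion above */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_update_hw(void)
{
	unsigned long flags;

	/* Disables local interrupts and spins; never sleeps, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... program shared hardware registers here ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}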