@@ -46,17 +46,19 @@
 
 void __iomem *mips_gic_base;
 
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
 
-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
-static struct irq_domain *gic_ipi_domain;
 static int gic_shared_intrs;
 static unsigned int gic_cpu_pin;
 static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
 static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
 static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 static struct gic_all_vpes_chip_data {
 	u32 map;
@@ -207,7 +209,7 @@
 
 	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	switch (type & IRQ_TYPE_SENSE_MASK) {
 	case IRQ_TYPE_EDGE_FALLING:
 		pol = GIC_POL_FALLING_EDGE;
@@ -247,7 +249,7 @@
 	else
 		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
 						 handle_level_irq, NULL);
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -265,7 +267,7 @@
 		return -EINVAL;
 
 	/* Assumption : cpumask refers to a single CPU */
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
 	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@@ -276,7 +278,7 @@
 	set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return IRQ_SET_MASK_OK;
 }
@@ -354,12 +356,12 @@
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = false;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_rmask(BIT(intr));
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@@ -372,32 +374,43 @@
 	cd = irq_data_get_irq_chip_data(d);
 	cd->mask = true;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_smask(BIT(intr));
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
 {
-	struct gic_all_vpes_chip_data *cd;
-	unsigned int intr;
+	static const unsigned int local_intrs[] = {
+		GIC_LOCAL_INT_TIMER,
+		GIC_LOCAL_INT_PERFCTR,
+		GIC_LOCAL_INT_FDC,
+	};
+	unsigned long flags;
+	int i;
 
-	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
-	cd = irq_data_get_irq_chip_data(d);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 
-	write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
-	if (cd->mask)
-		write_gic_vl_smask(BIT(intr));
+	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+		unsigned int intr = local_intrs[i];
+		struct gic_all_vpes_chip_data *cd;
+
+		cd = &gic_all_vpes_chip_data[intr];
+		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+		if (cd->mask)
+			write_gic_vl_smask(BIT(intr));
+	}
+
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static struct irq_chip gic_all_vpes_local_irq_controller = {
 	.name = "MIPS GIC Local",
 	.irq_mask = gic_mask_local_irq_all_vpes,
 	.irq_unmask = gic_unmask_local_irq_all_vpes,
-	.irq_cpu_online = gic_all_vpes_irq_cpu_online,
 };
 
 static void __gic_irq_dispatch(void)
@@ -421,11 +434,11 @@
 
 	data = irq_get_irq_data(virq);
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
 	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
 	irq_data_update_effective_affinity(data, cpumask_of(cpu));
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -459,9 +472,11 @@
 	u32 map;
 
 	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
 		/* verify that shared irqs don't conflict with an IPI irq */
 		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
 			return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
 						    &gic_level_irq_controller,
@@ -476,11 +491,15 @@
 	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
 	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
 
+	/*
+	 * If adding support for more per-cpu interrupts, keep the
+	 * array in gic_all_vpes_irq_cpu_online() in sync.
+	 */
 	switch (intr) {
 	case GIC_LOCAL_INT_TIMER:
 		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
 		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
-		/* fall-through */
+		fallthrough;
 	case GIC_LOCAL_INT_PERFCTR:
 	case GIC_LOCAL_INT_FDC:
 		/*
@@ -514,12 +533,12 @@
 	if (!gic_local_irq_is_routable(intr))
 		return -EPERM;
 
-	spin_lock_irqsave(&gic_lock, flags);
+	raw_spin_lock_irqsave(&gic_lock, flags);
 	for_each_online_cpu(cpu) {
 		write_gic_vl_other(mips_cm_vp_id(cpu));
 		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
 	}
-	spin_unlock_irqrestore(&gic_lock, flags);
+	raw_spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
 }
@@ -549,6 +568,8 @@
 	.free = gic_irq_domain_free,
 	.map = gic_irq_domain_map,
 };
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
 
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
 				const u32 *intspec, unsigned int intsize,
@@ -617,8 +638,8 @@
 	return ret;
 }
 
-void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
-			 unsigned int nr_irqs)
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs)
 {
 	irq_hw_number_t base_hwirq;
 	struct irq_data *data;
@@ -631,8 +652,8 @@
 	bitmap_set(ipi_available, base_hwirq, nr_irqs);
 }
 
-int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
-			 enum irq_domain_bus_token bus_token)
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+				enum irq_domain_bus_token bus_token)
 {
 	bool is_ipi;
 
@@ -653,6 +674,48 @@
 	.match = gic_ipi_domain_match,
 };
 
+static int gic_register_ipi_domain(struct device_node *node)
+{
+	struct irq_domain *gic_ipi_domain;
+	unsigned int v[2], num_ipis;
+
+	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_ipi_domain_ops, NULL);
+	if (!gic_ipi_domain) {
+		pr_err("Failed to add IPI domain");
+		return -ENXIO;
+	}
+
+	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+	if (node &&
+	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+		bitmap_set(ipi_resrv, v[0], v[1]);
+	} else {
+		/*
+		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+		 * meeting the requirements of arch/mips SMP.
+		 */
+		num_ipis = 2 * num_possible_cpus();
+		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+	}
+
+	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+	return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
 static int gic_cpu_startup(unsigned int cpu)
 {
 	/* Enable or disable EIC */
@@ -662,8 +725,8 @@
 	/* Clear all local IRQ masks (ie. disable all local interrupts) */
 	write_gic_vl_rmask(~0);
 
-	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
-	irq_cpu_online();
+	/* Enable desired interrupts */
+	gic_all_vpes_irq_cpu_online();
 
 	return 0;
 }
@@ -671,11 +734,12 @@
 static int __init gic_of_init(struct device_node *node,
 			      struct device_node *parent)
 {
-	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+	unsigned int cpu_vec, i, gicconfig;
 	unsigned long reserved;
 	phys_addr_t gic_base;
 	struct resource res;
 	size_t gic_len;
+	int ret;
 
 	/* Find the first available CPU vector. */
 	i = 0;
@@ -716,7 +780,11 @@
 		__sync();
 	}
 
-	mips_gic_base = ioremap_nocache(gic_base, gic_len);
+	mips_gic_base = ioremap(gic_base, gic_len);
+	if (!mips_gic_base) {
+		pr_err("Failed to ioremap gic_base\n");
+		return -ENOMEM;
+	}
 
 	gicconfig = read_gic_config();
 	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
@@ -764,30 +832,9 @@
 		return -ENXIO;
 	}
 
-	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
-						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
-						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
-						  node, &gic_ipi_domain_ops, NULL);
-	if (!gic_ipi_domain) {
-		pr_err("Failed to add IPI domain");
-		return -ENXIO;
-	}
-
-	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
-
-	if (node &&
-	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
-		bitmap_set(ipi_resrv, v[0], v[1]);
-	} else {
-		/*
-		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
-		 * meeting the requirements of arch/mips SMP.
-		 */
-		num_ipis = 2 * num_possible_cpus();
-		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
-	}
-
-	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+	ret = gic_register_ipi_domain(node);
+	if (ret)
+		return ret;
 
 	board_bind_eic_interrupt = &gic_bind_eic_interrupt;
 
---|