.. | ..
31 | 31 | cpulist_parse(str, irq_default_affinity);
32 | 32 | /*
33 | 33 | * Set at least the boot cpu. We don't want to end up with
34 | | - * bugreports caused by random comandline masks
| 34 | + * bugreports caused by random commandline masks
35 | 35 | */
36 | 36 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
37 | 37 | return 1;
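
For context, the setup handler above backs the irqaffinity= kernel boot parameter, which cpulist_parse() reads in CPU-list syntax; a hedged usage example (the CPU numbers are purely illustrative):

    irqaffinity=0,2-3

Whatever the command line supplies, the cpumask_set_cpu() call guarantees the default affinity mask always contains at least the boot CPU.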
.. | ..
275 | 275 | &actions_attr.attr,
276 | 276 | NULL
277 | 277 | };
| 278 | +ATTRIBUTE_GROUPS(irq);
278 | 279 |
279 | 280 | static struct kobj_type irq_kobj_type = {
280 | 281 | .release = irq_kobj_release,
281 | 282 | .sysfs_ops = &kobj_sysfs_ops,
282 | | - .default_attrs = irq_attrs,
| 283 | + .default_groups = irq_groups,
283 | 284 | };
284 | 285 |
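The switch from .default_attrs to .default_groups works because ATTRIBUTE_GROUPS(irq) generates the wrapping group from the existing irq_attrs array; a simplified sketch of what the macro from include/linux/sysfs.h expands to (treat the exact expansion as an assumption):

    /* Simplified expansion of ATTRIBUTE_GROUPS(irq) -- illustrative only. */
    static const struct attribute_group irq_group = {
            .attrs = irq_attrs,
    };
    static const struct attribute_group *irq_groups[] = {
            &irq_group,
            NULL,
    };

The kobject core iterates irq_groups when the kobject is added, so the sysfs files stay the same; only the registration mechanism changes.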
285 | 286 | static void irq_sysfs_add(int irq, struct irq_desc *desc)
.. | ..
287 | 288 | if (irq_kobj_base) {
288 | 289 | /*
289 | 290 | * Continue even in case of failure as this is nothing
290 | | - * crucial.
| 291 | + * crucial and failures in the late irq_sysfs_init()
| 292 | + * cannot be rolled back.
291 | 293 | */
292 | 294 | if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
293 | 295 | pr_warn("Failed to add kobject for irq %d\n", irq);
| 296 | + else
| 297 | + desc->istate |= IRQS_SYSFS;
294 | 298 | }
295 | 299 | }
296 | 300 |
297 | 301 | static void irq_sysfs_del(struct irq_desc *desc)
298 | 302 | {
299 | 303 | /*
300 | | - * If irq_sysfs_init() has not yet been invoked (early boot), then
301 | | - * irq_kobj_base is NULL and the descriptor was never added.
302 | | - * kobject_del() complains about a object with no parent, so make
303 | | - * it conditional.
| 304 | + * Only invoke kobject_del() when kobject_add() was successfully
| 305 | + * invoked for the descriptor. This covers both early boot, where
| 306 | + * sysfs is not initialized yet, and the case of a failed
| 307 | + * kobject_add() invocation.
304 | 308 | */
305 | | - if (irq_kobj_base)
| 309 | + if (desc->istate & IRQS_SYSFS)
306 | 310 | kobject_del(&desc->kobj);
307 | 311 | }
308 | 312 |
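IRQS_SYSFS is a new per-descriptor state bit recording that kobject_add() succeeded, so irq_sysfs_del() no longer has to guess from irq_kobj_base alone; it is assumed to be defined next to the other IRQS_* bits in kernel/irq/internals.h, roughly as follows (the exact bit value is an assumption):

    enum {
            IRQS_AUTODETECT         = 0x00000001,
            /* ... existing IRQS_* state bits ... */
            IRQS_SYSFS              = 0x00004000,   /* descriptor added to sysfs */
    };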
.. | ..
404 | 408 | lockdep_set_class(&desc->lock, &irq_desc_lock_class);
405 | 409 | mutex_init(&desc->request_mutex);
406 | 410 | init_rcu_head(&desc->rcu);
| 411 | + init_waitqueue_head(&desc->wait_for_threads);
407 | 412 |
408 | 413 | desc_set_defaults(irq, desc, node, affinity, owner);
409 | 414 | irqd_set(&desc->irq_data, flags);
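
Initializing wait_for_threads in alloc_desc() (and in early_irq_init() further down) matters because other code sleeps on this waitqueue; a minimal sketch of the waiter side, modeled on synchronize_irq() in kernel/irq/manage.c (simplified, treat the details as an assumption):

    /* Wait until all threaded handlers for this descriptor have finished. */
    wait_event(desc->wait_for_threads,
               !atomic_read(&desc->threads_active));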
.. | ..
463 | 468 | }
464 | 469 |
465 | 470 | static int alloc_descs(unsigned int start, unsigned int cnt, int node,
466 | | - const struct cpumask *affinity, struct module *owner)
| 471 | + const struct irq_affinity_desc *affinity,
| 472 | + struct module *owner)
467 | 473 | {
468 | | - const struct cpumask *mask = NULL;
469 | 474 | struct irq_desc *desc;
470 | | - unsigned int flags;
471 | 475 | int i;
472 | 476 |
473 | 477 | /* Validate affinity mask(s) */
474 | 478 | if (affinity) {
475 | | - for (i = 0, mask = affinity; i < cnt; i++, mask++) {
476 | | - if (cpumask_empty(mask))
| 479 | + for (i = 0; i < cnt; i++) {
| 480 | + if (cpumask_empty(&affinity[i].mask))
477 | 481 | return -EINVAL;
478 | 482 | }
479 | 483 | }
480 | 484 |
481 | | - flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
482 | | - mask = NULL;
483 | | -
484 | 485 | for (i = 0; i < cnt; i++) {
| 486 | + const struct cpumask *mask = NULL;
| 487 | + unsigned int flags = 0;
| 488 | +
485 | 489 | if (affinity) {
486 | | - node = cpu_to_node(cpumask_first(affinity));
487 | | - mask = affinity;
| 490 | + if (affinity->is_managed) {
| 491 | + flags = IRQD_AFFINITY_MANAGED |
| 492 | + IRQD_MANAGED_SHUTDOWN;
| 493 | + }
| 494 | + mask = &affinity->mask;
| 495 | + node = cpu_to_node(cpumask_first(mask));
488 | 496 | affinity++;
489 | 497 | }
| 498 | +
490 | 499 | desc = alloc_desc(start + i, node, flags, mask, owner);
491 | 500 | if (!desc)
492 | 501 | goto err;
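
The per-descriptor managed/unmanaged decision above is driven by struct irq_affinity_desc, which pairs a cpumask with an is_managed flag; its definition in include/linux/interrupt.h looks roughly like this (field layout given from memory, treat it as an assumption):

    struct irq_affinity_desc {
            struct cpumask  mask;           /* requested affinity for this vector */
            unsigned int    is_managed : 1; /* kernel-managed, not user-settable */
    };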
.. | ..
568 | 577 | raw_spin_lock_init(&desc[i].lock);
569 | 578 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
570 | 579 | mutex_init(&desc[i].request_mutex);
| 580 | + init_waitqueue_head(&desc[i].wait_for_threads);
571 | 581 | desc_set_defaults(i, &desc[i], node, NULL, NULL);
572 | 582 | }
573 | 583 | return arch_early_irq_init();
.. | ..
590 | 600 | }
591 | 601 |
592 | 602 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
593 | | - const struct cpumask *affinity,
| 603 | + const struct irq_affinity_desc *affinity,
594 | 604 | struct module *owner)
595 | 605 | {
596 | 606 | u32 i;
.. | ..
633 | 643 | int generic_handle_irq(unsigned int irq)
634 | 644 | {
635 | 645 | struct irq_desc *desc = irq_to_desc(irq);
| 646 | + struct irq_data *data;
636 | 647 |
637 | 648 | if (!desc)
638 | 649 | return -EINVAL;
| 650 | +
| 651 | + data = irq_desc_get_irq_data(desc);
| 652 | + if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
| 653 | + return -EPERM;
| 654 | +
639 | 655 | generic_handle_irq_desc(desc);
640 | 656 | return 0;
641 | 657 | }
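
handle_enforce_irqctx() reports whether the interrupt is flagged to be handled only from hard interrupt context, which is why the WARN_ON_ONCE() rejects calls from task or softirq context; a minimal sketch of the helper assumed to live in kernel/irq/internals.h (based on the usual irqd_* accessor pattern):

    static inline bool handle_enforce_irqctx(struct irq_data *data)
    {
            return irqd_is_handle_enforce_irqctx(data);
    }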
.. | ..
656 | 672 | {
657 | 673 | struct pt_regs *old_regs = set_irq_regs(regs);
658 | 674 | unsigned int irq = hwirq;
| 675 | + struct irq_desc *desc;
659 | 676 | int ret = 0;
660 | | -
661 | | - irq_enter();
662 | 677 |
663 | 678 | #ifdef CONFIG_IRQ_DOMAIN
664 | 679 | if (lookup)
.. | ..
669 | 684 | * Some hardware gives randomly wrong interrupts. Rather
670 | 685 | * than crashing, do something sensible.
671 | 686 | */
672 | | - if (unlikely(!irq || irq >= nr_irqs)) {
| 687 | + if (unlikely(!irq || irq >= nr_irqs || !(desc = irq_to_desc(irq)))) {
673 | 688 | ack_bad_irq(irq);
674 | 689 | ret = -EINVAL;
675 | | - } else {
676 | | - generic_handle_irq(irq);
| 690 | + goto out;
677 | 691 | }
678 | 692 |
679 | | - irq_exit();
| 693 | + if (IS_ENABLED(CONFIG_ARCH_WANTS_IRQ_RAW) &&
| 694 | + unlikely(irq_settings_is_raw(desc))) {
| 695 | + generic_handle_irq_desc(desc);
| 696 | + } else {
| 697 | + irq_enter();
| 698 | + generic_handle_irq_desc(desc);
| 699 | + irq_exit();
| 700 | + }
| 701 | +
| 702 | +out:
680 | 703 | set_irq_regs(old_regs);
681 | 704 | return ret;
682 | 705 | }
| 706 | +
| 707 | +#ifdef CONFIG_IRQ_DOMAIN
| 708 | +/**
| 709 | + * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
| 710 | + * @domain: The domain where to perform the lookup
| 711 | + * @hwirq: The HW irq number to convert to a logical one
| 712 | + * @regs: Register file coming from the low-level handling code
| 713 | + *
| 714 | + * This function must be called from an NMI context.
| 715 | + *
| 716 | + * Returns: 0 on success, or -EINVAL if conversion has failed
| 717 | + */
| 718 | +int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
| 719 | + struct pt_regs *regs)
| 720 | +{
| 721 | + struct pt_regs *old_regs = set_irq_regs(regs);
| 722 | + unsigned int irq;
| 723 | + int ret = 0;
| 724 | +
| 725 | + /*
| 726 | + * NMI context needs to be setup earlier in order to deal with tracing.
| 727 | + */
| 728 | + WARN_ON(!in_nmi());
| 729 | +
| 730 | + irq = irq_find_mapping(domain, hwirq);
| 731 | +
| 732 | + /*
| 733 | + * ack_bad_irq is not NMI-safe, just report
| 734 | + * an invalid interrupt.
| 735 | + */
| 736 | + if (likely(irq))
| 737 | + generic_handle_irq(irq);
| 738 | + else
| 739 | + ret = -EINVAL;
| 740 | +
| 741 | + set_irq_regs(old_regs);
| 742 | + return ret;
| 743 | +}
| 744 | +#endif
683 | 745 | #endif
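
A hedged sketch of how an architecture's NMI entry path might use the new handle_domain_nmi(); the nmi_enter()/nmi_exit() bracketing is what satisfies the WARN_ON(!in_nmi()) above, and the function and domain names are made up for illustration:

    /* Illustrative arch-level NMI entry hook, not part of this patch. */
    static void example_handle_nmi(struct irq_domain *domain,
                                   struct pt_regs *regs, u32 hwirq)
    {
            nmi_enter();            /* NMI context must be set up before the call */
            if (handle_domain_nmi(domain, hwirq, regs))
                    pr_warn_ratelimited("Spurious NMI, hwirq %u\n", hwirq);
            nmi_exit();
    }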
684 | 746 |
685 | 747 | /* Dynamic interrupt handling */
.. | ..
706 | 768 | EXPORT_SYMBOL_GPL(irq_free_descs);
707 | 769 |
708 | 770 | /**
709 | | - * irq_alloc_descs - allocate and initialize a range of irq descriptors
| 771 | + * __irq_alloc_descs - allocate and initialize a range of irq descriptors
710 | 772 | * @irq: Allocate for specific irq number if irq >= 0
711 | 773 | * @from: Start the search from this irq number
712 | 774 | * @cnt: Number of consecutive irqs to allocate.
.. | ..
720 | 782 | */
721 | 783 | int __ref
722 | 784 | __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
723 | | - struct module *owner, const struct cpumask *affinity)
| 785 | + struct module *owner, const struct irq_affinity_desc *affinity)
724 | 786 | {
725 | 787 | int start, ret;
726 | 788 |
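The kernel-doc rename reflects that __irq_alloc_descs() is the underscored worker behind the convenience wrappers in include/linux/irq.h, which inherit the new affinity argument type; roughly (macro body quoted from memory, treat it as an assumption):

    #define irq_alloc_descs(irq, from, cnt, node)                   \
            __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)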
.. | ..
847 | 909 | }
848 | 910 |
849 | 911 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
| 912 | + __releases(&desc->lock)
850 | 913 | {
851 | 914 | raw_spin_unlock_irqrestore(&desc->lock, flags);
852 | 915 | if (bus)
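
__releases() is a sparse context annotation telling the static checker that this function returns with desc->lock dropped even though it never acquired it, balancing the __acquires() side of the locking helpers; the general pattern, as an illustrative sketch (not code from this file):

    /* Generic sparse lock-context pattern, illustrative only. */
    static void take_lock(struct irq_desc *desc)
            __acquires(&desc->lock)
    {
            raw_spin_lock(&desc->lock);     /* returns holding the lock */
    }

    static void drop_lock(struct irq_desc *desc)
            __releases(&desc->lock)
    {
            raw_spin_unlock(&desc->lock);   /* returns with the lock released */
    }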
.. | ..
918 | 981 | return desc && desc->kstat_irqs ?
919 | 982 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
920 | 983 | }
| 984 | +EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
| 985 | +
| 986 | +static bool irq_is_nmi(struct irq_desc *desc)
| 987 | +{
| 988 | + return desc->istate & IRQS_NMI;
| 989 | +}
921 | 990 |
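Exporting kstat_irqs_cpu() lets modules read the per-CPU interrupt counters directly; a hedged usage sketch (the function and variable names are invented for illustration):

    /* Example module helper: sum one IRQ's count over the online CPUs. */
    static unsigned int example_irq_total(unsigned int irq)
    {
            unsigned int cpu, sum = 0;

            for_each_online_cpu(cpu)
                    sum += kstat_irqs_cpu(irq, cpu);
            return sum;
    }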
922 | 991 | /**
923 | 992 | * kstat_irqs - Get the statistics for an interrupt
.. | ..
936 | 1005 | if (!desc || !desc->kstat_irqs)
937 | 1006 | return 0;
938 | 1007 | if (!irq_settings_is_per_cpu_devid(desc) &&
939 | | - !irq_settings_is_per_cpu(desc))
| 1008 | + !irq_settings_is_per_cpu(desc) &&
| 1009 | + !irq_is_nmi(desc))
940 | 1010 | return desc->tot_count;
941 | 1011 |
942 | 1012 | for_each_possible_cpu(cpu)