@@ -275,11 +275,12 @@
 	&actions_attr.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(irq);
 
 static struct kobj_type irq_kobj_type = {
 	.release	= irq_kobj_release,
 	.sysfs_ops	= &kobj_sysfs_ops,
-	.default_attrs	= irq_attrs,
+	.default_groups = irq_groups,
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc)
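For reference, `ATTRIBUTE_GROUPS()` is the stock sysfs helper from include/linux/sysfs.h: given the existing `irq_attrs` array it generates the `irq_groups` pointer array that `kobj_type.default_groups` consumes, replacing the removed `default_attrs` field. A rough sketch of what the macro expands to:

```c
/* Approximate expansion of ATTRIBUTE_GROUPS(irq), cf. include/linux/sysfs.h. */
static const struct attribute_group irq_group = {
	.attrs = irq_attrs,
};

static const struct attribute_group *irq_groups[] = {
	&irq_group,
	NULL,
};
```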
@@ -404,6 +405,7 @@
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	mutex_init(&desc->request_mutex);
 	init_rcu_head(&desc->rcu);
+	init_waitqueue_head(&desc->wait_for_threads);
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
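`desc->wait_for_threads` is the waitqueue that `synchronize_irq()` blocks on until all threaded handlers for this descriptor have drained; initializing it at descriptor allocation keeps that path safe even before any handler is requested. A minimal sketch of the consumer side, modeled on `synchronize_irq()` in kernel/irq/manage.c:

```c
/* Sketch: block until no threaded handler is running on this descriptor,
 * as synchronize_irq() does (kernel/irq/manage.c). */
static void demo_wait_for_threads(struct irq_desc *desc)
{
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));
}
```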
@@ -463,30 +465,34 @@
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-		       const struct cpumask *affinity, struct module *owner)
+		       const struct irq_affinity_desc *affinity,
+		       struct module *owner)
 {
-	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
-	unsigned int flags;
 	int i;
 
 	/* Validate affinity mask(s) */
 	if (affinity) {
-		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
-			if (cpumask_empty(mask))
+		for (i = 0; i < cnt; i++) {
+			if (cpumask_empty(&affinity[i].mask))
 				return -EINVAL;
 		}
 	}
 
-	flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
-	mask = NULL;
-
 	for (i = 0; i < cnt; i++) {
+		const struct cpumask *mask = NULL;
+		unsigned int flags = 0;
+
 		if (affinity) {
-			node = cpu_to_node(cpumask_first(affinity));
-			mask = affinity;
+			if (affinity->is_managed) {
+				flags = IRQD_AFFINITY_MANAGED |
+					IRQD_MANAGED_SHUTDOWN;
+			}
+			mask = &affinity->mask;
+			node = cpu_to_node(cpumask_first(mask));
 			affinity++;
 		}
+
 		desc = alloc_desc(start + i, node, flags, mask, owner);
 		if (!desc)
 			goto err;
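The switch from a bare `cpumask` to `struct irq_affinity_desc` is what lets each vector carry its own managed/unmanaged flag, so the per-descriptor loop above can decide `IRQD_AFFINITY_MANAGED` individually instead of applying one global flag to the whole range. For reference, the descriptor type from include/linux/interrupt.h:

```c
/* From include/linux/interrupt.h. */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};
```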
@@ -568,6 +574,7 @@
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		mutex_init(&desc[i].request_mutex);
+		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
 	}
 	return arch_early_irq_init();
@@ -590,7 +597,7 @@
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
-			      const struct cpumask *affinity,
+			      const struct irq_affinity_desc *affinity,
 			      struct module *owner)
 {
 	u32 i;
@@ -633,9 +640,15 @@
 int generic_handle_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_data *data;
 
 	if (!desc)
 		return -EINVAL;
+
+	data = irq_desc_get_irq_data(desc);
+	if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
+		return -EPERM;
+
 	generic_handle_irq_desc(desc);
 	return 0;
 }
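`handle_enforce_irqctx()` queries the `IRQD_HANDLE_ENFORCE_IRQCTX` status bit, which an irqchip can set (via `irqd_set_handle_enforce_irqctx()`) when its interrupts must only be injected from hard interrupt context. A hedged sketch of a legitimate caller, a demultiplexing handler that already runs in hardirq context; the `demo_*` names and register layout are illustrative, not a real driver:

```c
/* Illustrative demux handler: it runs in hardirq context, so the
 * in_irq() check above passes even when the parent irqchip sets
 * IRQD_HANDLE_ENFORCE_IRQCTX. All demo_* names are hypothetical. */
static void demo_demux_handler(struct irq_desc *desc)
{
	struct demo_chip *chip = irq_desc_get_handler_data(desc);
	unsigned long pending;
	int bit;

	pending = readl(chip->base + DEMO_PENDING);
	for_each_set_bit(bit, &pending, DEMO_NR_IRQS)
		generic_handle_irq(irq_find_mapping(chip->domain, bit));
}
```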
@@ -656,9 +669,8 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned int irq = hwirq;
+	struct irq_desc *desc;
 	int ret = 0;
-
-	irq_enter();
 
 #ifdef CONFIG_IRQ_DOMAIN
 	if (lookup)
@@ -669,17 +681,64 @@
 	 * Some hardware gives randomly wrong interrupts.  Rather
 	 * than crashing, do something sensible.
 	 */
-	if (unlikely(!irq || irq >= nr_irqs)) {
+	if (unlikely(!irq || irq >= nr_irqs || !(desc = irq_to_desc(irq)))) {
 		ack_bad_irq(irq);
 		ret = -EINVAL;
-	} else {
-		generic_handle_irq(irq);
+		goto out;
 	}
 
-	irq_exit();
+	if (IS_ENABLED(CONFIG_ARCH_WANTS_IRQ_RAW) &&
+	    unlikely(irq_settings_is_raw(desc))) {
+		generic_handle_irq_desc(desc);
+	} else {
+		irq_enter();
+		generic_handle_irq_desc(desc);
+		irq_exit();
+	}
+
+out:
 	set_irq_regs(old_regs);
 	return ret;
 }
+
+#ifdef CONFIG_IRQ_DOMAIN
+/**
+ * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
+ * @domain:	The domain where to perform the lookup
+ * @hwirq:	The HW irq number to convert to a logical one
+ * @regs:	Register file coming from the low-level handling code
+ *
+ *		This function must be called from an NMI context.
+ *
+ * Returns:	0 on success, or -EINVAL if conversion has failed
+ */
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+		      struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq;
+	int ret = 0;
+
+	/*
+	 * NMI context needs to be set up earlier in order to deal with tracing.
+	 */
+	WARN_ON(!in_nmi());
+
+	irq = irq_find_mapping(domain, hwirq);
+
+	/*
+	 * ack_bad_irq() is not NMI-safe, so just report
+	 * an invalid interrupt.
+	 */
+	if (likely(irq))
+		generic_handle_irq(irq);
+	else
+		ret = -EINVAL;
+
+	set_irq_regs(old_regs);
+	return ret;
+}
+#endif
 #endif
 
 /* Dynamic interrupt handling */
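For context, `handle_domain_nmi()` mirrors `__handle_domain_irq()` but deliberately omits `irq_enter()`/`irq_exit()`, which are not NMI-safe; the caller is expected to have entered NMI context already. A hedged sketch of an arch-level entry point (names are illustrative, roughly how a GICv3-style pseudo-NMI path would plug in):

```c
/* Illustrative low-level NMI entry; demo_irq_domain is hypothetical.
 * nmi_enter() must precede the call so WARN_ON(!in_nmi()) holds. */
static void demo_arch_nmi_entry(struct pt_regs *regs, u32 hwirq)
{
	nmi_enter();
	if (handle_domain_nmi(demo_irq_domain, hwirq, regs))
		pr_warn_ratelimited("bad NMI hwirq %u\n", hwirq);
	nmi_exit();
}
```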
@@ -706,7 +765,7 @@
 EXPORT_SYMBOL_GPL(irq_free_descs);
 
 /**
- * irq_alloc_descs - allocate and initialize a range of irq descriptors
+ * __irq_alloc_descs - allocate and initialize a range of irq descriptors
  * @irq:	Allocate for specific irq number if irq >= 0
  * @from:	Start the search from this irq number
  * @cnt:	Number of consecutive irqs to allocate.
@@ -720,7 +779,7 @@
  */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-		  struct module *owner, const struct cpumask *affinity)
+		  struct module *owner, const struct irq_affinity_desc *affinity)
 {
 	int start, ret;
 
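Callers normally reach `__irq_alloc_descs()` through wrapper macros rather than directly, which is what the kerneldoc rename reflects. For reference, the common wrappers from include/linux/irq.h, which pass `THIS_MODULE` and a NULL affinity array:

```c
/* Wrappers from include/linux/irq.h. */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)
```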
@@ -847,6 +906,7 @@
 }
 
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+	__releases(&desc->lock)
 {
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (bus)
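`__releases()` is a sparse annotation: under `__CHECKER__` it declares that the function returns with the named lock dropped, pairing with `__acquires()` on the acquiring side; in normal builds it compiles to nothing. A minimal sketch of the matched pair:

```c
/* Sketch: matched sparse lock-context annotations. */
static void demo_lock(spinlock_t *lock)
	__acquires(lock)
{
	spin_lock(lock);
}

static void demo_unlock(spinlock_t *lock)
	__releases(lock)
{
	spin_unlock(lock);
}
```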
@@ -918,6 +978,12 @@
 	return desc && desc->kstat_irqs ?
 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
+EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
+
+static bool irq_is_nmi(struct irq_desc *desc)
+{
+	return desc->istate & IRQS_NMI;
+}
 
 /**
  * kstat_irqs - Get the statistics for an interrupt
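Exporting `kstat_irqs_cpu()` lets modular code sample per-CPU delivery counts directly; a hypothetical module snippet (`demo_dump_irq_counts` is illustrative):

```c
/* Hypothetical module helper using the newly exported symbol. */
static void demo_dump_irq_counts(unsigned int irq)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		pr_info("irq %u: cpu%u -> %u\n",
			irq, cpu, kstat_irqs_cpu(irq, cpu));
}
```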
@@ -936,7 +1002,8 @@
 	if (!desc || !desc->kstat_irqs)
 		return 0;
 	if (!irq_settings_is_per_cpu_devid(desc) &&
-	    !irq_settings_is_per_cpu(desc))
+	    !irq_settings_is_per_cpu(desc) &&
+	    !irq_is_nmi(desc))
 	    return desc->tot_count;
 
 	for_each_possible_cpu(cpu)
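The extra `irq_is_nmi()` test is needed because NMI flows bump only the per-CPU counters, never the lock-protected `desc->tot_count`, so the `tot_count` fast path would under-report them. A sketch of the accounting helpers this relies on, modeled on kernel/irq/internals.h:

```c
/* Modeled on kernel/irq/internals.h: NMI handlers use the __ variant,
 * which skips the tot_count update that would need desc->lock. */
static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}
```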