@@ -18 +18 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/task.h>
+#include <linux/sched/isolation.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/task_work.h>
 
 #include "internals.h"
 
-#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
+#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
 __read_mostly bool force_irqthreads;
 EXPORT_SYMBOL_GPL(force_irqthreads);
 
@@ -34 +34 @@
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
-# endif
 #endif
 
 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
@@ -224 +223 @@
 {
 	struct irq_desc *desc = irq_data_to_desc(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	const struct cpumask  *prog_mask;
 	int ret;
+
+	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+	static struct cpumask tmp_mask;
 
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	ret = chip->irq_set_affinity(data, mask, force);
+	raw_spin_lock(&tmp_mask_lock);
+	/*
+	 * If this is a managed interrupt and housekeeping is enabled on
+	 * it check whether the requested affinity mask intersects with
+	 * a housekeeping CPU. If so, then remove the isolated CPUs from
+	 * the mask and just keep the housekeeping CPU(s). This prevents
+	 * the affinity setter from routing the interrupt to an isolated
+	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
+	 * interrupts on an isolated one.
+	 *
+	 * If the masks do not intersect or include online CPU(s) then
+	 * keep the requested mask. The isolated target CPUs are only
+	 * receiving interrupts when the I/O operation was submitted
+	 * directly from them.
+	 *
+	 * If all housekeeping CPUs in the affinity mask are offline, the
+	 * interrupt will be migrated by the CPU hotplug code once a
+	 * housekeeping CPU which belongs to the affinity mask comes
+	 * online.
+	 */
+	if (irqd_affinity_is_managed(data) &&
+	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
+		const struct cpumask *hk_mask;
+
+		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+
+		cpumask_and(&tmp_mask, mask, hk_mask);
+		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+			prog_mask = mask;
+		else
+			prog_mask = &tmp_mask;
+	} else {
+		prog_mask = mask;
+	}
+
+	/*
+	 * Make sure we only provide online CPUs to the irqchip,
+	 * unless we are being asked to force the affinity (in which
+	 * case we do as we are told).
+	 */
+	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
+	if (!force && !cpumask_empty(&tmp_mask))
+		ret = chip->irq_set_affinity(data, &tmp_mask, force);
+	else if (force)
+		ret = chip->irq_set_affinity(data, mask, force);
+	else
+		ret = -EINVAL;
+
+	raw_spin_unlock(&tmp_mask_lock);
+
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
 		cpumask_copy(desc->irq_common_data.affinity, mask);
+		fallthrough;
 	case IRQ_SET_MASK_OK_NOCOPY:
 		irq_validate_effective_affinity(data);
 		irq_set_thread_affinity(desc);
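The reworked irq_do_set_affinity() above is the core of the managed-interrupt isolation logic. As a minimal sketch, assuming the standard cpumask and housekeeping APIs, the mask-trimming rule reduces to a helper like this (trim_to_housekeeping() is hypothetical and not part of the patch):

```c
/* Hypothetical helper mirroring the rule above: prefer online
 * housekeeping CPUs from the requested mask; if the request contains
 * no online housekeeping CPU, use it unmodified. */
static const struct cpumask *trim_to_housekeeping(const struct cpumask *req,
						  struct cpumask *tmp)
{
	const struct cpumask *hk = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

	cpumask_and(tmp, req, hk);
	if (!cpumask_intersects(tmp, cpu_online_mask))
		return req;
	return tmp;
}
```

The housekeeping set consulted here is populated at boot, typically through an option such as `isolcpus=managed_irq,<cpulist>`. Note also the new `fallthrough;` in the switch, which makes the previously implicit fall-through explicit.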
@@ -242 +295 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(irq_do_set_affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 static inline int irq_set_affinity_pending(struct irq_data *data,
@@ -347 +401 @@
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__irq_set_affinity);
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
@@ -411 +464 @@
 	/* The release function is promised process context */
 	might_sleep();
 
-	if (!desc)
+	if (!desc || desc->istate & IRQS_NMI)
 		return -EINVAL;
 
 	/* Complete initialisation of *notify */
@@ -614 +667 @@
 }
 EXPORT_SYMBOL_GPL(disable_hardirq);
 
+/**
+ *	disable_nmi_nosync - disable an nmi without waiting
+ *	@irq: Interrupt to disable
+ *
+ *	Disable the selected interrupt line. Disables and enables are
+ *	nested.
+ *	The interrupt to disable must have been requested through request_nmi.
+ *	Unlike disable_nmi(), this function does not ensure existing
+ *	instances of the IRQ handler have completed before returning.
+ */
+void disable_nmi_nosync(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+}
+
 void __enable_irq(struct irq_desc *desc)
 {
 	switch (desc->depth) {
@@ -670 +738 @@
 }
 EXPORT_SYMBOL(enable_irq);
 
+/**
+ *	enable_nmi - enable handling of an nmi
+ *	@irq: Interrupt to enable
+ *
+ *	The interrupt to enable must have been requested through request_nmi.
+ *	Undoes the effect of one call to disable_nmi(). If this
+ *	matches the last disable, processing of interrupts on this
+ *	IRQ line is re-enabled.
+ */
+void enable_nmi(unsigned int irq)
+{
+	enable_irq(irq);
+}
+
 static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
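disable_nmi_nosync() and enable_nmi() are deliberately thin wrappers around disable_irq_nosync() and enable_irq(): NMI lines reuse the same desc->depth accounting, so disables and enables nest exactly as they do for ordinary interrupt lines. A full request/enable/free sketch follows the request_nmi() hunk further below.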
@@ -695 +777 @@
  *
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
+ *
+ *	Note: irq enable/disable state is completely orthogonal
+ *	to the enable/disable state of irq wake. An irq can be
+ *	disabled with disable_irq() and still wake the system as
+ *	long as the irq has wake enabled. If this does not hold,
+ *	then the underlying irq chip and the related driver need
+ *	to be investigated.
  */
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
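A hedged illustration of the orthogonality the new kernel-doc note describes, assuming a typical driver suspend path (the mydev names are hypothetical; disable_irq(), enable_irq_wake() and device_may_wakeup() are the real APIs):

```c
#include <linux/device.h>
#include <linux/interrupt.h>

struct mydev {
	int irq;
};

static int mydev_suspend(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);

	/* Stop normal interrupt handling across suspend ... */
	disable_irq(md->irq);

	/* ... yet keep the line armed as a system wakeup source. */
	if (device_may_wakeup(dev))
		enable_irq_wake(md->irq);

	return 0;
}
```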
@@ -704 +793 @@
 
 	if (!desc)
 		return -EINVAL;
+
+	/* Don't use NMIs as wake up interrupts please */
+	if (desc->istate & IRQS_NMI) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
@@ -727 +822 @@
 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	}
+
+out_unlock:
 	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
@@ -787 +884 @@
 	case IRQ_SET_MASK_OK_DONE:
 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 		irqd_set(&desc->irq_data, flags);
+		fallthrough;
 
 	case IRQ_SET_MASK_OK_NOCOPY:
 		flags = irqd_get_trigger_type(&desc->irq_data);
@@ -801 +899 @@
 		ret = 0;
 		break;
 	default:
-		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
+		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 	}
 	if (unmask)
@@ -979 +1077 @@
 #endif
 
 /*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
@@ -990 +1088 @@
 	irqreturn_t ret;
 
 	local_bh_disable();
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	if (ret == IRQ_HANDLED)
 		atomic_inc(&desc->threads_handled);
 
 	irq_finalize_oneshot(desc, action);
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_enable();
 	local_bh_enable();
 	return ret;
@@ -1067 +1165 @@
 }
 
 /*
+ * Internal function to notify that a interrupt thread is ready.
+ */
+static void irq_thread_set_ready(struct irq_desc *desc,
+				 struct irqaction *action)
+{
+	set_bit(IRQTF_READY, &action->thread_flags);
+	wake_up(&desc->wait_for_threads);
+}
+
+/*
+ * Internal function to wake up a interrupt thread and wait until it is
+ * ready.
+ */
+static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
+						  struct irqaction *action)
+{
+	if (!action || !action->thread)
+		return;
+
+	wake_up_process(action->thread);
+	wait_event(desc->wait_for_threads,
+		   test_bit(IRQTF_READY, &action->thread_flags));
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
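The two helpers just added implement a ready handshake for interrupt threads: the thread publishes IRQTF_READY as soon as it starts, and __setup_irq() (see the hunk near the end of this patch) now waits for that bit instead of merely waking the thread, the idea being that request_irq() should not return before the handler threads are actually able to run. The pattern in miniature, with hypothetical names:

```c
/* Hedged sketch of the startup handshake; ctx, READY and worker_fn
 * are hypothetical stand-ins for the desc/action machinery above. */
struct ctx {
	unsigned long		flags;
	wait_queue_head_t	waitq;
};
#define READY	0

static int worker_fn(void *data)
{
	struct ctx *c = data;

	set_bit(READY, &c->flags);	/* publish readiness ... */
	wake_up(&c->waitq);		/* ... and notify any waiter */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Creator side: start the thread, then block until it has come up. */
static void start_and_sync(struct ctx *c, struct task_struct *t)
{
	wake_up_process(t);
	wait_event(c->waitq, test_bit(READY, &c->flags));
}
```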
@@ -1077 +1200 @@
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
 
+	irq_thread_set_ready(desc, action);
+
+	sched_set_fifo(current);
+
 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 					&action->thread_flags))
 		handler_fn = irq_forced_thread_fn;
@@ -1084 +1211 @@
 		handler_fn = irq_thread_fn;
 
 	init_task_work(&on_exit_work, irq_thread_dtor);
-	task_work_add(current, &on_exit_work, false);
+	task_work_add(current, &on_exit_work, TWA_NONE);
 
 	irq_thread_check_affinity(desc, action);
 
@@ -1097 +1224 @@
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-		migrate_disable();
-		add_interrupt_randomness(action->irq, 0,
-				 desc->random_ip ^ (unsigned long) action);
-		migrate_enable();
-#endif
 		wake_threads_waitq(desc);
 	}
 
@@ -1199 +1320 @@
 		c->irq_release_resources(d);
 }
 
+static bool irq_supports_nmi(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+	/* Only IRQs directly managed by the root irqchip can be set as NMI */
+	if (d->parent_data)
+		return false;
+#endif
+	/* Don't support NMIs for chips behind a slow bus */
+	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
+		return false;
+
+	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
+}
+
+static int irq_nmi_setup(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct irq_chip *c = d->chip;
+
+	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
+}
+
+static void irq_nmi_teardown(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct irq_chip *c = d->chip;
+
+	if (c->irq_nmi_teardown)
+		c->irq_nmi_teardown(d);
+}
+
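irq_supports_nmi() encodes the constraints: the line must be handled directly by the root irqchip, the chip must not sit behind a slow (sleeping) bus, and it must advertise IRQCHIP_SUPPORTS_NMI. A hedged sketch of how an irqchip driver might opt in (the my_* callbacks are hypothetical; the two irq_nmi_* hooks and the flag are the ones probed above):

```c
static int my_nmi_setup(struct irq_data *d)
{
	/* Hypothetical: switch this line to NMI signalling in hardware. */
	return 0;
}

static void my_nmi_teardown(struct irq_data *d)
{
	/* Hypothetical: revert the line to normal interrupt signalling. */
}

static struct irq_chip my_chip = {
	.name			= "my-chip",
	.irq_mask		= my_mask,	/* assumed defined elsewhere */
	.irq_unmask		= my_unmask,	/* assumed defined elsewhere */
	.irq_nmi_setup		= my_nmi_setup,
	.irq_nmi_teardown	= my_nmi_teardown,
	/* No irq_bus_lock/irq_bus_sync_unlock: NMIs need a fast chip. */
	.flags			= IRQCHIP_SUPPORTS_NMI,
};
```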
@@ -1202 +1356 @@
 static int
 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 {
 	struct task_struct *t;
-	struct sched_param param = {
-		.sched_priority = MAX_USER_RT_PRIO/2,
-	};
 
 	if (!secondary) {
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -1213 +1364 @@
 	} else {
 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
 				   new->name);
-		param.sched_priority -= 1;
 	}
 
 	if (IS_ERR(t))
 		return PTR_ERR(t);
-
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
 
 	/*
 	 * We keep the reference to the task struct even if
 	 * the thread dies to avoid that the interrupt code
 	 * references an already freed task_struct.
 	 */
-	get_task_struct(t);
-	new->thread = t;
+	new->thread = get_task_struct(t);
 	/*
 	 * Tell the thread to set its affinity. This is
 	 * important for shared interrupt handlers as we do
@@ -1373 +1520 @@
 		 * fields must have IRQF_SHARED set and the bits which
 		 * set the trigger type must match. Also all must
 		 * agree on ONESHOT.
+		 * Interrupt lines used for NMIs cannot be shared.
 		 */
 		unsigned int oldtype;
+
+		if (desc->istate & IRQS_NMI) {
+			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
+				new->name, irq, desc->irq_data.chip->name);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
 
 		/*
 		 * If nobody did set the configuration before, inherit
@@ -1464 +1619 @@
 		 * has. The type flags are unreliable as the
 		 * underlying chip implementation can override them.
 		 */
-		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
-		       irq);
+		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
+		       new->name, irq);
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
 	if (!shared) {
-		init_waitqueue_head(&desc->wait_for_threads);
-
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc,
@@ -1514 +1667 @@
 			irq_settings_set_no_balancing(desc);
 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 		}
-
-		if (new->flags & IRQF_NO_SOFTIRQ_CALL)
-			irq_settings_set_no_softirq_call(desc);
 
 		if (irq_settings_can_autoenable(desc)) {
 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
@@ -1565 +1715 @@
 
 	irq_setup_timings(desc, new);
 
-	/*
-	 * Strictly no need to wake it up, but hung_task complains
-	 * when no hard interrupt wakes the thread up.
-	 */
-	if (new->thread)
-		wake_up_process(new->thread);
-	if (new->secondary)
-		wake_up_process(new->secondary->thread);
+	wake_up_and_wait_for_irq_thread_ready(desc, new);
+	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
 
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
@@ -1617 +1761 @@
 	module_put(desc->owner);
 	return ret;
 }
-
-/**
- *	setup_irq - setup an interrupt
- *	@irq: Interrupt line to setup
- *	@act: irqaction for the interrupt
- *
- * Used to statically setup interrupts in the early boot process.
- */
-int setup_irq(unsigned int irq, struct irqaction *act)
-{
-	int retval;
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
-		return -EINVAL;
-
-	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
-		return retval;
-
-	retval = __setup_irq(irq, desc, act);
-
-	if (retval)
-		irq_chip_pm_put(&desc->irq_data);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(setup_irq);
 
 /*
  * Internal function to unregister an irqaction - used to free
@@ -1787 +1903 @@
 }
 
 /**
- *	remove_irq - free an interrupt
- *	@irq: Interrupt line to free
- *	@act: irqaction for the interrupt
- *
- * Used to remove interrupts statically setup by the early boot process.
- */
-void remove_irq(unsigned int irq, struct irqaction *act)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
-		__free_irq(desc, act->dev_id);
-}
-EXPORT_SYMBOL_GPL(remove_irq);
-
-/**
  *	free_irq - free an interrupt allocated with request_irq
  *	@irq: Interrupt line to free
  *	@dev_id: Device identity to free
@@ -1842 +1942 @@
 	return devname;
 }
 EXPORT_SYMBOL(free_irq);
+
+/* This function must be called with desc->lock held */
+static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
+{
+	const char *devname = NULL;
+
+	desc->istate &= ~IRQS_NMI;
+
+	if (!WARN_ON(desc->action == NULL)) {
+		irq_pm_remove_action(desc, desc->action);
+		devname = desc->action->name;
+		unregister_handler_proc(irq, desc->action);
+
+		kfree(desc->action);
+		desc->action = NULL;
+	}
+
+	irq_settings_clr_disable_unlazy(desc);
+	irq_shutdown_and_deactivate(desc);
+
+	irq_release_resources(desc);
+
+	irq_chip_pm_put(&desc->irq_data);
+	module_put(desc->owner);
+
+	return devname;
+}
+
+const void *free_nmi(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	const void *devname;
+
+	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
+		return NULL;
+
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return NULL;
+
+	/* NMI still enabled */
+	if (WARN_ON(desc->depth == 0))
+		disable_nmi_nosync(irq);
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	irq_nmi_teardown(desc);
+	devname = __cleanup_nmi(irq, desc);
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return devname;
+}
 
 /**
  *	request_threaded_irq - allocate an interrupt line
@@ -2012 +2165 @@
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
 
+/**
+ *	request_nmi - allocate an interrupt line for NMI delivery
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Threaded handler for threaded interrupts.
+ *	@irqflags: Interrupt type flags
+ *	@name: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources and enables the
+ *	interrupt line and IRQ handling. It sets up the IRQ line
+ *	to be handled as an NMI.
+ *
+ *	An interrupt line delivering NMIs cannot be shared and IRQ handling
+ *	cannot be threaded.
+ *
+ *	Interrupt lines requested for NMI delivering must produce per cpu
+ *	interrupts and have auto enabling setting disabled.
+ *
+ *	Dev_id must be globally unique. Normally the address of the
+ *	device data structure is used as the cookie. Since the handler
+ *	receives this value it makes sense to use it.
+ *
+ *	If the interrupt line cannot be used to deliver NMIs, function
+ *	will fail and return a negative value.
+ */
+int request_nmi(unsigned int irq, irq_handler_t handler,
+		unsigned long irqflags, const char *name, void *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	unsigned long flags;
+	int retval;
+
+	if (irq == IRQ_NOTCONNECTED)
+		return -ENOTCONN;
+
+	/* NMI cannot be shared, used for Polling */
+	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
+		return -EINVAL;
+
+	if (!(irqflags & IRQF_PERCPU))
+		return -EINVAL;
+
+	if (!handler)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc || irq_settings_can_autoenable(desc) ||
+	    !irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
+	    !irq_supports_nmi(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
+	action->name = name;
+	action->dev_id = dev_id;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		goto err_out;
+
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		goto err_irq_setup;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	/* Setup NMI state */
+	desc->istate |= IRQS_NMI;
+	retval = irq_nmi_setup(desc);
+	if (retval) {
+		__cleanup_nmi(irq, desc);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		return -EINVAL;
+	}
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+
+err_irq_setup:
+	irq_chip_pm_put(&desc->irq_data);
+err_out:
+	kfree(action);
+
+	return retval;
+}
+
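A hedged end-to-end sketch of the non-percpu NMI lifecycle built from the functions in this patch (my_nmi_handler, my_cookie and the surrounding driver functions are hypothetical; the flag and auto-enable requirements mirror the checks in request_nmi() above):

```c
static unsigned long my_cookie;	/* stand-in for a real device structure */

static irqreturn_t my_nmi_handler(int irq, void *dev_id)
{
	/* Runs in NMI context: keep it minimal and lock-free. */
	return IRQ_HANDLED;
}

static int my_nmi_init(unsigned int irq)
{
	int err;

	/* The line cannot be shared, must carry IRQF_PERCPU and must
	 * have been configured as not auto-enabled. */
	err = request_nmi(irq, my_nmi_handler, IRQF_PERCPU,
			  "my-watchdog", &my_cookie);
	if (err)
		return err;

	enable_nmi(irq);		/* start delivery */
	return 0;
}

static void my_nmi_exit(unsigned int irq)
{
	disable_nmi_nosync(irq);	/* stop delivery, don't wait */
	free_nmi(irq, &my_cookie);	/* tear down and release the line */
}
```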
@@ -2015 +2263 @@
 void enable_percpu_irq(unsigned int irq, unsigned int type)
 {
 	unsigned int cpu = smp_processor_id();
@@ -2045 +2293 @@
 	irq_put_desc_unlock(desc, flags);
 }
 EXPORT_SYMBOL_GPL(enable_percpu_irq);
+
+void enable_percpu_nmi(unsigned int irq, unsigned int type)
+{
+	enable_percpu_irq(irq, type);
+}
 
 /**
  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
@@ -2085 +2338 @@
 }
 EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
+void disable_percpu_nmi(unsigned int irq)
+{
+	disable_percpu_irq(irq);
+}
+
 /*
  * Internal function to unregister a percpu irqaction.
  */
@@ -2115 +2373 @@
 
 	/* Found it - now remove it from the list of entries: */
 	desc->action = NULL;
+
+	desc->istate &= ~IRQS_NMI;
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
@@ -2168 +2428 @@
 	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL_GPL(free_percpu_irq);
+
+void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	if (WARN_ON(!(desc->istate & IRQS_NMI)))
+		return;
+
+	kfree(__free_percpu_irq(irq, dev_id));
+}
 
 /**
  *	setup_percpu_irq - setup a per-cpu interrupt
@@ -2258 +2531 @@
 }
 EXPORT_SYMBOL_GPL(__request_percpu_irq);
 
+/**
+ *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *	@name: An ascii name for the claiming device
+ *	@dev_id: A percpu cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
+ *	have to be setup on each CPU by calling prepare_percpu_nmi() before
+ *	being enabled on the same CPU by using enable_percpu_nmi().
+ *
+ *	Dev_id must be globally unique. It is a per-cpu variable, and
+ *	the handler gets called with the interrupted CPU's instance of
+ *	that variable.
+ *
+ *	Interrupt lines requested for NMI delivering should have auto enabling
+ *	setting disabled.
+ *
+ *	If the interrupt line cannot be used to deliver NMIs, function
+ *	will fail returning a negative value.
+ */
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+		       const char *name, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	unsigned long flags;
+	int retval;
+
+	if (!handler)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc) ||
+	    irq_settings_can_autoenable(desc) ||
+	    !irq_supports_nmi(desc))
+		return -EINVAL;
+
+	/* The line cannot already be NMI */
+	if (desc->istate & IRQS_NMI)
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
+		| IRQF_NOBALANCING;
+	action->name = name;
+	action->percpu_dev_id = dev_id;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		goto err_out;
+
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		goto err_irq_setup;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->istate |= IRQS_NMI;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+
+err_irq_setup:
+	irq_chip_pm_put(&desc->irq_data);
+err_out:
+	kfree(action);
+
+	return retval;
+}
+
+/**
+ *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
+ *	@irq: Interrupt line to prepare for NMI delivery
+ *
+ *	This call prepares an interrupt line to deliver NMI on the current CPU,
+ *	before that interrupt line gets enabled with enable_percpu_nmi().
+ *
+ *	As a CPU local operation, this should be called from non-preemptible
+ *	context.
+ *
+ *	If the interrupt line cannot be used to deliver NMIs, function
+ *	will fail returning a negative value.
+ */
+int prepare_percpu_nmi(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+	int ret = 0;
+
+	WARN_ON(preemptible());
+
+	desc = irq_get_desc_lock(irq, &flags,
+				 IRQ_GET_DESC_CHECK_PERCPU);
+	if (!desc)
+		return -EINVAL;
+
+	if (WARN(!(desc->istate & IRQS_NMI),
+		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
+		 irq)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = irq_nmi_setup(desc);
+	if (ret) {
+		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
+		goto out;
+	}
+
+out:
+	irq_put_desc_unlock(desc, flags);
+	return ret;
+}
+
+/**
+ *	teardown_percpu_nmi - undoes NMI setup of IRQ line
+ *	@irq: Interrupt line from which CPU local NMI configuration should be
+ *	      removed
+ *
+ *	This call undoes the setup done by prepare_percpu_nmi().
+ *
+ *	IRQ line should not be enabled for the current CPU.
+ *
+ *	As a CPU local operation, this should be called from non-preemptible
+ *	context.
+ */
+void teardown_percpu_nmi(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+
+	WARN_ON(preemptible());
+
+	desc = irq_get_desc_lock(irq, &flags,
+				 IRQ_GET_DESC_CHECK_PERCPU);
+	if (!desc)
+		return;
+
+	if (WARN_ON(!(desc->istate & IRQS_NMI)))
+		goto out;
+
+	irq_nmi_teardown(desc);
+out:
+	irq_put_desc_unlock(desc, flags);
+}
+
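The per-CPU NMI API is deliberately two-phase: one global request_percpu_nmi(), then a prepare/enable pair executed on every CPU that should take the NMI, exactly as the kernel-doc above states. A hedged lifecycle sketch (the my_pmu_* names are hypothetical; the per-CPU calls would typically run from CPU-hotplug "starting"/"dying" callbacks or on_each_cpu()):

```c
static DEFINE_PER_CPU(unsigned long, my_pmu_cookie);

static irqreturn_t my_pmu_handler(int irq, void *dev_id)
{
	/* Called in NMI context with this CPU's cookie instance. */
	return IRQ_HANDLED;
}

static int my_pmu_init(unsigned int irq)
{
	/* Once, globally; the line must be per-CPU-devid capable and
	 * not auto-enabled, per the checks above. */
	return request_percpu_nmi(irq, my_pmu_handler, "my-pmu",
				  &my_pmu_cookie);
}

static int my_pmu_starting_cpu(unsigned int irq)
{
	/* On each CPU, from non-preemptible context. */
	int err = prepare_percpu_nmi(irq);

	if (!err)
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return err;
}

static void my_pmu_dying_cpu(unsigned int irq)
{
	/* Teardown mirrors setup, again per CPU. */
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

/* Finally, once no CPU uses the line: free_percpu_nmi(irq, &my_pmu_cookie). */
```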
@@ -2261 +2686 @@
 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
 			    bool *state)
 {
@@ -2266 +2691 @@
 
 	do {
 		chip = irq_data_get_irq_chip(data);
+		if (WARN_ON_ONCE(!chip))
+			return -ENODEV;
 		if (chip->irq_get_irqchip_state)
 			break;
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -2343 +2770 @@
 
 	do {
 		chip = irq_data_get_irq_chip(data);
+		if (WARN_ON_ONCE(!chip)) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
 		if (chip->irq_set_irqchip_state)
 			break;
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -2355 +2786 @@
 	if (data)
 		err = chip->irq_set_irqchip_state(data, which, val);
 
+out_unlock:
 	irq_put_desc_busunlock(desc, flags);
 	return err;
 }