.. | .. |
---|
18 | 18 | #include <linux/sched.h> |
---|
19 | 19 | #include <linux/sched/rt.h> |
---|
20 | 20 | #include <linux/sched/task.h> |
---|
| 21 | +#include <linux/sched/isolation.h> |
---|
21 | 22 | #include <uapi/linux/sched/types.h> |
---|
22 | 23 | #include <linux/task_work.h> |
---|
23 | 24 | |
---|
24 | 25 | #include "internals.h" |
---|
25 | 26 | |
---|
26 | | -#ifdef CONFIG_IRQ_FORCED_THREADING |
---|
27 | | -# ifndef CONFIG_PREEMPT_RT_BASE |
---|
| 27 | +#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT) |
---|
28 | 28 | __read_mostly bool force_irqthreads; |
---|
29 | 29 | EXPORT_SYMBOL_GPL(force_irqthreads); |
---|
30 | 30 | |
---|
.. | .. |
---|
34 | 34 | return 0; |
---|
35 | 35 | } |
---|
36 | 36 | early_param("threadirqs", setup_forced_irqthreads); |
---|
37 | | -# endif |
---|
38 | 37 | #endif |
---|
39 | 38 | |
---|
40 | 39 | static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) |
---|
.. | .. |
---|
224 | 223 | { |
---|
225 | 224 | struct irq_desc *desc = irq_data_to_desc(data); |
---|
226 | 225 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
---|
| 226 | + const struct cpumask *prog_mask; |
---|
227 | 227 | int ret; |
---|
| 228 | + |
---|
| 229 | + static DEFINE_RAW_SPINLOCK(tmp_mask_lock); |
---|
| 230 | + static struct cpumask tmp_mask; |
---|
228 | 231 | |
---|
229 | 232 | if (!chip || !chip->irq_set_affinity) |
---|
230 | 233 | return -EINVAL; |
---|
231 | 234 | |
---|
232 | | - ret = chip->irq_set_affinity(data, mask, force); |
---|
| 235 | + raw_spin_lock(&tmp_mask_lock); |
---|
| 236 | + /* |
---|
| 237 | + * If this is a managed interrupt and housekeeping is enabled on |
---|
| 238 | + * it check whether the requested affinity mask intersects with |
---|
| 239 | + * a housekeeping CPU. If so, then remove the isolated CPUs from |
---|
| 240 | + * the mask and just keep the housekeeping CPU(s). This prevents |
---|
| 241 | + * the affinity setter from routing the interrupt to an isolated |
---|
| 242 | + * CPU to avoid that I/O submitted from a housekeeping CPU causes |
---|
| 243 | + * interrupts on an isolated one. |
---|
| 244 | + * |
---|
| 245 | + * If the masks do not intersect or include online CPU(s) then |
---|
| 246 | + * keep the requested mask. The isolated target CPUs are only |
---|
| 247 | + * receiving interrupts when the I/O operation was submitted |
---|
| 248 | + * directly from them. |
---|
| 249 | + * |
---|
| 250 | + * If all housekeeping CPUs in the affinity mask are offline, the |
---|
| 251 | + * interrupt will be migrated by the CPU hotplug code once a |
---|
| 252 | + * housekeeping CPU which belongs to the affinity mask comes |
---|
| 253 | + * online. |
---|
| 254 | + */ |
---|
| 255 | + if (irqd_affinity_is_managed(data) && |
---|
| 256 | + housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { |
---|
| 257 | + const struct cpumask *hk_mask; |
---|
| 258 | + |
---|
| 259 | + hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); |
---|
| 260 | + |
---|
| 261 | + cpumask_and(&tmp_mask, mask, hk_mask); |
---|
| 262 | + if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) |
---|
| 263 | + prog_mask = mask; |
---|
| 264 | + else |
---|
| 265 | + prog_mask = &tmp_mask; |
---|
| 266 | + } else { |
---|
| 267 | + prog_mask = mask; |
---|
| 268 | + } |
---|
| 269 | + |
---|
| 270 | + /* |
---|
| 271 | + * Make sure we only provide online CPUs to the irqchip, |
---|
| 272 | + * unless we are being asked to force the affinity (in which |
---|
| 273 | + * case we do as we are told). |
---|
| 274 | + */ |
---|
| 275 | + cpumask_and(&tmp_mask, prog_mask, cpu_online_mask); |
---|
| 276 | + if (!force && !cpumask_empty(&tmp_mask)) |
---|
| 277 | + ret = chip->irq_set_affinity(data, &tmp_mask, force); |
---|
| 278 | + else if (force) |
---|
| 279 | + ret = chip->irq_set_affinity(data, mask, force); |
---|
| 280 | + else |
---|
| 281 | + ret = -EINVAL; |
---|
| 282 | + |
---|
| 283 | + raw_spin_unlock(&tmp_mask_lock); |
---|
| 284 | + |
---|
233 | 285 | switch (ret) { |
---|
234 | 286 | case IRQ_SET_MASK_OK: |
---|
235 | 287 | case IRQ_SET_MASK_OK_DONE: |
---|
236 | 288 | cpumask_copy(desc->irq_common_data.affinity, mask); |
---|
| 289 | + fallthrough; |
---|
237 | 290 | case IRQ_SET_MASK_OK_NOCOPY: |
---|
238 | 291 | irq_validate_effective_affinity(data); |
---|
239 | 292 | irq_set_thread_affinity(desc); |
---|
.. | .. |
---|
242 | 295 | |
---|
243 | 296 | return ret; |
---|
244 | 297 | } |
---|
| 298 | +EXPORT_SYMBOL_GPL(irq_do_set_affinity); |
---|
245 | 299 | |
---|
246 | 300 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
---|
247 | 301 | static inline int irq_set_affinity_pending(struct irq_data *data, |
---|
.. | .. |
---|
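The irq_do_set_affinity() hunk above routes managed interrupts to housekeeping CPUs when managed-IRQ isolation is in use, and only falls back to the requested mask when no online housekeeping CPU is left in it. Below is a minimal model of that decision, using plain bitmasks as stand-ins for the kernel cpumasks; the function and parameter names are illustrative, not part of the patch.

```c
#include <stdbool.h>

/*
 * Illustrative model only: "requested", "housekeeping" and "online" stand
 * in for mask, hk_mask and cpu_online_mask in irq_do_set_affinity().
 */
static unsigned long effective_affinity(unsigned long requested,
					unsigned long housekeeping,
					unsigned long online,
					bool managed, bool force)
{
	unsigned long prog = requested;

	if (managed) {
		unsigned long hk = requested & housekeeping;

		/* Keep the trimmed mask only if it still has an online CPU */
		if (hk & online)
			prog = hk;
	}

	if (force)
		return requested;	/* forced: hand the chip the raw mask */

	return prog & online;		/* an empty result maps to -EINVAL above */
}
```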
288 | 342 | * If the interrupt is not yet activated, just store the affinity |
---|
289 | 343 | * mask and do not call the chip driver at all. On activation the |
---|
290 | 344 | * driver has to make sure anyway that the interrupt is in a |
---|
291 | | - * useable state so startup works. |
---|
| 345 | + * usable state so startup works. |
---|
292 | 346 | */ |
---|
293 | 347 | if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || |
---|
294 | 348 | irqd_is_activated(data) || !irqd_affinity_on_activate(data)) |
---|
.. | .. |
---|
347 | 401 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
348 | 402 | return ret; |
---|
349 | 403 | } |
---|
350 | | -EXPORT_SYMBOL_GPL(__irq_set_affinity); |
---|
351 | 404 | |
---|
352 | 405 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
---|
353 | 406 | { |
---|
.. | .. |
---|
411 | 464 | /* The release function is promised process context */ |
---|
412 | 465 | might_sleep(); |
---|
413 | 466 | |
---|
414 | | - if (!desc) |
---|
| 467 | + if (!desc || desc->istate & IRQS_NMI) |
---|
415 | 468 | return -EINVAL; |
---|
416 | 469 | |
---|
417 | 470 | /* Complete initialisation of *notify */ |
---|
.. | .. |
---|
614 | 667 | } |
---|
615 | 668 | EXPORT_SYMBOL_GPL(disable_hardirq); |
---|
616 | 669 | |
---|
| 670 | +/** |
---|
| 671 | + * disable_nmi_nosync - disable an nmi without waiting |
---|
| 672 | + * @irq: Interrupt to disable |
---|
| 673 | + * |
---|
| 674 | + * Disable the selected interrupt line. Disables and enables are |
---|
| 675 | + * nested. |
---|
| 676 | + * The interrupt to disable must have been requested through request_nmi. |
---|
| 677 | + * Unlike disable_nmi(), this function does not ensure existing |
---|
| 678 | + * instances of the IRQ handler have completed before returning. |
---|
| 679 | + */ |
---|
| 680 | +void disable_nmi_nosync(unsigned int irq) |
---|
| 681 | +{ |
---|
| 682 | + disable_irq_nosync(irq); |
---|
| 683 | +} |
---|
| 684 | + |
---|
617 | 685 | void __enable_irq(struct irq_desc *desc) |
---|
618 | 686 | { |
---|
619 | 687 | switch (desc->depth) { |
---|
.. | .. |
---|
670 | 738 | } |
---|
671 | 739 | EXPORT_SYMBOL(enable_irq); |
---|
672 | 740 | |
---|
| 741 | +/** |
---|
| 742 | + * enable_nmi - enable handling of an nmi |
---|
| 743 | + * @irq: Interrupt to enable |
---|
| 744 | + * |
---|
| 745 | + * The interrupt to enable must have been requested through request_nmi. |
---|
| 746 | + * Undoes the effect of one call to disable_nmi(). If this |
---|
| 747 | + * matches the last disable, processing of interrupts on this |
---|
| 748 | + * IRQ line is re-enabled. |
---|
| 749 | + */ |
---|
| 750 | +void enable_nmi(unsigned int irq) |
---|
| 751 | +{ |
---|
| 752 | + enable_irq(irq); |
---|
| 753 | +} |
---|
| 754 | + |
---|
673 | 755 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
---|
674 | 756 | { |
---|
675 | 757 | struct irq_desc *desc = irq_to_desc(irq); |
---|
.. | .. |
---|
695 | 777 | * |
---|
696 | 778 | * Wakeup mode lets this IRQ wake the system from sleep |
---|
697 | 779 | * states like "suspend to RAM". |
---|
| 780 | + * |
---|
| 781 | + * Note: irq enable/disable state is completely orthogonal |
---|
| 782 | + * to the enable/disable state of irq wake. An irq can be |
---|
| 783 | + * disabled with disable_irq() and still wake the system as |
---|
| 784 | + * long as the irq has wake enabled. If this does not hold, |
---|
| 785 | + * then the underlying irq chip and the related driver need |
---|
| 786 | + * to be investigated. |
---|
698 | 787 | */ |
---|
699 | 788 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
---|
700 | 789 | { |
---|
.. | .. |
---|
704 | 793 | |
---|
705 | 794 | if (!desc) |
---|
706 | 795 | return -EINVAL; |
---|
| 796 | + |
---|
| 797 | + /* Don't use NMIs as wake up interrupts please */ |
---|
| 798 | + if (desc->istate & IRQS_NMI) { |
---|
| 799 | + ret = -EINVAL; |
---|
| 800 | + goto out_unlock; |
---|
| 801 | + } |
---|
707 | 802 | |
---|
708 | 803 | /* wakeup-capable irqs can be shared between drivers that |
---|
709 | 804 | * don't need to have the same sleep mode behaviors. |
---|
.. | .. |
---|
727 | 822 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
---|
728 | 823 | } |
---|
729 | 824 | } |
---|
| 825 | + |
---|
| 826 | +out_unlock: |
---|
730 | 827 | irq_put_desc_busunlock(desc, flags); |
---|
731 | 828 | return ret; |
---|
732 | 829 | } |
---|
.. | .. |
---|
787 | 884 | case IRQ_SET_MASK_OK_DONE: |
---|
788 | 885 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
---|
789 | 886 | irqd_set(&desc->irq_data, flags); |
---|
| 887 | + fallthrough; |
---|
790 | 888 | |
---|
791 | 889 | case IRQ_SET_MASK_OK_NOCOPY: |
---|
792 | 890 | flags = irqd_get_trigger_type(&desc->irq_data); |
---|
.. | .. |
---|
801 | 899 | ret = 0; |
---|
802 | 900 | break; |
---|
803 | 901 | default: |
---|
804 | | - pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", |
---|
| 902 | + pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", |
---|
805 | 903 | flags, irq_desc_get_irq(desc), chip->irq_set_type); |
---|
806 | 904 | } |
---|
807 | 905 | if (unmask) |
---|
.. | .. |
---|
902 | 1000 | * to IRQS_INPROGRESS and the irq line is masked forever. |
---|
903 | 1001 | * |
---|
904 | 1002 | * This also serializes the state of shared oneshot handlers |
---|
905 | | - * versus "desc->threads_onehsot |= action->thread_mask;" in |
---|
| 1003 | + * versus "desc->threads_oneshot |= action->thread_mask;" in |
---|
906 | 1004 | * irq_wake_thread(). See the comment there which explains the |
---|
907 | 1005 | * serialization. |
---|
908 | 1006 | */ |
---|
.. | .. |
---|
979 | 1077 | #endif |
---|
980 | 1078 | |
---|
981 | 1079 | /* |
---|
982 | | - * Interrupts which are not explicitely requested as threaded |
---|
| 1080 | + * Interrupts which are not explicitly requested as threaded |
---|
983 | 1081 | * interrupts rely on the implicit bh/preempt disable of the hard irq |
---|
984 | 1082 | * context. So we need to disable bh here to avoid deadlocks and other |
---|
985 | 1083 | * side effects. |
---|
.. | .. |
---|
990 | 1088 | irqreturn_t ret; |
---|
991 | 1089 | |
---|
992 | 1090 | local_bh_disable(); |
---|
993 | | - if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) |
---|
| 1091 | + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
---|
994 | 1092 | local_irq_disable(); |
---|
995 | 1093 | ret = action->thread_fn(action->irq, action->dev_id); |
---|
996 | 1094 | if (ret == IRQ_HANDLED) |
---|
997 | 1095 | atomic_inc(&desc->threads_handled); |
---|
998 | 1096 | |
---|
999 | 1097 | irq_finalize_oneshot(desc, action); |
---|
1000 | | - if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) |
---|
| 1098 | + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
---|
1001 | 1099 | local_irq_enable(); |
---|
1002 | 1100 | local_bh_enable(); |
---|
1003 | 1101 | return ret; |
---|
.. | .. |
---|
1067 | 1165 | } |
---|
1068 | 1166 | |
---|
1069 | 1167 | /* |
---|
| 1168 | + * Internal function to notify that an interrupt thread is ready. |
---|
| 1169 | + */ |
---|
| 1170 | +static void irq_thread_set_ready(struct irq_desc *desc, |
---|
| 1171 | + struct irqaction *action) |
---|
| 1172 | +{ |
---|
| 1173 | + set_bit(IRQTF_READY, &action->thread_flags); |
---|
| 1174 | + wake_up(&desc->wait_for_threads); |
---|
| 1175 | +} |
---|
| 1176 | + |
---|
| 1177 | +/* |
---|
| 1178 | + * Internal function to wake up an interrupt thread and wait until it is |
---|
| 1179 | + * ready. |
---|
| 1180 | + */ |
---|
| 1181 | +static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, |
---|
| 1182 | + struct irqaction *action) |
---|
| 1183 | +{ |
---|
| 1184 | + if (!action || !action->thread) |
---|
| 1185 | + return; |
---|
| 1186 | + |
---|
| 1187 | + wake_up_process(action->thread); |
---|
| 1188 | + wait_event(desc->wait_for_threads, |
---|
| 1189 | + test_bit(IRQTF_READY, &action->thread_flags)); |
---|
| 1190 | +} |
---|
| 1191 | + |
---|
| 1192 | +/* |
---|
1070 | 1193 | * Interrupt handler thread |
---|
1071 | 1194 | */ |
---|
1072 | 1195 | static int irq_thread(void *data) |
---|
.. | .. |
---|
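The irq_thread_set_ready()/wake_up_and_wait_for_irq_thread_ready() pair above is a startup handshake: the new interrupt thread flags itself ready and wakes desc->wait_for_threads, while the creating context wakes the thread and blocks until that flag is set, so the request does not complete before the thread is actually running. A self-contained sketch of the same pattern outside the IRQ code, assuming ordinary kthread context (all names here are placeholders):

```c
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define READY_BIT	0			/* placeholder flag bit */

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static unsigned long demo_flags;

static int demo_thread_fn(void *data)
{
	/* equivalent of irq_thread_set_ready() */
	set_bit(READY_BIT, &demo_flags);
	wake_up(&demo_wq);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int demo_start_and_sync(void)
{
	struct task_struct *t = kthread_create(demo_thread_fn, NULL, "demo");

	if (IS_ERR(t))
		return PTR_ERR(t);

	/* equivalent of wake_up_and_wait_for_irq_thread_ready() */
	wake_up_process(t);
	wait_event(demo_wq, test_bit(READY_BIT, &demo_flags));
	return 0;
}
```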
1077 | 1200 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
---|
1078 | 1201 | struct irqaction *action); |
---|
1079 | 1202 | |
---|
| 1203 | + irq_thread_set_ready(desc, action); |
---|
| 1204 | + |
---|
1080 | 1205 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, |
---|
1081 | 1206 | &action->thread_flags)) |
---|
1082 | 1207 | handler_fn = irq_forced_thread_fn; |
---|
.. | .. |
---|
1084 | 1209 | handler_fn = irq_thread_fn; |
---|
1085 | 1210 | |
---|
1086 | 1211 | init_task_work(&on_exit_work, irq_thread_dtor); |
---|
1087 | | - task_work_add(current, &on_exit_work, false); |
---|
| 1212 | + task_work_add(current, &on_exit_work, TWA_NONE); |
---|
1088 | 1213 | |
---|
1089 | 1214 | irq_thread_check_affinity(desc, action); |
---|
1090 | 1215 | |
---|
.. | .. |
---|
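task_work_add() above now takes a notification mode rather than the old bool: TWA_NONE just queues the callback (the old `false`), while TWA_RESUME or TWA_SIGNAL would additionally kick the task. A small usage sketch with placeholder names, assuming ordinary process context:

```c
#include <linux/sched.h>
#include <linux/task_work.h>

static struct callback_head demo_work;

/* Runs later in the task's own context, once it processes its task work */
static void demo_work_fn(struct callback_head *head)
{
}

static int demo_queue_work(void)
{
	init_task_work(&demo_work, demo_work_fn);
	/* TWA_NONE: queue only, no wakeup or signal of the target task */
	return task_work_add(current, &demo_work, TWA_NONE);
}
```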
1097 | 1222 | if (action_ret == IRQ_WAKE_THREAD) |
---|
1098 | 1223 | irq_wake_secondary(desc, action); |
---|
1099 | 1224 | |
---|
1100 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1101 | | - migrate_disable(); |
---|
1102 | | - add_interrupt_randomness(action->irq, 0, |
---|
1103 | | - desc->random_ip ^ (unsigned long) action); |
---|
1104 | | - migrate_enable(); |
---|
1105 | | -#endif |
---|
1106 | 1225 | wake_threads_waitq(desc); |
---|
1107 | 1226 | } |
---|
1108 | 1227 | |
---|
.. | .. |
---|
1199 | 1318 | c->irq_release_resources(d); |
---|
1200 | 1319 | } |
---|
1201 | 1320 | |
---|
| 1321 | +static bool irq_supports_nmi(struct irq_desc *desc) |
---|
| 1322 | +{ |
---|
| 1323 | + struct irq_data *d = irq_desc_get_irq_data(desc); |
---|
| 1324 | + |
---|
| 1325 | +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
---|
| 1326 | + /* Only IRQs directly managed by the root irqchip can be set as NMI */ |
---|
| 1327 | + if (d->parent_data) |
---|
| 1328 | + return false; |
---|
| 1329 | +#endif |
---|
| 1330 | + /* Don't support NMIs for chips behind a slow bus */ |
---|
| 1331 | + if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) |
---|
| 1332 | + return false; |
---|
| 1333 | + |
---|
| 1334 | + return d->chip->flags & IRQCHIP_SUPPORTS_NMI; |
---|
| 1335 | +} |
---|
| 1336 | + |
---|
| 1337 | +static int irq_nmi_setup(struct irq_desc *desc) |
---|
| 1338 | +{ |
---|
| 1339 | + struct irq_data *d = irq_desc_get_irq_data(desc); |
---|
| 1340 | + struct irq_chip *c = d->chip; |
---|
| 1341 | + |
---|
| 1342 | + return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; |
---|
| 1343 | +} |
---|
| 1344 | + |
---|
| 1345 | +static void irq_nmi_teardown(struct irq_desc *desc) |
---|
| 1346 | +{ |
---|
| 1347 | + struct irq_data *d = irq_desc_get_irq_data(desc); |
---|
| 1348 | + struct irq_chip *c = d->chip; |
---|
| 1349 | + |
---|
| 1350 | + if (c->irq_nmi_teardown) |
---|
| 1351 | + c->irq_nmi_teardown(d); |
---|
| 1352 | +} |
---|
| 1353 | + |
---|
1202 | 1354 | static int |
---|
1203 | 1355 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) |
---|
1204 | 1356 | { |
---|
1205 | 1357 | struct task_struct *t; |
---|
1206 | | - struct sched_param param = { |
---|
1207 | | - .sched_priority = MAX_USER_RT_PRIO/2, |
---|
1208 | | - }; |
---|
1209 | 1358 | |
---|
1210 | 1359 | if (!secondary) { |
---|
1211 | 1360 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
---|
.. | .. |
---|
1213 | 1362 | } else { |
---|
1214 | 1363 | t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, |
---|
1215 | 1364 | new->name); |
---|
1216 | | - param.sched_priority -= 1; |
---|
1217 | 1365 | } |
---|
1218 | 1366 | |
---|
1219 | 1367 | if (IS_ERR(t)) |
---|
1220 | 1368 | return PTR_ERR(t); |
---|
1221 | 1369 | |
---|
1222 | | - sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); |
---|
| 1370 | + sched_set_fifo(t); |
---|
1223 | 1371 | |
---|
1224 | 1372 | /* |
---|
1225 | 1373 | * We keep the reference to the task struct even if |
---|
1226 | 1374 | * the thread dies to avoid that the interrupt code |
---|
1227 | 1375 | * references an already freed task_struct. |
---|
1228 | 1376 | */ |
---|
1229 | | - get_task_struct(t); |
---|
1230 | | - new->thread = t; |
---|
| 1377 | + new->thread = get_task_struct(t); |
---|
1231 | 1378 | /* |
---|
1232 | 1379 | * Tell the thread to set its affinity. This is |
---|
1233 | 1380 | * important for shared interrupt handlers as we do |
---|
.. | .. |
---|
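irq_supports_nmi() above only admits lines whose root irqchip is not behind a slow bus and advertises IRQCHIP_SUPPORTS_NMI, while irq_nmi_setup()/irq_nmi_teardown() forward to the chip's callbacks. A hedged sketch of what an irqchip driver would provide to opt in; the chip, its callbacks and the mask/unmask stubs are hypothetical:

```c
#include <linux/irq.h>

static void demo_chip_mask(struct irq_data *d)		{ /* mask in hardware */ }
static void demo_chip_unmask(struct irq_data *d)	{ /* unmask in hardware */ }

static int demo_chip_nmi_setup(struct irq_data *d)
{
	/* switch this line to NMI delivery in the hardware */
	return 0;
}

static void demo_chip_nmi_teardown(struct irq_data *d)
{
	/* switch the line back to normal IRQ delivery */
}

static struct irq_chip demo_chip = {
	.name			= "demo",
	.irq_mask		= demo_chip_mask,
	.irq_unmask		= demo_chip_unmask,
	.irq_nmi_setup		= demo_chip_nmi_setup,
	.irq_nmi_teardown	= demo_chip_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};
```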
1373 | 1520 | * fields must have IRQF_SHARED set and the bits which |
---|
1374 | 1521 | * set the trigger type must match. Also all must |
---|
1375 | 1522 | * agree on ONESHOT. |
---|
| 1523 | + * Interrupt lines used for NMIs cannot be shared. |
---|
1376 | 1524 | */ |
---|
1377 | 1525 | unsigned int oldtype; |
---|
| 1526 | + |
---|
| 1527 | + if (desc->istate & IRQS_NMI) { |
---|
| 1528 | + pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", |
---|
| 1529 | + new->name, irq, desc->irq_data.chip->name); |
---|
| 1530 | + ret = -EINVAL; |
---|
| 1531 | + goto out_unlock; |
---|
| 1532 | + } |
---|
1378 | 1533 | |
---|
1379 | 1534 | /* |
---|
1380 | 1535 | * If nobody did set the configuration before, inherit |
---|
.. | .. |
---|
1464 | 1619 | * has. The type flags are unreliable as the |
---|
1465 | 1620 | * underlying chip implementation can override them. |
---|
1466 | 1621 | */ |
---|
1467 | | - pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", |
---|
1468 | | - irq); |
---|
| 1622 | + pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", |
---|
| 1623 | + new->name, irq); |
---|
1469 | 1624 | ret = -EINVAL; |
---|
1470 | 1625 | goto out_unlock; |
---|
1471 | 1626 | } |
---|
1472 | 1627 | |
---|
1473 | 1628 | if (!shared) { |
---|
1474 | | - init_waitqueue_head(&desc->wait_for_threads); |
---|
1475 | | - |
---|
1476 | 1629 | /* Setup the type (level, edge polarity) if configured: */ |
---|
1477 | 1630 | if (new->flags & IRQF_TRIGGER_MASK) { |
---|
1478 | 1631 | ret = __irq_set_trigger(desc, |
---|
.. | .. |
---|
1515 | 1668 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); |
---|
1516 | 1669 | } |
---|
1517 | 1670 | |
---|
1518 | | - if (new->flags & IRQF_NO_SOFTIRQ_CALL) |
---|
1519 | | - irq_settings_set_no_softirq_call(desc); |
---|
1520 | | - |
---|
1521 | | - if (irq_settings_can_autoenable(desc)) { |
---|
| 1671 | + if (!(new->flags & IRQF_NO_AUTOEN) && |
---|
| 1672 | + irq_settings_can_autoenable(desc)) { |
---|
1522 | 1673 | irq_startup(desc, IRQ_RESEND, IRQ_START_COND); |
---|
1523 | 1674 | } else { |
---|
1524 | 1675 | /* |
---|
.. | .. |
---|
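With the check above, a request carrying IRQF_NO_AUTOEN skips irq_startup() even on descriptors that would normally auto-enable, so the driver turns the line on later with enable_irq(); this replaces the older two-step pattern of setting IRQ_NOAUTOEN on the descriptor before request_irq(). A hedged driver-side sketch (handler, name and cookie are placeholders):

```c
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe_irq(unsigned int irq, void *dev)
{
	int ret;

	/* Request the line but leave it disabled until the device is ready */
	ret = request_irq(irq, demo_handler, IRQF_NO_AUTOEN, "demo", dev);
	if (ret)
		return ret;

	/* ... finish hardware initialisation ... */

	enable_irq(irq);	/* the first enable undoes the initial disable */
	return 0;
}
```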
1565 | 1716 | |
---|
1566 | 1717 | irq_setup_timings(desc, new); |
---|
1567 | 1718 | |
---|
1568 | | - /* |
---|
1569 | | - * Strictly no need to wake it up, but hung_task complains |
---|
1570 | | - * when no hard interrupt wakes the thread up. |
---|
1571 | | - */ |
---|
1572 | | - if (new->thread) |
---|
1573 | | - wake_up_process(new->thread); |
---|
1574 | | - if (new->secondary) |
---|
1575 | | - wake_up_process(new->secondary->thread); |
---|
| 1719 | + wake_up_and_wait_for_irq_thread_ready(desc, new); |
---|
| 1720 | + wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); |
---|
1576 | 1721 | |
---|
1577 | 1722 | register_irq_proc(irq, desc); |
---|
1578 | 1723 | new->dir = NULL; |
---|
.. | .. |
---|
1617 | 1762 | module_put(desc->owner); |
---|
1618 | 1763 | return ret; |
---|
1619 | 1764 | } |
---|
1620 | | - |
---|
1621 | | -/** |
---|
1622 | | - * setup_irq - setup an interrupt |
---|
1623 | | - * @irq: Interrupt line to setup |
---|
1624 | | - * @act: irqaction for the interrupt |
---|
1625 | | - * |
---|
1626 | | - * Used to statically setup interrupts in the early boot process. |
---|
1627 | | - */ |
---|
1628 | | -int setup_irq(unsigned int irq, struct irqaction *act) |
---|
1629 | | -{ |
---|
1630 | | - int retval; |
---|
1631 | | - struct irq_desc *desc = irq_to_desc(irq); |
---|
1632 | | - |
---|
1633 | | - if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
---|
1634 | | - return -EINVAL; |
---|
1635 | | - |
---|
1636 | | - retval = irq_chip_pm_get(&desc->irq_data); |
---|
1637 | | - if (retval < 0) |
---|
1638 | | - return retval; |
---|
1639 | | - |
---|
1640 | | - retval = __setup_irq(irq, desc, act); |
---|
1641 | | - |
---|
1642 | | - if (retval) |
---|
1643 | | - irq_chip_pm_put(&desc->irq_data); |
---|
1644 | | - |
---|
1645 | | - return retval; |
---|
1646 | | -} |
---|
1647 | | -EXPORT_SYMBOL_GPL(setup_irq); |
---|
1648 | 1765 | |
---|
1649 | 1766 | /* |
---|
1650 | 1767 | * Internal function to unregister an irqaction - used to free |
---|
.. | .. |
---|
1761 | 1878 | /* Last action releases resources */ |
---|
1762 | 1879 | if (!desc->action) { |
---|
1763 | 1880 | /* |
---|
1764 | | - * Reaquire bus lock as irq_release_resources() might |
---|
| 1881 | + * Reacquire bus lock as irq_release_resources() might |
---|
1765 | 1882 | * require it to deallocate resources over the slow bus. |
---|
1766 | 1883 | */ |
---|
1767 | 1884 | chip_bus_lock(desc); |
---|
.. | .. |
---|
1785 | 1902 | kfree(action->secondary); |
---|
1786 | 1903 | return action; |
---|
1787 | 1904 | } |
---|
1788 | | - |
---|
1789 | | -/** |
---|
1790 | | - * remove_irq - free an interrupt |
---|
1791 | | - * @irq: Interrupt line to free |
---|
1792 | | - * @act: irqaction for the interrupt |
---|
1793 | | - * |
---|
1794 | | - * Used to remove interrupts statically setup by the early boot process. |
---|
1795 | | - */ |
---|
1796 | | -void remove_irq(unsigned int irq, struct irqaction *act) |
---|
1797 | | -{ |
---|
1798 | | - struct irq_desc *desc = irq_to_desc(irq); |
---|
1799 | | - |
---|
1800 | | - if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
---|
1801 | | - __free_irq(desc, act->dev_id); |
---|
1802 | | -} |
---|
1803 | | -EXPORT_SYMBOL_GPL(remove_irq); |
---|
1804 | 1905 | |
---|
1805 | 1906 | /** |
---|
1806 | 1907 | * free_irq - free an interrupt allocated with request_irq |
---|
.. | .. |
---|
1842 | 1943 | return devname; |
---|
1843 | 1944 | } |
---|
1844 | 1945 | EXPORT_SYMBOL(free_irq); |
---|
| 1946 | + |
---|
| 1947 | +/* This function must be called with desc->lock held */ |
---|
| 1948 | +static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) |
---|
| 1949 | +{ |
---|
| 1950 | + const char *devname = NULL; |
---|
| 1951 | + |
---|
| 1952 | + desc->istate &= ~IRQS_NMI; |
---|
| 1953 | + |
---|
| 1954 | + if (!WARN_ON(desc->action == NULL)) { |
---|
| 1955 | + irq_pm_remove_action(desc, desc->action); |
---|
| 1956 | + devname = desc->action->name; |
---|
| 1957 | + unregister_handler_proc(irq, desc->action); |
---|
| 1958 | + |
---|
| 1959 | + kfree(desc->action); |
---|
| 1960 | + desc->action = NULL; |
---|
| 1961 | + } |
---|
| 1962 | + |
---|
| 1963 | + irq_settings_clr_disable_unlazy(desc); |
---|
| 1964 | + irq_shutdown_and_deactivate(desc); |
---|
| 1965 | + |
---|
| 1966 | + irq_release_resources(desc); |
---|
| 1967 | + |
---|
| 1968 | + irq_chip_pm_put(&desc->irq_data); |
---|
| 1969 | + module_put(desc->owner); |
---|
| 1970 | + |
---|
| 1971 | + return devname; |
---|
| 1972 | +} |
---|
| 1973 | + |
---|
| 1974 | +const void *free_nmi(unsigned int irq, void *dev_id) |
---|
| 1975 | +{ |
---|
| 1976 | + struct irq_desc *desc = irq_to_desc(irq); |
---|
| 1977 | + unsigned long flags; |
---|
| 1978 | + const void *devname; |
---|
| 1979 | + |
---|
| 1980 | + if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) |
---|
| 1981 | + return NULL; |
---|
| 1982 | + |
---|
| 1983 | + if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
---|
| 1984 | + return NULL; |
---|
| 1985 | + |
---|
| 1986 | + /* NMI still enabled */ |
---|
| 1987 | + if (WARN_ON(desc->depth == 0)) |
---|
| 1988 | + disable_nmi_nosync(irq); |
---|
| 1989 | + |
---|
| 1990 | + raw_spin_lock_irqsave(&desc->lock, flags); |
---|
| 1991 | + |
---|
| 1992 | + irq_nmi_teardown(desc); |
---|
| 1993 | + devname = __cleanup_nmi(irq, desc); |
---|
| 1994 | + |
---|
| 1995 | + raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
| 1996 | + |
---|
| 1997 | + return devname; |
---|
| 1998 | +} |
---|
1845 | 1999 | |
---|
1846 | 2000 | /** |
---|
1847 | 2001 | * request_threaded_irq - allocate an interrupt line |
---|
.. | .. |
---|
1902 | 2056 | * which interrupt is which (messes up the interrupt freeing |
---|
1903 | 2057 | * logic etc). |
---|
1904 | 2058 | * |
---|
| 2059 | + * Also shared interrupts do not go well with disabling auto enable. |
---|
| 2060 | + * The sharing interrupt might request it while it's still disabled |
---|
| 2061 | + * and then wait for interrupts forever. |
---|
| 2062 | + * |
---|
1905 | 2063 | * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and |
---|
1906 | 2064 | * it cannot be set along with IRQF_NO_SUSPEND. |
---|
1907 | 2065 | */ |
---|
1908 | 2066 | if (((irqflags & IRQF_SHARED) && !dev_id) || |
---|
| 2067 | + ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) || |
---|
1909 | 2068 | (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || |
---|
1910 | 2069 | ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) |
---|
1911 | 2070 | return -EINVAL; |
---|
.. | .. |
---|
2012 | 2171 | } |
---|
2013 | 2172 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
---|
2014 | 2173 | |
---|
| 2174 | +/** |
---|
| 2175 | + * request_nmi - allocate an interrupt line for NMI delivery |
---|
| 2176 | + * @irq: Interrupt line to allocate |
---|
| 2177 | + * @handler: Function to be called when the IRQ occurs. |
---|
| 2178 | + * Threaded handler for threaded interrupts. |
---|
| 2179 | + * @irqflags: Interrupt type flags |
---|
| 2180 | + * @name: An ascii name for the claiming device |
---|
| 2181 | + * @dev_id: A cookie passed back to the handler function |
---|
| 2182 | + * |
---|
| 2183 | + * This call allocates interrupt resources and enables the |
---|
| 2184 | + * interrupt line and IRQ handling. It sets up the IRQ line |
---|
| 2185 | + * to be handled as an NMI. |
---|
| 2186 | + * |
---|
| 2187 | + * An interrupt line delivering NMIs cannot be shared and IRQ handling |
---|
| 2188 | + * cannot be threaded. |
---|
| 2189 | + * |
---|
| 2190 | + * Interrupt lines requested for NMI delivery must produce per-CPU |
---|
| 2191 | + * interrupts and have the auto-enable setting disabled. |
---|
| 2192 | + * |
---|
| 2193 | + * Dev_id must be globally unique. Normally the address of the |
---|
| 2194 | + * device data structure is used as the cookie. Since the handler |
---|
| 2195 | + * receives this value it makes sense to use it. |
---|
| 2196 | + * |
---|
| 2197 | + * If the interrupt line cannot be used to deliver NMIs, the function |
---|
| 2198 | + * will fail and return a negative value. |
---|
| 2199 | + */ |
---|
| 2200 | +int request_nmi(unsigned int irq, irq_handler_t handler, |
---|
| 2201 | + unsigned long irqflags, const char *name, void *dev_id) |
---|
| 2202 | +{ |
---|
| 2203 | + struct irqaction *action; |
---|
| 2204 | + struct irq_desc *desc; |
---|
| 2205 | + unsigned long flags; |
---|
| 2206 | + int retval; |
---|
| 2207 | + |
---|
| 2208 | + if (irq == IRQ_NOTCONNECTED) |
---|
| 2209 | + return -ENOTCONN; |
---|
| 2210 | + |
---|
| 2211 | + /* NMI cannot be shared, used for Polling */ |
---|
| 2212 | + if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) |
---|
| 2213 | + return -EINVAL; |
---|
| 2214 | + |
---|
| 2215 | + if (!(irqflags & IRQF_PERCPU)) |
---|
| 2216 | + return -EINVAL; |
---|
| 2217 | + |
---|
| 2218 | + if (!handler) |
---|
| 2219 | + return -EINVAL; |
---|
| 2220 | + |
---|
| 2221 | + desc = irq_to_desc(irq); |
---|
| 2222 | + |
---|
| 2223 | + if (!desc || (irq_settings_can_autoenable(desc) && |
---|
| 2224 | + !(irqflags & IRQF_NO_AUTOEN)) || |
---|
| 2225 | + !irq_settings_can_request(desc) || |
---|
| 2226 | + WARN_ON(irq_settings_is_per_cpu_devid(desc)) || |
---|
| 2227 | + !irq_supports_nmi(desc)) |
---|
| 2228 | + return -EINVAL; |
---|
| 2229 | + |
---|
| 2230 | + action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
---|
| 2231 | + if (!action) |
---|
| 2232 | + return -ENOMEM; |
---|
| 2233 | + |
---|
| 2234 | + action->handler = handler; |
---|
| 2235 | + action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; |
---|
| 2236 | + action->name = name; |
---|
| 2237 | + action->dev_id = dev_id; |
---|
| 2238 | + |
---|
| 2239 | + retval = irq_chip_pm_get(&desc->irq_data); |
---|
| 2240 | + if (retval < 0) |
---|
| 2241 | + goto err_out; |
---|
| 2242 | + |
---|
| 2243 | + retval = __setup_irq(irq, desc, action); |
---|
| 2244 | + if (retval) |
---|
| 2245 | + goto err_irq_setup; |
---|
| 2246 | + |
---|
| 2247 | + raw_spin_lock_irqsave(&desc->lock, flags); |
---|
| 2248 | + |
---|
| 2249 | + /* Setup NMI state */ |
---|
| 2250 | + desc->istate |= IRQS_NMI; |
---|
| 2251 | + retval = irq_nmi_setup(desc); |
---|
| 2252 | + if (retval) { |
---|
| 2253 | + __cleanup_nmi(irq, desc); |
---|
| 2254 | + raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
| 2255 | + return -EINVAL; |
---|
| 2256 | + } |
---|
| 2257 | + |
---|
| 2258 | + raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
| 2259 | + |
---|
| 2260 | + return 0; |
---|
| 2261 | + |
---|
| 2262 | +err_irq_setup: |
---|
| 2263 | + irq_chip_pm_put(&desc->irq_data); |
---|
| 2264 | +err_out: |
---|
| 2265 | + kfree(action); |
---|
| 2266 | + |
---|
| 2267 | + return retval; |
---|
| 2268 | +} |
---|
| 2269 | + |
---|
2015 | 2270 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
---|
2016 | 2271 | { |
---|
2017 | 2272 | unsigned int cpu = smp_processor_id(); |
---|
.. | .. |
---|
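request_nmi() above rejects shared or polled lines, requires IRQF_PERCPU in the flags, and insists that auto-enable is off, so a caller typically passes IRQF_PERCPU | IRQF_NO_AUTOEN and switches delivery on explicitly. A hedged sketch of the lifecycle using the entry points added in this patch; the irq number, handler and cookie are placeholders:

```c
#include <linux/interrupt.h>

static unsigned long demo_cookie;	/* placeholder dev_id */

static irqreturn_t demo_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: keep this minimal, no sleeping, no regular spinlocks */
	return IRQ_HANDLED;
}

static int demo_setup_nmi(unsigned int irq)
{
	int ret;

	ret = request_nmi(irq, demo_nmi_handler,
			  IRQF_PERCPU | IRQF_NO_AUTOEN, "demo-nmi",
			  &demo_cookie);
	if (ret)
		return ret;

	enable_nmi(irq);	/* delivery starts here, not at request time */
	return 0;
}

static void demo_teardown_nmi(unsigned int irq)
{
	disable_nmi_nosync(irq);
	free_nmi(irq, &demo_cookie);
}
```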
2045 | 2300 | irq_put_desc_unlock(desc, flags); |
---|
2046 | 2301 | } |
---|
2047 | 2302 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
---|
| 2303 | + |
---|
| 2304 | +void enable_percpu_nmi(unsigned int irq, unsigned int type) |
---|
| 2305 | +{ |
---|
| 2306 | + enable_percpu_irq(irq, type); |
---|
| 2307 | +} |
---|
2048 | 2308 | |
---|
2049 | 2309 | /** |
---|
2050 | 2310 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled |
---|
.. | .. |
---|
2085 | 2345 | } |
---|
2086 | 2346 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
---|
2087 | 2347 | |
---|
| 2348 | +void disable_percpu_nmi(unsigned int irq) |
---|
| 2349 | +{ |
---|
| 2350 | + disable_percpu_irq(irq); |
---|
| 2351 | +} |
---|
| 2352 | + |
---|
2088 | 2353 | /* |
---|
2089 | 2354 | * Internal function to unregister a percpu irqaction. |
---|
2090 | 2355 | */ |
---|
.. | .. |
---|
2115 | 2380 | |
---|
2116 | 2381 | /* Found it - now remove it from the list of entries: */ |
---|
2117 | 2382 | desc->action = NULL; |
---|
| 2383 | + |
---|
| 2384 | + desc->istate &= ~IRQS_NMI; |
---|
2118 | 2385 | |
---|
2119 | 2386 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
2120 | 2387 | |
---|
.. | .. |
---|
2168 | 2435 | chip_bus_sync_unlock(desc); |
---|
2169 | 2436 | } |
---|
2170 | 2437 | EXPORT_SYMBOL_GPL(free_percpu_irq); |
---|
| 2438 | + |
---|
| 2439 | +void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) |
---|
| 2440 | +{ |
---|
| 2441 | + struct irq_desc *desc = irq_to_desc(irq); |
---|
| 2442 | + |
---|
| 2443 | + if (!desc || !irq_settings_is_per_cpu_devid(desc)) |
---|
| 2444 | + return; |
---|
| 2445 | + |
---|
| 2446 | + if (WARN_ON(!(desc->istate & IRQS_NMI))) |
---|
| 2447 | + return; |
---|
| 2448 | + |
---|
| 2449 | + kfree(__free_percpu_irq(irq, dev_id)); |
---|
| 2450 | +} |
---|
2171 | 2451 | |
---|
2172 | 2452 | /** |
---|
2173 | 2453 | * setup_percpu_irq - setup a per-cpu interrupt |
---|
.. | .. |
---|
2258 | 2538 | } |
---|
2259 | 2539 | EXPORT_SYMBOL_GPL(__request_percpu_irq); |
---|
2260 | 2540 | |
---|
| 2541 | +/** |
---|
| 2542 | + * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery |
---|
| 2543 | + * @irq: Interrupt line to allocate |
---|
| 2544 | + * @handler: Function to be called when the IRQ occurs. |
---|
| 2545 | + * @name: An ascii name for the claiming device |
---|
| 2546 | + * @dev_id: A percpu cookie passed back to the handler function |
---|
| 2547 | + * |
---|
| 2548 | + * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs |
---|
| 2549 | + * have to be setup on each CPU by calling prepare_percpu_nmi() before |
---|
| 2550 | + * being enabled on the same CPU by using enable_percpu_nmi(). |
---|
| 2551 | + * |
---|
| 2552 | + * Dev_id must be globally unique. It is a per-cpu variable, and |
---|
| 2553 | + * the handler gets called with the interrupted CPU's instance of |
---|
| 2554 | + * that variable. |
---|
| 2555 | + * |
---|
| 2556 | + * Interrupt lines requested for NMI delivery should have the auto-enable |
---|
| 2557 | + * setting disabled. |
---|
| 2558 | + * |
---|
| 2559 | + * If the interrupt line cannot be used to deliver NMIs, the function |
---|
| 2560 | + * will fail, returning a negative value. |
---|
| 2561 | + */ |
---|
| 2562 | +int request_percpu_nmi(unsigned int irq, irq_handler_t handler, |
---|
| 2563 | + const char *name, void __percpu *dev_id) |
---|
| 2564 | +{ |
---|
| 2565 | + struct irqaction *action; |
---|
| 2566 | + struct irq_desc *desc; |
---|
| 2567 | + unsigned long flags; |
---|
| 2568 | + int retval; |
---|
| 2569 | + |
---|
| 2570 | + if (!handler) |
---|
| 2571 | + return -EINVAL; |
---|
| 2572 | + |
---|
| 2573 | + desc = irq_to_desc(irq); |
---|
| 2574 | + |
---|
| 2575 | + if (!desc || !irq_settings_can_request(desc) || |
---|
| 2576 | + !irq_settings_is_per_cpu_devid(desc) || |
---|
| 2577 | + irq_settings_can_autoenable(desc) || |
---|
| 2578 | + !irq_supports_nmi(desc)) |
---|
| 2579 | + return -EINVAL; |
---|
| 2580 | + |
---|
| 2581 | + /* The line cannot already be NMI */ |
---|
| 2582 | + if (desc->istate & IRQS_NMI) |
---|
| 2583 | + return -EINVAL; |
---|
| 2584 | + |
---|
| 2585 | + action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
---|
| 2586 | + if (!action) |
---|
| 2587 | + return -ENOMEM; |
---|
| 2588 | + |
---|
| 2589 | + action->handler = handler; |
---|
| 2590 | + action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD |
---|
| 2591 | + | IRQF_NOBALANCING; |
---|
| 2592 | + action->name = name; |
---|
| 2593 | + action->percpu_dev_id = dev_id; |
---|
| 2594 | + |
---|
| 2595 | + retval = irq_chip_pm_get(&desc->irq_data); |
---|
| 2596 | + if (retval < 0) |
---|
| 2597 | + goto err_out; |
---|
| 2598 | + |
---|
| 2599 | + retval = __setup_irq(irq, desc, action); |
---|
| 2600 | + if (retval) |
---|
| 2601 | + goto err_irq_setup; |
---|
| 2602 | + |
---|
| 2603 | + raw_spin_lock_irqsave(&desc->lock, flags); |
---|
| 2604 | + desc->istate |= IRQS_NMI; |
---|
| 2605 | + raw_spin_unlock_irqrestore(&desc->lock, flags); |
---|
| 2606 | + |
---|
| 2607 | + return 0; |
---|
| 2608 | + |
---|
| 2609 | +err_irq_setup: |
---|
| 2610 | + irq_chip_pm_put(&desc->irq_data); |
---|
| 2611 | +err_out: |
---|
| 2612 | + kfree(action); |
---|
| 2613 | + |
---|
| 2614 | + return retval; |
---|
| 2615 | +} |
---|
| 2616 | + |
---|
| 2617 | +/** |
---|
| 2618 | + * prepare_percpu_nmi - performs CPU local setup for NMI delivery |
---|
| 2619 | + * @irq: Interrupt line to prepare for NMI delivery |
---|
| 2620 | + * |
---|
| 2621 | + * This call prepares an interrupt line to deliver NMI on the current CPU, |
---|
| 2622 | + * before that interrupt line gets enabled with enable_percpu_nmi(). |
---|
| 2623 | + * |
---|
| 2624 | + * As a CPU local operation, this should be called from non-preemptible |
---|
| 2625 | + * context. |
---|
| 2626 | + * |
---|
| 2627 | + * If the interrupt line cannot be used to deliver NMIs, function |
---|
| 2628 | + * will fail returning a negative value. |
---|
| 2629 | + */ |
---|
| 2630 | +int prepare_percpu_nmi(unsigned int irq) |
---|
| 2631 | +{ |
---|
| 2632 | + unsigned long flags; |
---|
| 2633 | + struct irq_desc *desc; |
---|
| 2634 | + int ret = 0; |
---|
| 2635 | + |
---|
| 2636 | + WARN_ON(preemptible()); |
---|
| 2637 | + |
---|
| 2638 | + desc = irq_get_desc_lock(irq, &flags, |
---|
| 2639 | + IRQ_GET_DESC_CHECK_PERCPU); |
---|
| 2640 | + if (!desc) |
---|
| 2641 | + return -EINVAL; |
---|
| 2642 | + |
---|
| 2643 | + if (WARN(!(desc->istate & IRQS_NMI), |
---|
| 2644 | + KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", |
---|
| 2645 | + irq)) { |
---|
| 2646 | + ret = -EINVAL; |
---|
| 2647 | + goto out; |
---|
| 2648 | + } |
---|
| 2649 | + |
---|
| 2650 | + ret = irq_nmi_setup(desc); |
---|
| 2651 | + if (ret) { |
---|
| 2652 | + pr_err("Failed to setup NMI delivery: irq %u\n", irq); |
---|
| 2653 | + goto out; |
---|
| 2654 | + } |
---|
| 2655 | + |
---|
| 2656 | +out: |
---|
| 2657 | + irq_put_desc_unlock(desc, flags); |
---|
| 2658 | + return ret; |
---|
| 2659 | +} |
---|
| 2660 | + |
---|
| 2661 | +/** |
---|
| 2662 | + * teardown_percpu_nmi - undoes NMI setup of IRQ line |
---|
| 2663 | + * @irq: Interrupt line from which CPU local NMI configuration should be |
---|
| 2664 | + * removed |
---|
| 2665 | + * |
---|
| 2666 | + * This call undoes the setup done by prepare_percpu_nmi(). |
---|
| 2667 | + * |
---|
| 2668 | + * IRQ line should not be enabled for the current CPU. |
---|
| 2669 | + * |
---|
| 2670 | + * As a CPU local operation, this should be called from non-preemptible |
---|
| 2671 | + * context. |
---|
| 2672 | + */ |
---|
| 2673 | +void teardown_percpu_nmi(unsigned int irq) |
---|
| 2674 | +{ |
---|
| 2675 | + unsigned long flags; |
---|
| 2676 | + struct irq_desc *desc; |
---|
| 2677 | + |
---|
| 2678 | + WARN_ON(preemptible()); |
---|
| 2679 | + |
---|
| 2680 | + desc = irq_get_desc_lock(irq, &flags, |
---|
| 2681 | + IRQ_GET_DESC_CHECK_PERCPU); |
---|
| 2682 | + if (!desc) |
---|
| 2683 | + return; |
---|
| 2684 | + |
---|
| 2685 | + if (WARN_ON(!(desc->istate & IRQS_NMI))) |
---|
| 2686 | + goto out; |
---|
| 2687 | + |
---|
| 2688 | + irq_nmi_teardown(desc); |
---|
| 2689 | +out: |
---|
| 2690 | + irq_put_desc_unlock(desc, flags); |
---|
| 2691 | +} |
---|
| 2692 | + |
---|
2261 | 2693 | int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, |
---|
2262 | 2694 | bool *state) |
---|
2263 | 2695 | { |
---|
.. | .. |
---|
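For the per-CPU flavour the work is split: request_percpu_nmi() registers the line once, while prepare_percpu_nmi() and enable_percpu_nmi() must run on every CPU that should receive the NMI, in non-preemptible context, with disable_percpu_nmi()/teardown_percpu_nmi() as the mirror image. A hedged sketch of that sequence; names are placeholders and the per-CPU callbacks would typically be driven from on_each_cpu() or CPU hotplug callbacks:

```c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_pcpu_cookie);	/* per-CPU dev_id */

static irqreturn_t demo_pcpu_nmi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* One-time, global registration of the line as a per-CPU NMI */
static int demo_request(unsigned int irq)
{
	return request_percpu_nmi(irq, demo_pcpu_nmi_handler,
				  "demo-pcpu-nmi", &demo_pcpu_cookie);
}

/* Runs on each target CPU, non-preemptible context */
static void demo_enable_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

/* Mirror image for teardown, also per CPU */
static void demo_disable_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}
```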
2266 | 2698 | |
---|
2267 | 2699 | do { |
---|
2268 | 2700 | chip = irq_data_get_irq_chip(data); |
---|
| 2701 | + if (WARN_ON_ONCE(!chip)) |
---|
| 2702 | + return -ENODEV; |
---|
2269 | 2703 | if (chip->irq_get_irqchip_state) |
---|
2270 | 2704 | break; |
---|
2271 | 2705 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
---|
.. | .. |
---|
2323 | 2757 | * This call sets the internal irqchip state of an interrupt, |
---|
2324 | 2758 | * depending on the value of @which. |
---|
2325 | 2759 | * |
---|
2326 | | - * This function should be called with migration disabled if the |
---|
| 2760 | + * This function should be called with preemption disabled if the |
---|
2327 | 2761 | * interrupt controller has per-cpu registers. |
---|
2328 | 2762 | */ |
---|
2329 | 2763 | int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, |
---|
.. | .. |
---|
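The reworded comment above applies to the irq_get_irqchip_state()/irq_set_irqchip_state() pair: when the controller keeps per-CPU state, the caller must stay on one CPU (preemption disabled) for the result to be meaningful. A hedged usage sketch that polls and clears a pending latch; the wrapper names are placeholders:

```c
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Sketch: check whether the hardware still latches the line as pending */
static bool demo_irq_pending(unsigned int irq)
{
	bool pending = false;

	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* chip does not support state readback */

	return pending;
}

/* Sketch: clear a stale pending latch before re-enabling the line */
static void demo_irq_clear_pending(unsigned int irq)
{
	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
}
```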
2343 | 2777 | |
---|
2344 | 2778 | do { |
---|
2345 | 2779 | chip = irq_data_get_irq_chip(data); |
---|
| 2780 | + if (WARN_ON_ONCE(!chip)) { |
---|
| 2781 | + err = -ENODEV; |
---|
| 2782 | + goto out_unlock; |
---|
| 2783 | + } |
---|
2346 | 2784 | if (chip->irq_set_irqchip_state) |
---|
2347 | 2785 | break; |
---|
2348 | 2786 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
---|
.. | .. |
---|
2355 | 2793 | if (data) |
---|
2356 | 2794 | err = chip->irq_set_irqchip_state(data, which, val); |
---|
2357 | 2795 | |
---|
| 2796 | +out_unlock: |
---|
2358 | 2797 | irq_put_desc_busunlock(desc, flags); |
---|
2359 | 2798 | return err; |
---|
2360 | 2799 | } |
---|