@@ -34,6 +34,7 @@
  * true and let the handler run.
  */
 bool irq_wait_for_poll(struct irq_desc *desc)
+	__must_hold(&desc->lock)
 {
 	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
 		      "irq poll in progress on cpu %d for irq %d\n",
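The __must_hold(&desc->lock) added above is a sparse annotation (exercised via "make C=1"): it declares that irq_wait_for_poll() must be entered and exited with desc->lock held, so the static checker can flag lock imbalances; outside __CHECKER__ builds it compiles to nothing. A minimal sketch of the same pattern, assuming a hypothetical helper (example_poll_check is not in the tree):

#include <linux/irq.h>
#include <linux/irqdesc.h>

/* Hypothetical helper: the caller must already hold desc->lock, and the
 * lock is still held on return; sparse checks both directions. */
static bool example_poll_check(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	return !irqd_irq_inprogress(&desc->irq_data);
}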
@@ -66,7 +67,7 @@
 	raw_spin_lock(&desc->lock);
 
 	/*
-	 * PER_CPU, nested thread interrupts and interrupts explicitely
+	 * PER_CPU, nested thread interrupts and interrupts explicitly
 	 * marked polled are excluded from polling.
 	 */
 	if (irq_settings_is_per_cpu(desc) ||
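The interrupts "explicitly marked polled" in this comment are lines carrying the IRQ_IS_POLLED status flag, which excludes them from both spurious detection and core-side polling. As a hedged illustration of how a driver opts in, assuming a made-up setup function and an already valid irq number:

#include <linux/irq.h>

/* Hypothetical driver setup: this line is always serviced by polling
 * elsewhere, so keep the spurious-IRQ machinery away from it. */
static void example_mark_irq_polled(unsigned int irq)
{
	irq_set_status_flags(irq, IRQ_IS_POLLED);
}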
@@ -76,7 +77,7 @@
 
 	/*
 	 * Do not poll disabled interrupts unless the spurious
-	 * disabled poller asks explicitely.
+	 * disabled poller asks explicitly.
 	 */
 	if (irqd_irq_disabled(&desc->irq_data) && !force)
 		goto out;
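The force flag tested above separates the regular misrouted-IRQ scan (force == false) from the periodic spurious poller, which re-polls lines it disabled itself after an interrupt storm. A hedged sketch of that calling convention; the name mirrors kernel/irq/spurious.c but the body is illustrative only:

#include <linux/irq.h>
#include <linux/irqdesc.h>

static bool example_try_one_irq(struct irq_desc *desc, bool force)
{
	/* Only the spurious "disabled poller" passes force == true and
	 * may poke an interrupt line that the core disabled earlier. */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		return false;
	/* ... try each registered action handler here ... */
	return true;
}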
@@ -212,9 +213,9 @@
 	 */
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_action_of_desc(desc, action) {
-		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+		printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
 		if (action->thread_fn)
-			printk(KERN_CONT " threaded [<%p>] %pf",
+			printk(KERN_CONT " threaded [<%p>] %ps",
 				action->thread_fn, action->thread_fn);
 		printk(KERN_CONT "\n");
 	}
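%pf was the historical printk specifier that dereferenced function descriptors on architectures such as ia64 and ppc64; that behavior was later folded into %ps/%pS, so this hunk switches the bad-IRQ report to the surviving specifiers. A small hedged example with a made-up handler (%ps prints the bare symbol, %pS adds offset/size):

#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void example_print_handler(void)
{
	pr_err("handler: %ps\n", example_handler);	/* "example_handler" */
	pr_err("handler: %pS\n", example_handler);	/* "example_handler+0x0/0x..." */
}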
@@ -292,7 +293,7 @@
 	 * So in case a thread is woken, we just note the fact and
 	 * defer the analysis to the next hardware interrupt.
 	 *
-	 * The threaded handlers store whether they sucessfully
+	 * The threaded handlers store whether they successfully
 	 * handled an interrupt and we check whether that number
 	 * changed versus the last invocation.
 	 *
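The counting described in this comment: a threaded handler that returns IRQ_HANDLED bumps a per-descriptor counter, and on the next hardware interrupt the core compares it with the value seen last time to judge whether the thread made progress. From a driver's perspective only the return values matter; a hedged sketch of a threaded registration, with all device specifics assumed:

#include <linux/interrupt.h>

/* Hypothetical hard-irq half: quick check, then wake the thread. */
static irqreturn_t example_hard_fn(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* Hypothetical threaded half: IRQ_HANDLED feeds the bookkeeping that
 * the spurious detector consults on the next hardware interrupt. */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, example_hard_fn, example_thread_fn,
				    IRQF_ONESHOT, "example", dev);
}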
@@ -442,10 +443,6 @@
 
 static int __init irqfixup_setup(char *str)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
-	pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
-	return 1;
-#endif
 	irqfixup = 1;
 	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
 	printk(KERN_WARNING "This may impact system performance.\n");
@@ -458,10 +455,6 @@
 
 static int __init irqpoll_setup(char *str)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
-	pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
-	return 1;
-#endif
 	irqfixup = 2;
 	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
 	       "enabled\n");
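The two hunks above delete the CONFIG_PREEMPT_RT_BASE early returns, so the irqfixup and irqpoll boot options are no longer rejected on those kernels; note that irqpoll implies irqfixup by setting irqfixup = 2. Handlers like these are attached to the kernel command line via __setup(). A hedged sketch of that mechanism with a made-up option name (the real options are described in Documentation/admin-guide/kernel-parameters.txt):

#include <linux/init.h>
#include <linux/printk.h>

static int example_fixup;

/* Runs during early boot when "examplefixup" is on the command line;
 * returning 1 tells the option parser the argument was consumed. */
static int __init examplefixup_setup(char *str)
{
	example_fixup = 1;
	pr_warn("example fixup support enabled\n");
	return 1;
}
__setup("examplefixup", examplefixup_setup);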