| .. | .. |
|---|
| 45 | 45 | * IRQF_PERCPU - Interrupt is per cpu |
|---|
| 46 | 46 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing |
|---|
| 47 | 47 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is |
|---|
| 48 | | - * registered first in an shared interrupt is considered for |
|---|
| 48 | + * registered first in a shared interrupt is considered for |
|---|
| 49 | 49 | * performance reasons) |
|---|
| 50 | 50 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. |
|---|
| 51 | 51 | * Used by threaded interrupts which need to keep the |
|---|
| 52 | 52 | * irq line disabled until the threaded handler has been run. |
|---|
| 53 | 53 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee |
|---|
| 54 | 54 | * that this interrupt will wake the system from a suspended |
|---|
| 55 | | - * state. See Documentation/power/suspend-and-interrupts.txt |
|---|
| 55 | + * state. See Documentation/power/suspend-and-interrupts.rst |
|---|
| 56 | 56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
|---|
| 57 | 57 | * IRQF_NO_THREAD - Interrupt cannot be threaded |
|---|
| 58 | 58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device |
|---|
| .. | .. |
|---|
| 61 | 61 | * interrupt handler after suspending interrupts. For system |
|---|
| 62 | 62 | * wakeup devices users need to implement wakeup detection in |
|---|
| 63 | 63 | * their interrupt handlers. |
|---|
| 64 | | - * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) |
|---|
| 64 | + * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. |
|---|
| 65 | + * Users will enable it explicitly by enable_irq() or enable_nmi() |
|---|
| 66 | + * later. |
|---|
| 65 | 67 | */ |
|---|
| 66 | 68 | #define IRQF_SHARED 0x00000080 |
|---|
| 67 | 69 | #define IRQF_PROBE_SHARED 0x00000100 |
|---|
| .. | .. |
|---|
| 75 | 77 | #define IRQF_NO_THREAD 0x00010000 |
|---|
| 76 | 78 | #define IRQF_EARLY_RESUME 0x00020000 |
|---|
| 77 | 79 | #define IRQF_COND_SUSPEND 0x00040000 |
|---|
| 78 | | -#define IRQF_NO_SOFTIRQ_CALL 0x00080000 |
|---|
| 80 | +#define IRQF_NO_AUTOEN 0x00080000 |
|---|
| 79 | 81 | |
|---|
| 80 | 82 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
|---|
| 81 | 83 | |
|---|
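The hunk above retires the RT-specific IRQF_NO_SOFTIRQ_CALL bit and reuses its value (0x00080000) for IRQF_NO_AUTOEN, which keeps a requested line disabled until the driver enables it explicitly. Below is a minimal sketch of the intended usage; `struct foo_dev`, `foo_irq_handler()` and `foo_hw_init()` are hypothetical names, not part of the header.

```c
#include <linux/interrupt.h>

struct foo_dev {			/* hypothetical device state */
	int irq;
	void __iomem *regs;
};

static void foo_hw_init(struct foo_dev *fd)
{
	/* hypothetical hardware bring-up */
}

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* ... acknowledge and handle the hardware event ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *fd)
{
	int ret;

	/* The line is claimed but stays disabled. */
	ret = request_irq(fd->irq, foo_irq_handler, IRQF_NO_AUTOEN,
			  "foo", fd);
	if (ret)
		return ret;

	foo_hw_init(fd);

	/* Only now let interrupts through. */
	enable_irq(fd->irq);
	return 0;
}
```

This closes the window in which drivers previously had to call disable_irq() right after request_irq(), a pattern that is racy because the interrupt can already fire in between.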
| .. | .. |
|---|
| 142 | 144 | irq_handler_t thread_fn, |
|---|
| 143 | 145 | unsigned long flags, const char *name, void *dev); |
|---|
| 144 | 146 | |
|---|
| 147 | +/** |
|---|
| 148 | + * request_irq - Add a handler for an interrupt line |
|---|
| 149 | + * @irq: The interrupt line to allocate |
|---|
| 150 | + * @handler: Function to be called when the IRQ occurs. |
|---|
| 151 | + * Primary handler for threaded interrupts |
|---|
| 152 | + * If NULL, the default primary handler is installed |
|---|
| 153 | + * @flags: Handling flags |
|---|
| 154 | + * @name: Name of the device generating this interrupt |
|---|
| 155 | + * @dev: A cookie passed to the handler function |
|---|
| 156 | + * |
|---|
| 157 | + * This call allocates an interrupt and establishes a handler; see |
|---|
| 158 | + * the documentation for request_threaded_irq() for details. |
|---|
| 159 | + */ |
|---|
| 145 | 160 | static inline int __must_check |
|---|
| 146 | 161 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, |
|---|
| 147 | 162 | const char *name, void *dev) |
|---|
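The new kerneldoc spells out that request_irq() is a thin wrapper around request_threaded_irq() (declared just above this hunk), with a NULL @handler meaning "install the default primary handler". A hedged sketch of the threaded form, reusing the hypothetical `struct foo_dev` from the previous example:

```c
#include <linux/interrupt.h>

static irqreturn_t foo_irq_thread_fn(int irq, void *dev_id)
{
	/* Runs in a kernel thread: sleeping (e.g. I2C transfers) is allowed. */
	return IRQ_HANDLED;
}

static int foo_request(struct foo_dev *fd)
{
	/*
	 * NULL primary handler: the core installs a default one that just
	 * wakes the thread; IRQF_ONESHOT keeps the line masked until
	 * foo_irq_thread_fn() has finished.
	 */
	return request_threaded_irq(fd->irq, NULL, foo_irq_thread_fn,
				    IRQF_ONESHOT, "foo", fd);
}
```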
| .. | .. |
|---|
| 158 | 173 | unsigned long flags, const char *devname, |
|---|
| 159 | 174 | void __percpu *percpu_dev_id); |
|---|
| 160 | 175 | |
|---|
| 176 | +extern int __must_check |
|---|
| 177 | +request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, |
|---|
| 178 | + const char *name, void *dev); |
|---|
| 179 | + |
|---|
| 161 | 180 | static inline int __must_check |
|---|
| 162 | 181 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
|---|
| 163 | 182 | const char *devname, void __percpu *percpu_dev_id) |
|---|
| .. | .. |
|---|
| 166 | 185 | devname, percpu_dev_id); |
|---|
| 167 | 186 | } |
|---|
| 168 | 187 | |
|---|
| 188 | +extern int __must_check |
|---|
| 189 | +request_percpu_nmi(unsigned int irq, irq_handler_t handler, |
|---|
| 190 | + const char *devname, void __percpu *dev); |
|---|
| 191 | + |
|---|
| 169 | 192 | extern const void *free_irq(unsigned int, void *); |
|---|
| 170 | 193 | extern void free_percpu_irq(unsigned int, void __percpu *); |
|---|
| 194 | + |
|---|
| 195 | +extern const void *free_nmi(unsigned int irq, void *dev_id); |
|---|
| 196 | +extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); |
|---|
| 171 | 197 | |
|---|
| 172 | 198 | struct device; |
|---|
| 173 | 199 | |
|---|
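Alongside the existing request/free helpers, this hunk adds NMI-flavoured counterparts. The sketch below shows the expected request/enable/free pairing for a single NMI line and is only a rough outline: the core places extra constraints on such lines (they cannot be shared, threaded or polled, and are expected not to be auto-enabled), so the exact flags depend on the irqchip. enable_nmi() and disable_nmi_nosync() are declared a little further down in this diff; the `foo_*` names and `struct foo_dev` are the same hypothetical placeholders as above.

```c
#include <linux/interrupt.h>

static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
{
	/*
	 * NMI context: no sleeping, no ordinary spinlocks, only
	 * NMI-safe operations.
	 */
	return IRQ_HANDLED;
}

static int foo_setup_nmi(struct foo_dev *fd)
{
	int ret;

	/*
	 * Assumption: request with auto-enable suppressed and switch the
	 * line on explicitly, matching the IRQF_NO_AUTOEN documentation
	 * above; IRQF_PERCPU reflects the expectation that NMI lines
	 * produce per-CPU interrupts.
	 */
	ret = request_nmi(fd->irq, foo_nmi_handler,
			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", fd);
	if (ret)
		return ret;

	enable_nmi(fd->irq);
	return 0;
}

static void foo_teardown_nmi(struct foo_dev *fd)
{
	disable_nmi_nosync(fd->irq);
	free_nmi(fd->irq, fd);
}
```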
| .. | .. |
|---|
| 219 | 245 | extern bool irq_percpu_is_enabled(unsigned int irq); |
|---|
| 220 | 246 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
|---|
| 221 | 247 | |
|---|
| 248 | +extern void disable_nmi_nosync(unsigned int irq); |
|---|
| 249 | +extern void disable_percpu_nmi(unsigned int irq); |
|---|
| 250 | +extern void enable_nmi(unsigned int irq); |
|---|
| 251 | +extern void enable_percpu_nmi(unsigned int irq, unsigned int type); |
|---|
| 252 | +extern int prepare_percpu_nmi(unsigned int irq); |
|---|
| 253 | +extern void teardown_percpu_nmi(unsigned int irq); |
|---|
| 254 | + |
|---|
| 255 | +extern int irq_inject_interrupt(unsigned int irq); |
|---|
| 256 | + |
|---|
| 222 | 257 | /* The following three functions are for the core kernel use only. */ |
|---|
| 223 | 258 | extern void suspend_device_irqs(void); |
|---|
| 224 | 259 | extern void resume_device_irqs(void); |
|---|
| 260 | +extern void rearm_wake_irq(unsigned int irq); |
|---|
| 225 | 261 | |
|---|
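The matching per-CPU NMI management calls appear in this hunk: prepare_percpu_nmi() and enable_percpu_nmi() have to run on the CPU being brought up (typically from CPU-hotplug callbacks), with teardown mirroring it. The outline below follows that pattern; the handler, the per-CPU cookie and all `foo_*` names are hypothetical, and IRQ_TYPE_NONE simply means "leave the trigger as configured". The hunk also adds irq_inject_interrupt(), an injection hook for testing, and rearm_wake_irq(), which lets suspend-path code re-arm a wakeup interrupt.

```c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct foo_cpu_state { unsigned long events; };		/* hypothetical */
static struct foo_cpu_state __percpu *foo_state;

static irqreturn_t foo_percpu_nmi_handler(int irq, void *dev_id)
{
	/* dev_id points at this CPU's foo_cpu_state instance. */
	return IRQ_HANDLED;
}

/* Once, from init code. */
static int foo_nmi_request(unsigned int irq)
{
	foo_state = alloc_percpu(struct foo_cpu_state);
	if (!foo_state)
		return -ENOMEM;

	return request_percpu_nmi(irq, foo_percpu_nmi_handler,
				  "foo-percpu-nmi", foo_state);
}

/* On each CPU, e.g. from a cpuhp "online" callback. */
static int foo_nmi_cpu_online(unsigned int irq)
{
	int ret = prepare_percpu_nmi(irq);

	if (ret)
		return ret;

	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return 0;
}

/* On each CPU before it goes offline. */
static void foo_nmi_cpu_offline(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}
```

Once every CPU has been torn down, free_percpu_nmi() undoes the original request.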
| 226 | 262 | /** |
|---|
| 227 | 263 | * struct irq_affinity_notify - context for notification of IRQ affinity changes |
|---|
| .. | .. |
|---|
| 243 | 279 | void (*release)(struct kref *ref); |
|---|
| 244 | 280 | }; |
|---|
| 245 | 281 | |
|---|
| 282 | +#define IRQ_AFFINITY_MAX_SETS 4 |
|---|
| 283 | + |
|---|
| 246 | 284 | /** |
|---|
| 247 | 285 | * struct irq_affinity - Description for automatic irq affinity assignements |
|---|
| 248 | 286 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of |
|---|
| 249 | 287 | * the MSI(-X) vector space |
|---|
| 250 | 288 | * @post_vectors: Don't apply affinity to @post_vectors at end of |
|---|
| 251 | 289 | * the MSI(-X) vector space |
|---|
| 290 | + * @nr_sets: The number of interrupt sets for which affinity |
|---|
| 291 | + * spreading is required |
|---|
| 292 | + * @set_size: Array holding the size of each interrupt set |
|---|
| 293 | + * @calc_sets: Callback for calculating the number and size |
|---|
| 294 | + * of interrupt sets |
|---|
| 295 | + * @priv: Private data for usage by @calc_sets, usually a |
|---|
| 296 | + * pointer to driver/device specific data. |
|---|
| 252 | 297 | */ |
|---|
| 253 | 298 | struct irq_affinity { |
|---|
| 254 | | - int pre_vectors; |
|---|
| 255 | | - int post_vectors; |
|---|
| 299 | + unsigned int pre_vectors; |
|---|
| 300 | + unsigned int post_vectors; |
|---|
| 301 | + unsigned int nr_sets; |
|---|
| 302 | + unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; |
|---|
| 303 | + void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); |
|---|
| 304 | + void *priv; |
|---|
| 305 | +}; |
|---|
| 306 | + |
|---|
| 307 | +/** |
|---|
| 308 | + * struct irq_affinity_desc - Interrupt affinity descriptor |
|---|
| 309 | + * @mask: cpumask to hold the affinity assignment |
|---|
| 310 | + * @is_managed: 1 if the interrupt is managed internally |
|---|
| 311 | + */ |
|---|
| 312 | +struct irq_affinity_desc { |
|---|
| 313 | + struct cpumask mask; |
|---|
| 314 | + unsigned int is_managed : 1; |
|---|
| 256 | 315 | }; |
|---|
| 257 | 316 | |
|---|
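struct irq_affinity grows interrupt-set support: rather than spreading every vector over one flat range, a driver can ask for several sets (up to IRQ_AFFINITY_MAX_SETS) and provide @calc_sets so the split is recomputed once the final vector count is known, since MSI-X allocation may hand back fewer vectors than requested. The sketch below shows roughly how a PCI driver might use this; the 3:1 split, the queue names and all `foo_*` identifiers are invented for illustration.

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical policy: one vector reserved for config, rest split 3:1. */
static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	unsigned int rw = max(1U, nvecs * 3 / 4);

	affd->nr_sets = 2;
	affd->set_size[0] = rw;			/* "read/write" queues */
	affd->set_size[1] = nvecs - rw;		/* "poll" queues */
}

static int foo_alloc_vectors(struct pci_dev *pdev, unsigned int maxvec)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* config interrupt, no spreading */
		.calc_sets	= foo_calc_sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 2, maxvec,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}
```

The related change further down swaps the plain cpumask return of irq_create_affinity_masks() for struct irq_affinity_desc, so each vector's mask can carry the is_managed bit alongside it.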
| 258 | 317 | #if defined(CONFIG_SMP) |
|---|
| .. | .. |
|---|
| 301 | 360 | extern int |
|---|
| 302 | 361 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
|---|
| 303 | 362 | |
|---|
| 304 | | -struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); |
|---|
| 305 | | -int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); |
|---|
| 363 | +struct irq_affinity_desc * |
|---|
| 364 | +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); |
|---|
| 365 | + |
|---|
| 366 | +unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
|---|
| 367 | + const struct irq_affinity *affd); |
|---|
| 306 | 368 | |
|---|
| 307 | 369 | #else /* CONFIG_SMP */ |
|---|
| 308 | 370 | |
|---|
| .. | .. |
|---|
| 335 | 397 | return 0; |
|---|
| 336 | 398 | } |
|---|
| 337 | 399 | |
|---|
| 338 | | -static inline struct cpumask * |
|---|
| 339 | | -irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) |
|---|
| 400 | +static inline struct irq_affinity_desc * |
|---|
| 401 | +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) |
|---|
| 340 | 402 | { |
|---|
| 341 | 403 | return NULL; |
|---|
| 342 | 404 | } |
|---|
| 343 | 405 | |
|---|
| 344 | | -static inline int |
|---|
| 345 | | -irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) |
|---|
| 406 | +static inline unsigned int |
|---|
| 407 | +irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
|---|
| 408 | + const struct irq_affinity *affd) |
|---|
| 346 | 409 | { |
|---|
| 347 | 410 | return maxvec; |
|---|
| 348 | 411 | } |
|---|
| .. | .. |
|---|
| 429 | 492 | bool state); |
|---|
| 430 | 493 | |
|---|
| 431 | 494 | #ifdef CONFIG_IRQ_FORCED_THREADING |
|---|
| 432 | | -# ifdef CONFIG_PREEMPT_RT_BASE |
|---|
| 495 | +# ifdef CONFIG_PREEMPT_RT |
|---|
| 433 | 496 | # define force_irqthreads (true) |
|---|
| 434 | 497 | # else |
|---|
| 435 | 498 | extern bool force_irqthreads; |
|---|
| .. | .. |
|---|
| 476 | 539 | IRQ_POLL_SOFTIRQ, |
|---|
| 477 | 540 | TASKLET_SOFTIRQ, |
|---|
| 478 | 541 | SCHED_SOFTIRQ, |
|---|
| 479 | | - HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the |
|---|
| 480 | | - numbering. Sigh! */ |
|---|
| 542 | + HRTIMER_SOFTIRQ, |
|---|
| 481 | 543 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ |
|---|
| 482 | 544 | |
|---|
| 483 | 545 | NR_SOFTIRQS |
|---|
| 484 | 546 | }; |
|---|
| 485 | 547 | |
|---|
| 486 | 548 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) |
|---|
| 549 | +/* Softirq's where the handling might be long: */ |
|---|
| 550 | +#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \ |
|---|
| 551 | + (1 << NET_RX_SOFTIRQ) | \ |
|---|
| 552 | + (1 << BLOCK_SOFTIRQ) | \ |
|---|
| 553 | + (1 << IRQ_POLL_SOFTIRQ) | \ |
|---|
| 554 | + (1 << TASKLET_SOFTIRQ)) |
|---|
| 487 | 555 | |
|---|
| 488 | 556 | /* map softirq index to softirq name. update 'softirq_to_name' in |
|---|
| 489 | 557 | * kernel/softirq.c when adding a new softirq. |
|---|
| .. | .. |
|---|
| 499 | 567 | void (*action)(struct softirq_action *); |
|---|
| 500 | 568 | }; |
|---|
| 501 | 569 | |
|---|
| 502 | | -#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 503 | 570 | asmlinkage void do_softirq(void); |
|---|
| 504 | 571 | asmlinkage void __do_softirq(void); |
|---|
| 505 | | -static inline void thread_do_softirq(void) { do_softirq(); } |
|---|
| 572 | + |
|---|
| 506 | 573 | #ifdef __ARCH_HAS_DO_SOFTIRQ |
|---|
| 507 | 574 | void do_softirq_own_stack(void); |
|---|
| 508 | 575 | #else |
|---|
| .. | .. |
|---|
| 511 | 578 | __do_softirq(); |
|---|
| 512 | 579 | } |
|---|
| 513 | 580 | #endif |
|---|
| 514 | | -#else |
|---|
| 515 | | -extern void thread_do_softirq(void); |
|---|
| 516 | | -#endif |
|---|
| 517 | 581 | |
|---|
| 518 | 582 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
|---|
| 519 | 583 | extern void softirq_init(void); |
|---|
| 520 | 584 | extern void __raise_softirq_irqoff(unsigned int nr); |
|---|
| 521 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
|---|
| 522 | | -extern void __raise_softirq_irqoff_ksoft(unsigned int nr); |
|---|
| 523 | | -#else |
|---|
| 524 | | -static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) |
|---|
| 525 | | -{ |
|---|
| 526 | | - __raise_softirq_irqoff(nr); |
|---|
| 527 | | -} |
|---|
| 528 | | -#endif |
|---|
| 529 | 585 | |
|---|
| 530 | 586 | extern void raise_softirq_irqoff(unsigned int nr); |
|---|
| 531 | 587 | extern void raise_softirq(unsigned int nr); |
|---|
| 532 | | -extern void softirq_check_pending_idle(void); |
|---|
| 533 | 588 | |
|---|
| 534 | 589 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd); |
|---|
| 590 | +DECLARE_PER_CPU(__u32, active_softirqs); |
|---|
| 535 | 591 | |
|---|
| 536 | 592 | static inline struct task_struct *this_cpu_ksoftirqd(void) |
|---|
| 537 | 593 | { |
|---|
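For context around the trimmed RT variants: open_softirq() binds an action to one of the slots in the softirq enum above, and raise_softirq()/__raise_softirq_irqoff() mark it pending on the local CPU. Softirq slots are reserved for core subsystems, so the fragment below is purely illustrative; FOO_SOFTIRQ stands in for a slot that would have to be added to the enum and the snippet would not compile as-is.

```c
#include <linux/interrupt.h>

/* Illustrative only: FOO_SOFTIRQ is a hypothetical slot in the enum above. */
static void foo_softirq_action(struct softirq_action *h)
{
	/* Runs with bottom halves disabled; must not sleep. */
}

static void foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
}

static void foo_kick_from_hardirq(void)
{
	/* Interrupts are already disabled in hard-irq context. */
	__raise_softirq_irqoff(FOO_SOFTIRQ);
}

static void foo_kick(void)
{
	raise_softirq(FOO_SOFTIRQ);	/* handles irq disabling itself */
}
```

The new per-CPU active_softirqs word, declared next to ksoftirqd, together with LONG_SOFTIRQ_MASK above, looks intended to let other code see when a CPU is busy in one of the potentially long-running softirqs; that is an inference from the names, not something this header spells out.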
| .. | .. |
|---|
| 539 | 595 | } |
|---|
| 540 | 596 | |
|---|
| 541 | 597 | /* Tasklets --- multithreaded analogue of BHs. |
|---|
| 598 | + |
|---|
| 599 | + This API is deprecated. Please consider using threaded IRQs instead: |
|---|
| 600 | + https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de |
|---|
| 542 | 601 | |
|---|
| 543 | 602 | Main feature differing them of generic softirqs: tasklet |
|---|
| 544 | 603 | is running only on one CPU simultaneously. |
|---|
| .. | .. |
|---|
| 551 | 610 | to be executed on some cpu at least once after this. |
|---|
| 552 | 611 | * If the tasklet is already scheduled, but its execution is still not |
|---|
| 553 | 612 | started, it will be executed only once. |
|---|
| 554 | | - * If this tasklet is already running on another CPU, it is rescheduled |
|---|
| 555 | | - for later. |
|---|
| 556 | | - * Schedule must not be called from the tasklet itself (a lockup occurs) |
|---|
| 613 | + * If this tasklet is already running on another CPU (or schedule is called |
|---|
| 614 | + from tasklet itself), it is rescheduled for later. |
|---|
| 557 | 615 | * Tasklet is strictly serialized wrt itself, but not |
|---|
| 558 | 616 | wrt another tasklets. If client needs some intertask synchronization, |
|---|
| 559 | 617 | he makes it with spinlocks. |
|---|
| .. | .. |
|---|
| 564 | 622 | struct tasklet_struct *next; |
|---|
| 565 | 623 | unsigned long state; |
|---|
| 566 | 624 | atomic_t count; |
|---|
| 567 | | - void (*func)(unsigned long); |
|---|
| 625 | + bool use_callback; |
|---|
| 626 | + union { |
|---|
| 627 | + void (*func)(unsigned long data); |
|---|
| 628 | + void (*callback)(struct tasklet_struct *t); |
|---|
| 629 | + }; |
|---|
| 568 | 630 | unsigned long data; |
|---|
| 569 | 631 | }; |
|---|
| 570 | 632 | |
|---|
| 571 | | -#define DECLARE_TASKLET(name, func, data) \ |
|---|
| 572 | | -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } |
|---|
| 633 | +#define DECLARE_TASKLET(name, _callback) \ |
|---|
| 634 | +struct tasklet_struct name = { \ |
|---|
| 635 | + .count = ATOMIC_INIT(0), \ |
|---|
| 636 | + .callback = _callback, \ |
|---|
| 637 | + .use_callback = true, \ |
|---|
| 638 | +} |
|---|
| 573 | 639 | |
|---|
| 574 | | -#define DECLARE_TASKLET_DISABLED(name, func, data) \ |
|---|
| 575 | | -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } |
|---|
| 640 | +#define DECLARE_TASKLET_DISABLED(name, _callback) \ |
|---|
| 641 | +struct tasklet_struct name = { \ |
|---|
| 642 | + .count = ATOMIC_INIT(1), \ |
|---|
| 643 | + .callback = _callback, \ |
|---|
| 644 | + .use_callback = true, \ |
|---|
| 645 | +} |
|---|
| 576 | 646 | |
|---|
| 647 | +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ |
|---|
| 648 | + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) |
|---|
| 649 | + |
|---|
| 650 | +#define DECLARE_TASKLET_OLD(name, _func) \ |
|---|
| 651 | +struct tasklet_struct name = { \ |
|---|
| 652 | + .count = ATOMIC_INIT(0), \ |
|---|
| 653 | + .func = _func, \ |
|---|
| 654 | +} |
|---|
| 655 | + |
|---|
| 656 | +#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \ |
|---|
| 657 | +struct tasklet_struct name = { \ |
|---|
| 658 | + .count = ATOMIC_INIT(1), \ |
|---|
| 659 | + .func = _func, \ |
|---|
| 660 | +} |
|---|
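DECLARE_TASKLET() and DECLARE_TASKLET_DISABLED() now take a callback that receives the tasklet itself, while the *_OLD variants preserve the previous unsigned-long prototype for code that has not been converted (they carry no .data initializer, so data stays 0). A small side-by-side sketch with hypothetical names:

```c
#include <linux/interrupt.h>

/* Old prototype, via the transitional macro. */
static void foo_old_fn(unsigned long data)
{
	/* data is 0 here; old-style users keep their context in globals. */
}
DECLARE_TASKLET_OLD(foo_old_tasklet, foo_old_fn);

/* New prototype: the callback is handed the tasklet_struct. */
static void foo_fn(struct tasklet_struct *t)
{
	/* Standalone static tasklet: context usually lives in globals too. */
}
DECLARE_TASKLET(foo_tasklet, foo_fn);

static void foo_kick(void)
{
	tasklet_schedule(&foo_tasklet);	/* foo_fn() runs in softirq context */
}
```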
| 577 | 661 | |
|---|
| 578 | 662 | enum |
|---|
| 579 | 663 | { |
|---|
| 580 | 664 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ |
|---|
| 581 | | - TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ |
|---|
| 582 | | - TASKLET_STATE_PENDING, /* Tasklet is pending */ |
|---|
| 583 | | - TASKLET_STATE_CHAINED /* Tasklet is chained */ |
|---|
| 665 | + TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ |
|---|
| 584 | 666 | }; |
|---|
| 585 | 667 | |
|---|
| 586 | | -#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) |
|---|
| 587 | | -#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) |
|---|
| 588 | | -#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) |
|---|
| 589 | | -#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED) |
|---|
| 590 | | -#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED) |
|---|
| 591 | | - |
|---|
| 592 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) |
|---|
| 668 | +#ifdef CONFIG_SMP |
|---|
| 593 | 669 | static inline int tasklet_trylock(struct tasklet_struct *t) |
|---|
| 594 | 670 | { |
|---|
| 595 | 671 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); |
|---|
| 596 | | -} |
|---|
| 597 | | - |
|---|
| 598 | | -static inline int tasklet_tryunlock(struct tasklet_struct *t) |
|---|
| 599 | | -{ |
|---|
| 600 | | - return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; |
|---|
| 601 | 672 | } |
|---|
| 602 | 673 | |
|---|
| 603 | 674 | static inline void tasklet_unlock(struct tasklet_struct *t) |
|---|
| .. | .. |
|---|
| 606 | 677 | clear_bit(TASKLET_STATE_RUN, &(t)->state); |
|---|
| 607 | 678 | } |
|---|
| 608 | 679 | |
|---|
| 609 | | -extern void tasklet_unlock_wait(struct tasklet_struct *t); |
|---|
| 610 | | - |
|---|
| 680 | +static inline void tasklet_unlock_wait(struct tasklet_struct *t) |
|---|
| 681 | +{ |
|---|
| 682 | + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } |
|---|
| 683 | +} |
|---|
| 611 | 684 | #else |
|---|
| 612 | 685 | #define tasklet_trylock(t) 1 |
|---|
| 613 | | -#define tasklet_tryunlock(t) 1 |
|---|
| 614 | 686 | #define tasklet_unlock_wait(t) do { } while (0) |
|---|
| 615 | 687 | #define tasklet_unlock(t) do { } while (0) |
|---|
| 616 | 688 | #endif |
|---|
| .. | .. |
|---|
| 644 | 716 | smp_mb(); |
|---|
| 645 | 717 | } |
|---|
| 646 | 718 | |
|---|
| 647 | | -extern void tasklet_enable(struct tasklet_struct *t); |
|---|
| 719 | +static inline void tasklet_enable(struct tasklet_struct *t) |
|---|
| 720 | +{ |
|---|
| 721 | + smp_mb__before_atomic(); |
|---|
| 722 | + atomic_dec(&t->count); |
|---|
| 723 | +} |
|---|
| 724 | + |
|---|
| 648 | 725 | extern void tasklet_kill(struct tasklet_struct *t); |
|---|
| 649 | 726 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); |
|---|
| 650 | 727 | extern void tasklet_init(struct tasklet_struct *t, |
|---|
| 651 | 728 | void (*func)(unsigned long), unsigned long data); |
|---|
| 652 | | - |
|---|
| 653 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
|---|
| 654 | | -extern void softirq_early_init(void); |
|---|
| 655 | | -#else |
|---|
| 656 | | -static inline void softirq_early_init(void) { } |
|---|
| 657 | | -#endif |
|---|
| 658 | | - |
|---|
| 659 | | -struct tasklet_hrtimer { |
|---|
| 660 | | - struct hrtimer timer; |
|---|
| 661 | | - struct tasklet_struct tasklet; |
|---|
| 662 | | - enum hrtimer_restart (*function)(struct hrtimer *); |
|---|
| 663 | | -}; |
|---|
| 664 | | - |
|---|
| 665 | | -extern void |
|---|
| 666 | | -tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, |
|---|
| 667 | | - enum hrtimer_restart (*function)(struct hrtimer *), |
|---|
| 668 | | - clockid_t which_clock, enum hrtimer_mode mode); |
|---|
| 669 | | - |
|---|
| 670 | | -static inline |
|---|
| 671 | | -void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, |
|---|
| 672 | | - const enum hrtimer_mode mode) |
|---|
| 673 | | -{ |
|---|
| 674 | | - hrtimer_start(&ttimer->timer, time, mode); |
|---|
| 675 | | -} |
|---|
| 676 | | - |
|---|
| 677 | | -static inline |
|---|
| 678 | | -void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) |
|---|
| 679 | | -{ |
|---|
| 680 | | - hrtimer_cancel(&ttimer->timer); |
|---|
| 681 | | - tasklet_kill(&ttimer->tasklet); |
|---|
| 682 | | -} |
|---|
| 729 | +extern void tasklet_setup(struct tasklet_struct *t, |
|---|
| 730 | + void (*callback)(struct tasklet_struct *)); |
|---|
| 683 | 731 | |
|---|
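For tasklets embedded in a driver structure, the new tasklet_setup() pairs with from_tasklet() (defined earlier in this diff) to replace tasklet_init()'s unsigned-long cookie with a type-checked container_of(). A hedged sketch with a hypothetical device structure:

```c
#include <linux/interrupt.h>

struct foo_dev {			/* hypothetical */
	void __iomem *regs;
	struct tasklet_struct rx_tasklet;
};

static void foo_drain_rx(struct foo_dev *fd)
{
	/* hypothetical RX-ring processing */
}

static void foo_rx_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the member name. */
	struct foo_dev *fd = from_tasklet(fd, t, rx_tasklet);

	foo_drain_rx(fd);
}

static void foo_init_bh(struct foo_dev *fd)
{
	tasklet_setup(&fd->rx_tasklet, foo_rx_tasklet);
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *fd = dev_id;

	/* ... acknowledge the hardware ... */
	tasklet_schedule(&fd->rx_tasklet);
	return IRQ_HANDLED;
}
```

The comment block earlier in this diff marks the tasklet API as deprecated, so new code should generally reach for threaded interrupts (request_threaded_irq()) instead.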
| 684 | 732 | /* |
|---|
| 685 | 733 | * Autoprobing for irqs: |
|---|
| .. | .. |
|---|
| 754 | 802 | /* |
|---|
| 755 | 803 | * We want to know which function is an entrypoint of a hardirq or a softirq. |
|---|
| 756 | 804 | */ |
|---|
| 757 | | -#define __irq_entry __attribute__((__section__(".irqentry.text"))) |
|---|
| 758 | | -#define __softirq_entry \ |
|---|
| 759 | | - __attribute__((__section__(".softirqentry.text"))) |
|---|
| 805 | +#ifndef __irq_entry |
|---|
| 806 | +# define __irq_entry __section(".irqentry.text") |
|---|
| 807 | +#endif |
|---|
| 808 | + |
|---|
| 809 | +#define __softirq_entry __section(".softirqentry.text") |
|---|
| 760 | 810 | |
|---|
| 761 | 811 | #endif |
|---|
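The last hunk makes __irq_entry overridable (an architecture or an earlier header may define it first) and switches both annotations to the __section() helper. They only control placement into .irqentry.text/.softirqentry.text, which tracing, kprobes and similar instrumentation use to recognise interrupt entry points. A minimal, hypothetical use from arch-level entry code:

```c
#include <linux/interrupt.h>
#include <linux/ptrace.h>

/*
 * Hypothetical low-level C entry point; __irq_entry places it in
 * .irqentry.text so instrumentation can tell it is hard-irq entry.
 */
__visible void __irq_entry foo_arch_handle_irq(struct pt_regs *regs)
{
	/* ... decode the pending interrupt and dispatch it ... */
}
```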