@@ -63,7 +63,7 @@
 	WORK_CPU_UNBOUND	= NR_CPUS,
 
 	/*
-	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
 	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
 	 * flush colors.
 	 */
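The comment fixed above documents a pointer-tagging scheme: a pool_workqueue pointer and its flag bits share one word, which works because 256-byte alignment keeps the low 8 bits of the address zero. A standalone sketch of the idea, with illustrative names only (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define FLAG_BITS 8				/* mirrors the 8 reserved bits */
#define FLAG_MASK ((1ul << FLAG_BITS) - 1)

int main(void)
{
	/* a 256-byte-aligned object has FLAG_BITS zeroed low address bits */
	static _Alignas(1 << FLAG_BITS) char pwq[256];
	uintptr_t data = (uintptr_t)pwq | 0x5;	/* pack flags into the low bits */

	printf("pwq   = %p\n", (void *)(data & ~(uintptr_t)FLAG_MASK));
	printf("flags = %#lx\n", (unsigned long)(data & FLAG_MASK));
	return 0;
}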
@@ -74,7 +74,6 @@
 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
 
 	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
-	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
 
 	/*
 	 * When a work item is off queue, its high bits point to the last
@@ -85,12 +84,6 @@
 	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
 	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
 	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
-	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
-
-	/* convenience constants */
-	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
-	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
@@ -99,6 +92,14 @@
 	/* maximum string length for set_worker_desc() */
 	WORKER_DESC_LEN		= 24,
 };
+
+/* Convenience constants - of type 'unsigned long', not 'enum'! */
+#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
+#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
+#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
+
+#define WORK_STRUCT_FLAG_MASK	((1ul << WORK_STRUCT_FLAG_BITS) - 1)
+#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)
 
 struct work_struct {
 	atomic_long_t data;
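Why these became macros rather than enumerators (the new comment hints at it): ISO C requires an enumeration constant's value to be representable as an int, so a 64-bit value like WORK_STRUCT_NO_POOL only ever compiled as a compiler extension with an implementation-defined type, while the 'ul' suffix pins everything to unsigned long. A standalone sketch with made-up shift values:

#include <stdio.h>

#define POOL_BITS	31			/* illustrative stand-ins */
#define POOL_SHIFT	11

#define POOL_NONE	((1ul << POOL_BITS) - 1)	/* unsigned long */
#define NO_POOL		(POOL_NONE << POOL_SHIFT)	/* stays unsigned long */

int main(void)
{
	/* 0x3ffffffff800 on 64-bit: too wide for an int-typed enumerator */
	printf("NO_POOL = %#lx\n", NO_POOL);
	return 0;
}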
@@ -396,43 +397,23 @@
 extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
 
-extern struct workqueue_struct *
-__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
-	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
-
 /**
  * alloc_workqueue - allocate a workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
  * @max_active: max in-flight work items, 0 for default
- * @args...: args for @fmt
+ * remaining args: args for @fmt
  *
  * Allocate a workqueue with the specified parameters.  For detailed
  * information on WQ_* flags, please refer to
  * Documentation/core-api/workqueue.rst.
  *
- * The __lock_name macro dance is to guarantee that single lock_class_key
- * doesn't end up with different namesm, which isn't allowed by lockdep.
- *
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-#ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...)		\
-({									\
-	static struct lock_class_key __key;				\
-	const char *__lock_name;					\
-									\
-	__lock_name = "(wq_completion)"#fmt#args;			\
-									\
-	__alloc_workqueue_key((fmt), (flags), (max_active),		\
-			      &__key, __lock_name, ##args);		\
-})
-#else
-#define alloc_workqueue(fmt, flags, max_active, args...)		\
-	__alloc_workqueue_key((fmt), (flags), (max_active),		\
-			      NULL, NULL, ##args)
-#endif
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...);
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
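With the __alloc_workqueue_key() wrapper gone, alloc_workqueue() is a plain varargs function, the lockdep key presumably being set up inside the implementation instead of at every call site. A hedged usage sketch; the queue name, flags, and format argument are all illustrative:

static struct workqueue_struct *my_wq;	/* hypothetical driver queue */

static int my_driver_init(void)
{
	/* trailing args are formatted into the workqueue name */
	my_wq = alloc_workqueue("my_driver/%d", WQ_UNBOUND | WQ_FREEZABLE, 0, 1);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}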
@@ -461,7 +442,7 @@
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+struct workqueue_attrs *alloc_workqueue_attrs(void);
 void free_workqueue_attrs(struct workqueue_attrs *attrs);
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs);
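alloc_workqueue_attrs() loses its gfp_t parameter; the allocation is now unconditionally GFP_KERNEL. A sketch of the updated calling pattern (the helper itself is hypothetical; the nice member does exist in struct workqueue_attrs):

static int my_set_wq_nice(struct workqueue_struct *wq, int nice)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();	/* was: alloc_workqueue_attrs(GFP_KERNEL) */
	if (!attrs)
		return -ENOMEM;
	attrs->nice = nice;
	ret = apply_workqueue_attrs(wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}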
@@ -469,6 +450,8 @@
 
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
 			struct work_struct *work);
+extern bool queue_work_node(int node, struct workqueue_struct *wq,
+			    struct work_struct *work);
 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
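queue_work_node() is new: it makes a best-effort attempt to run the work on a CPU of the given NUMA node, and is intended for unbound workqueues. An illustrative call; the device pointer and queue name are assumptions:

/* prefer a CPU near the device's memory; my_unbound_wq is WQ_UNBOUND */
if (!queue_work_node(dev_to_node(&pdev->dev), my_unbound_wq, &my_work))
	pr_debug("work was already pending\n");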
@@ -511,6 +494,19 @@
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties:  If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0				CPU1
+ *
+ *   WRITE_ONCE(x, 1);			[ @work is being executed ]
+ *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
 			      struct work_struct *work)
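The litmus test added above, restated as purely illustrative handler code to show where each side of the guarantee lives (names are made up):

static int x;	/* the 'x' of the litmus test */

static void my_handler(struct work_struct *work)
{
	/* if the matching queue_work() returned %true, this must read 1 */
	pr_info("r1 = %d\n", READ_ONCE(x));
}

static DECLARE_WORK(my_work, my_handler);

static void producer(void)
{
	WRITE_ONCE(x, 1);
	if (!queue_work(system_wq, &my_work))
		pr_debug("already pending: this call makes no ordering claim\n");
}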
@@ -570,6 +566,9 @@
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {
@@ -673,7 +672,7 @@
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
-int __init workqueue_init_early(void);
-int __init workqueue_init(void);
+void __init workqueue_init_early(void);
+void __init workqueue_init(void);
 
 #endif