@@ -128,7 +128,7 @@
  *
  * PL: wq_pool_mutex protected.
  *
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
  *
  * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
  *
@@ -137,7 +137,7 @@
  *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -145,7 +145,7 @@
 /* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-	spinlock_t		lock;		/* the pool lock */
+	raw_spinlock_t		lock;		/* the pool lock */
 	int			cpu;		/* I: the associated cpu */
 	int			node;		/* I: the associated node ID */
 	int			id;		/* I: pool ID */
@@ -184,7 +184,7 @@
 	atomic_t		nr_running ____cacheline_aligned_in_smp;
 
 	/*
-	 * Destruction of pool is sched-RCU protected to allow dereferences
+	 * Destruction of pool is RCU protected to allow dereferences
 	 * from get_work_pool().
 	 */
 	struct rcu_head		rcu;
@@ -213,7 +213,7 @@
 	/*
 	 * Release of unbound pwq is punted to system_wq. See put_pwq()
 	 * and pwq_unbound_release_workfn() for details. pool_workqueue
-	 * itself is also sched-RCU protected so that the first pwq can be
+	 * itself is also RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
@@ -298,8 +298,8 @@
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
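The two declarations above move from a spinlock_t and a regular waitqueue to a raw spinlock and a simple waitqueue. As a hedged illustration of the raw_spinlock_t rule of thumb (this example is hypothetical and not part of the patch): a raw_spinlock_t keeps real spinning semantics even on PREEMPT_RT, so it only suits short, bounded, non-sleeping critical sections.

#include <linux/spinlock.h>

/* Hypothetical example, not from the patch: raw_spinlock_t never turns
 * into a sleeping lock, so the protected region must stay short and
 * must not sleep. */
static DEFINE_RAW_SPINLOCK(example_lock);
static int example_counter;

static void example_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_counter++;	/* bounded, non-sleeping work only */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}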
@@ -358,20 +358,20 @@
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex()					\
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 			 !lockdep_is_held(&wq_pool_mutex),		\
-			 "sched RCU or wq_pool_mutex should be held")
+			 "RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_wq_mutex(wq)					\
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 			 !lockdep_is_held(&wq->mutex),			\
-			 "sched RCU or wq->mutex should be held")
+			 "RCU or wq->mutex should be held")
 
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 			 !lockdep_is_held(&wq->mutex) &&		\
 			 !lockdep_is_held(&wq_pool_mutex),		\
-			 "sched RCU, wq->mutex or wq_pool_mutex should be held")
+			 "RCU, wq->mutex or wq_pool_mutex should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
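With the assertions above rewritten, a plain RCU read-side critical section is enough where a sched-RCU (preemption-disabled) section used to be required. A minimal reader sketch in the spirit of this file (the helper below is hypothetical; for_each_pool() and assert_rcu_or_pool_mutex() are the existing macros):

/* Hypothetical helper: rcu_read_lock() now satisfies
 * assert_rcu_or_pool_mutex(); no preempt_disable() is needed. */
static void count_worker_pools(int *nr)
{
	struct worker_pool *pool;
	int pi;

	*nr = 0;
	rcu_read_lock();
	for_each_pool(pool, pi)		/* asserts RCU or wq_pool_mutex */
		(*nr)++;
	rcu_read_unlock();
}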
@@ -383,7 +383,7 @@
  * @pool: iteration cursor
  * @pi: integer used for iteration
  *
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
  * locked. If the pool needs to be used beyond the locking in effect, the
  * caller is responsible for guaranteeing that the pool stays online.
  *
@@ -415,7 +415,7 @@
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
@@ -551,7 +551,7 @@
  * @wq: the target workqueue
  * @node: the node ID
  *
- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
@@ -695,8 +695,8 @@
  * @work: the work item of interest
  *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
  *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect. If the returned pool needs to be used
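The updated comment spells out the new rule for get_work_pool(): hold wq_pool_mutex or be inside rcu_read_lock(). The general shape used throughout the rest of this patch is that RCU pins the pool object across the lookup while the pool's raw spinlock serializes access to its fields; a hypothetical illustration:

/* Hypothetical illustration of the lookup pattern: RCU keeps the pool
 * from being freed, the raw spinlock protects its contents. */
static bool work_pool_has_pending(struct work_struct *work)
{
	struct worker_pool *pool;
	bool pending = false;

	rcu_read_lock();
	pool = get_work_pool(work);
	if (pool) {
		raw_spin_lock_irq(&pool->lock);
		pending = !list_empty(&pool->worklist);
		raw_spin_unlock_irq(&pool->lock);
	}
	rcu_read_unlock();

	return pending;
}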
@@ -829,7 +829,7 @@
  * Wake up the first idle worker of @pool.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
@@ -840,43 +840,32 @@
 }
 
 /**
- * wq_worker_waking_up - a worker is waking up
+ * wq_worker_running - a worker is running again
  * @task: task waking up
- * @cpu: CPU @task is waking up to
  *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (!(worker->flags & WORKER_NOT_RUNNING)) {
-		WARN_ON_ONCE(worker->pool->cpu != cpu);
+	if (!worker->sleeping)
+		return;
+	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(&worker->pool->nr_running);
-	}
+	worker->sleeping = 0;
 }
 
 /**
  * wq_worker_sleeping - a worker is going to sleep
  * @task: task going to sleep
  *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
 {
-	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+	struct worker *next, *worker = kthread_data(task);
 	struct worker_pool *pool;
 
 	/*
@@ -885,13 +874,15 @@
 	 * checking NOT_RUNNING.
 	 */
 	if (worker->flags & WORKER_NOT_RUNNING)
-		return NULL;
+		return;
 
 	pool = worker->pool;
 
-	/* this can only happen on the local cpu */
-	if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
-		return NULL;
+	if (WARN_ON_ONCE(worker->sleeping))
+		return;
+
+	worker->sleeping = 1;
+	raw_spin_lock_irq(&pool->lock);
 
 	/*
 	 * The counterpart of the following dec_and_test, implied mb,
@@ -905,9 +896,12 @@
 	 * lock is safe.
 	 */
 	if (atomic_dec_and_test(&pool->nr_running) &&
-	    !list_empty(&pool->worklist))
-		to_wakeup = first_idle_worker(pool);
-	return to_wakeup ? to_wakeup->task : NULL;
+	    !list_empty(&pool->worklist)) {
+		next = first_idle_worker(pool);
+		if (next)
+			wake_up_process(next->task);
+	}
+	raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
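The pair above replaces wq_worker_waking_up() and the old wq_worker_sleeping(), which ran under the scheduler's rq lock: wq_worker_sleeping() now takes pool->lock itself and wakes the next idle worker directly, while wq_worker_running() only fixes up nr_running once the worker is back on a CPU. A sketch of the calling convention this implies on the scheduler side (assumption: the real call sites are the sched_submit_work()/sched_update_worker() paths in kernel/sched/core.c; the function below is purely illustrative):

/* Illustrative only: the sleeping hook runs before the worker blocks
 * (it may take pool->lock and wake another worker), the running hook
 * runs after the worker is back on a CPU. */
static void worker_schedule_hooks(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);	/* before __schedule() */

	/* ... the task blocks here and is woken later ... */

	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);		/* after returning from schedule() */
}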
@@ -938,7 +932,7 @@
  * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
@@ -963,7 +957,7 @@
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
@@ -1011,7 +1005,7 @@
  * actually occurs, it should be easy to locate the culprit work function.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
@@ -1046,7 +1040,7 @@
  * nested inside outer list_for_each_entry_safe().
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
 			      struct work_struct **nextp)
@@ -1121,12 +1115,12 @@
 {
 	if (pwq) {
 		/*
-		 * As both pwqs and pools are sched-RCU protected, the
+		 * As both pwqs and pools are RCU protected, the
 		 * following lock operations are safe.
 		 */
-		spin_lock_irq(&pwq->pool->lock);
+		raw_spin_lock_irq(&pwq->pool->lock);
 		put_pwq(pwq);
-		spin_unlock_irq(&pwq->pool->lock);
+		raw_spin_unlock_irq(&pwq->pool->lock);
 	}
 }
 
@@ -1159,7 +1153,7 @@
  * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
@@ -1249,6 +1243,7 @@
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
 		return 0;
 
+	rcu_read_lock();
 	/*
 	 * The queueing is in progress, or it is already queued. Try to
 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1257,7 +1252,7 @@
 	if (!pool)
 		goto fail;
 
-	spin_lock(&pool->lock);
+	raw_spin_lock(&pool->lock);
 	/*
 	 * work->data is guaranteed to point to pwq only while the work
 	 * item is queued on pwq->wq, and both updating work->data to point
@@ -1286,11 +1281,13 @@
 		/* work->data points to pwq iff queued, point to pool */
 		set_work_pool_and_keep_pending(work, pool->id);
 
-		spin_unlock(&pool->lock);
+		raw_spin_unlock(&pool->lock);
+		rcu_read_unlock();
 		return 1;
 	}
-	spin_unlock(&pool->lock);
+	raw_spin_unlock(&pool->lock);
 fail:
+	rcu_read_unlock();
 	local_irq_restore(*flags);
 	if (work_is_canceling(work))
 		return -ENOENT;
@@ -1309,7 +1306,7 @@
  * work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 			struct list_head *head, unsigned int extra_flags)
@@ -1403,6 +1400,7 @@
 	if (unlikely(wq->flags & __WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
+	rcu_read_lock();
 retry:
 	/* pwq which will be used unless @work is executing elsewhere */
 	if (wq->flags & WQ_UNBOUND) {
@@ -1424,7 +1422,7 @@
 	if (last_pool && last_pool != pwq->pool) {
 		struct worker *worker;
 
-		spin_lock(&last_pool->lock);
+		raw_spin_lock(&last_pool->lock);
 
 		worker = find_worker_executing_work(last_pool, work);
 
@@ -1432,11 +1430,11 @@
 			pwq = worker->current_pwq;
 		} else {
 			/* meh... not running there, queue here */
-			spin_unlock(&last_pool->lock);
-			spin_lock(&pwq->pool->lock);
+			raw_spin_unlock(&last_pool->lock);
+			raw_spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		spin_lock(&pwq->pool->lock);
+		raw_spin_lock(&pwq->pool->lock);
 	}
 
 	/*
@@ -1449,7 +1447,7 @@
 	 */
 	if (unlikely(!pwq->refcnt)) {
 		if (wq->flags & WQ_UNBOUND) {
-			spin_unlock(&pwq->pool->lock);
+			raw_spin_unlock(&pwq->pool->lock);
 			cpu_relax();
 			goto retry;
 		}
@@ -1461,10 +1459,8 @@
 	/* pwq determined, queue */
 	trace_workqueue_queue_work(req_cpu, pwq, work);
 
-	if (WARN_ON(!list_empty(&work->entry))) {
-		spin_unlock(&pwq->pool->lock);
-		return;
-	}
+	if (WARN_ON(!list_empty(&work->entry)))
+		goto out;
 
 	pwq->nr_in_flight[pwq->work_color]++;
 	work_flags = work_color_to_flags(pwq->work_color);
@@ -1483,7 +1479,9 @@
 	debug_work_activate(work);
 	insert_work(pwq, work, worklist, work_flags);
 
-	spin_unlock(&pwq->pool->lock);
+out:
+	raw_spin_unlock(&pwq->pool->lock);
+	rcu_read_unlock();
 }
 
 /**
@@ -1518,9 +1516,11 @@
 void delayed_work_timer_fn(struct timer_list *t)
 {
 	struct delayed_work *dwork = from_timer(dwork, t, timer);
+	unsigned long flags;
 
-	/* should have been called from irqsafe timer with irq already off */
+	local_irq_save(flags);
 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(delayed_work_timer_fn);
 
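delayed_work_timer_fn() no longer assumes it was called from an irqsafe timer with interrupts already off; it brackets __queue_work() with local_irq_save()/local_irq_restore() itself. For context, a minimal (hypothetical) user whose timer expiry lands in this callback, using only the standard delayed-work API:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_deferred_fn(struct work_struct *work)
{
	/* runs later in process context on a worker */
}
static DECLARE_DELAYED_WORK(my_deferred_work, my_deferred_fn);

/* Hypothetical usage: the timer armed here fires into
 * delayed_work_timer_fn() above. */
static void kick_deferred_work(void)
{
	schedule_delayed_work(&my_deferred_work, msecs_to_jiffies(100));
}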
@@ -1669,7 +1669,7 @@
  * necessary.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void worker_enter_idle(struct worker *worker)
 {
@@ -1709,7 +1709,7 @@
  * @worker is leaving idle state. Update stats.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void worker_leave_idle(struct worker *worker)
 {
@@ -1844,11 +1844,11 @@
 	worker_attach_to_pool(worker, pool);
 
 	/* start the newly created worker */
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 	worker->pool->nr_workers++;
 	worker_enter_idle(worker);
 	wake_up_process(worker->task);
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	return worker;
 
@@ -1867,7 +1867,7 @@
  * be idle.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void destroy_worker(struct worker *worker)
 {
@@ -1893,7 +1893,7 @@
 {
 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
 
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 
 	while (too_many_workers(pool)) {
 		struct worker *worker;
@@ -1911,7 +1911,7 @@
 		destroy_worker(worker);
 	}
 
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 }
 
 static void send_mayday(struct work_struct *work)
@@ -1942,8 +1942,8 @@
 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
 	struct work_struct *work;
 
-	spin_lock_irq(&pool->lock);
-	spin_lock(&wq_mayday_lock);		/* for wq->maydays */
+	raw_spin_lock_irq(&pool->lock);
+	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1956,8 +1956,8 @@
 			send_mayday(work);
 	}
 
-	spin_unlock(&wq_mayday_lock);
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock(&wq_mayday_lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -1976,7 +1976,7 @@
  * may_start_working() %true.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations. Called only from
 * manager.
 */
@@ -1985,7 +1985,7 @@
 __acquires(&pool->lock)
 {
 restart:
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2001,7 +2001,7 @@
 	}
 
 	del_timer_sync(&pool->mayday_timer);
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 	/*
 	 * This is necessary even after a new worker was just successfully
 	 * created as @pool->lock was dropped and the new worker might have
@@ -2024,7 +2024,7 @@
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations.
 *
 * Return:
@@ -2047,7 +2047,7 @@
 
 	pool->manager = NULL;
 	pool->flags &= ~POOL_MANAGER_ACTIVE;
-	wake_up(&wq_manager_wait);
+	swake_up_one(&wq_manager_wait);
 	return true;
 }
 
@@ -2063,7 +2063,7 @@
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which is released and regrabbed.
+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
 __releases(&pool->lock)
@@ -2145,7 +2145,7 @@
 	 */
 	set_work_pool_and_clear_pending(work, pool->id);
 
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
@@ -2200,7 +2200,7 @@
 	 */
 	cond_resched();
 
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 
 	/* clear cpu intensive status */
 	if (unlikely(cpu_intensive))
@@ -2226,7 +2226,7 @@
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.
 */
 static void process_scheduled_works(struct worker *worker)
@@ -2268,11 +2268,11 @@
 	/* tell the scheduler that this is a workqueue worker */
 	set_pf_worker(true);
 woke_up:
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 
 	/* am I supposed to die? */
 	if (unlikely(worker->flags & WORKER_DIE)) {
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		set_pf_worker(false);
 
@@ -2338,7 +2338,7 @@
 	 */
 	worker_enter_idle(worker);
 	__set_current_state(TASK_IDLE);
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 	schedule();
 	goto woke_up;
 }
@@ -2392,7 +2392,7 @@
 	should_stop = kthread_should_stop();
 
 	/* see whether any pwq is asking for help */
-	spin_lock_irq(&wq_mayday_lock);
+	raw_spin_lock_irq(&wq_mayday_lock);
 
 	while (!list_empty(&wq->maydays)) {
 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
@@ -2404,11 +2404,11 @@
 		__set_current_state(TASK_RUNNING);
 		list_del_init(&pwq->mayday_node);
 
-		spin_unlock_irq(&wq_mayday_lock);
+		raw_spin_unlock_irq(&wq_mayday_lock);
 
 		worker_attach_to_pool(rescuer, pool);
 
-		spin_lock_irq(&pool->lock);
+		raw_spin_lock_irq(&pool->lock);
 
 		/*
 		 * Slurp in all works issued via this workqueue and
@@ -2437,7 +2437,7 @@
 		 * incur MAYDAY_INTERVAL delay inbetween.
 		 */
 		if (need_to_create_worker(pool)) {
-			spin_lock(&wq_mayday_lock);
+			raw_spin_lock(&wq_mayday_lock);
 			/*
 			 * Queue iff we aren't racing destruction
 			 * and somebody else hasn't queued it already.
@@ -2446,7 +2446,7 @@
 				get_pwq(pwq);
 				list_add_tail(&pwq->mayday_node, &wq->maydays);
 			}
-			spin_unlock(&wq_mayday_lock);
+			raw_spin_unlock(&wq_mayday_lock);
 		}
 	}
 
@@ -2464,14 +2464,14 @@
 		if (need_more_worker(pool))
 			wake_up_worker(pool);
 
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 
 		worker_detach_from_pool(rescuer);
 
-		spin_lock_irq(&wq_mayday_lock);
+		raw_spin_lock_irq(&wq_mayday_lock);
 	}
 
-	spin_unlock_irq(&wq_mayday_lock);
+	raw_spin_unlock_irq(&wq_mayday_lock);
 
 	if (should_stop) {
 		__set_current_state(TASK_RUNNING);
@@ -2551,7 +2551,7 @@
  * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_wq_barrier(struct pool_workqueue *pwq,
 			      struct wq_barrier *barr,
@@ -2638,7 +2638,7 @@
 	for_each_pwq(pwq, wq) {
 		struct worker_pool *pool = pwq->pool;
 
-		spin_lock_irq(&pool->lock);
+		raw_spin_lock_irq(&pool->lock);
 
 		if (flush_color >= 0) {
 			WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2655,7 +2655,7 @@
 			pwq->work_color = work_color;
 		}
 
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 	}
 
 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2855,9 +2855,9 @@
 	for_each_pwq(pwq, wq) {
 		bool drained;
 
-		spin_lock_irq(&pwq->pool->lock);
+		raw_spin_lock_irq(&pwq->pool->lock);
 		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-		spin_unlock_irq(&pwq->pool->lock);
+		raw_spin_unlock_irq(&pwq->pool->lock);
 
 		if (drained)
 			continue;
@@ -2886,14 +2886,14 @@
 
 	might_sleep();
 
-	local_irq_disable();
+	rcu_read_lock();
 	pool = get_work_pool(work);
 	if (!pool) {
-		local_irq_enable();
+		rcu_read_unlock();
 		return false;
 	}
 
-	spin_lock(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 	/* see the comment in try_to_grab_pending() with the same code */
 	pwq = get_work_pwq(work);
 	if (pwq) {
@@ -2909,7 +2909,7 @@
 	check_flush_dependency(pwq->wq, work);
 
 	insert_wq_barrier(pwq, barr, work, worker);
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	/*
 	 * Force a lock recursion deadlock when using flush_work() inside a
@@ -2925,10 +2925,11 @@
 		lock_map_acquire(&pwq->wq->lockdep_map);
 		lock_map_release(&pwq->wq->lockdep_map);
 	}
-
+	rcu_read_unlock();
 	return true;
 already_gone:
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
+	rcu_read_unlock();
 	return false;
 }
 
@@ -3240,7 +3241,7 @@
  *
  * Undo alloc_workqueue_attrs().
  */
-void free_workqueue_attrs(struct workqueue_attrs *attrs)
+static void free_workqueue_attrs(struct workqueue_attrs *attrs)
 {
 	if (attrs) {
 		free_cpumask_var(attrs->cpumask);
@@ -3250,21 +3251,20 @@
 
 /**
  * alloc_workqueue_attrs - allocate a workqueue_attrs
- * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
 * return it.
 *
 * Return: The allocated new workqueue_attr on success. %NULL on failure.
 */
-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+static struct workqueue_attrs *alloc_workqueue_attrs(void)
 {
 	struct workqueue_attrs *attrs;
 
-	attrs = kzalloc(sizeof(*attrs), gfp_mask);
+	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		goto fail;
-	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
 		goto fail;
 
 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
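alloc_workqueue_attrs() drops its gfp_t parameter (it now always allocates with GFP_KERNEL) and, like free_workqueue_attrs() above and apply_workqueue_attrs() further down, becomes static, so these helpers are only reachable from inside workqueue.c. An illustrative in-file caller under the new signature (the helper below is hypothetical; HIGHPRI_NICE_LEVEL is the nice level this file already uses for high-priority pools):

/* Hypothetical in-file helper showing the argument-less signature. */
static struct workqueue_attrs *example_make_highpri_attrs(void)
{
	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs();	/* always GFP_KERNEL now */
	if (!attrs)
		return NULL;

	attrs->nice = HIGHPRI_NICE_LEVEL;
	return attrs;
}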
@@ -3321,7 +3321,7 @@
  */
 static int init_worker_pool(struct worker_pool *pool)
 {
-	spin_lock_init(&pool->lock);
+	raw_spin_lock_init(&pool->lock);
 	pool->id = -1;
 	pool->cpu = -1;
 	pool->node = NUMA_NO_NODE;
@@ -3342,7 +3342,7 @@
 	pool->refcnt = 1;
 
 	/* shouldn't fail above this point */
-	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	pool->attrs = alloc_workqueue_attrs();
 	if (!pool->attrs)
 		return -ENOMEM;
 	return 0;
@@ -3375,7 +3375,7 @@
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
  *
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
 * safe manner. get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
@@ -3407,15 +3407,15 @@
 	 * @pool's workers from blocking on attach_mutex. We're the last
 	 * manager and @pool gets freed with the flag set.
 	 */
-	spin_lock_irq(&pool->lock);
-	wait_event_lock_irq(wq_manager_wait,
+	raw_spin_lock_irq(&pool->lock);
+	swait_event_lock_irq(wq_manager_wait,
 			   !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
 	pool->flags |= POOL_MANAGER_ACTIVE;
 
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 
 	mutex_lock(&wq_pool_attach_mutex);
 	if (!list_empty(&pool->workers))
@@ -3429,8 +3429,8 @@
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
 
-	/* sched-RCU protected to allow dereferences from get_work_pool() */
-	call_rcu_sched(&pool->rcu, rcu_free_pool);
+	/* RCU protected to allow dereferences from get_work_pool() */
+	call_rcu(&pool->rcu, rcu_free_pool);
 }
 
 /**
@@ -3543,14 +3543,14 @@
 	put_unbound_pool(pool);
 	mutex_unlock(&wq_pool_mutex);
 
-	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+	call_rcu(&pwq->rcu, rcu_free_pwq);
 
 	/*
 	 * If we're the last pwq going away, @wq is already dead and no one
 	 * is gonna access it anymore. Schedule RCU free.
 	 */
 	if (is_last)
-		call_rcu_sched(&wq->rcu, rcu_free_wq);
+		call_rcu(&wq->rcu, rcu_free_wq);
 }
 
 /**
@@ -3575,7 +3575,7 @@
 		return;
 
 	/* this function can be called during early boot w/ irq disabled */
-	spin_lock_irqsave(&pwq->pool->lock, flags);
+	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
 
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
@@ -3605,7 +3605,7 @@
 		pwq->max_active = 0;
 	}
 
-	spin_unlock_irqrestore(&pwq->pool->lock, flags);
+	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -3778,8 +3778,8 @@
 
 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
 
-	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
-	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	new_attrs = alloc_workqueue_attrs();
+	tmp_attrs = alloc_workqueue_attrs();
 	if (!ctx || !new_attrs || !tmp_attrs)
 		goto out_free;
 
@@ -3915,7 +3915,7 @@
  *
  * Return: 0 on success and -errno on failure.
  */
-int apply_workqueue_attrs(struct workqueue_struct *wq,
+static int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
 {
 	int ret;
@@ -3926,7 +3926,6 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
 
 /**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
@@ -4004,9 +4003,9 @@
 
 use_dfl_pwq:
 	mutex_lock(&wq->mutex);
-	spin_lock_irq(&wq->dfl_pwq->pool->lock);
+	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
 	get_pwq(wq->dfl_pwq);
-	spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
 out_unlock:
 	mutex_unlock(&wq->mutex);
@@ -4125,7 +4124,7 @@
 		return NULL;
 
 	if (flags & WQ_UNBOUND) {
-		wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+		wq->unbound_attrs = alloc_workqueue_attrs();
 		if (!wq->unbound_attrs)
 			goto err_free_wq;
 	}
@@ -4212,9 +4211,9 @@
 		struct worker *rescuer = wq->rescuer;
 
 		/* this prevents new queueing */
-		spin_lock_irq(&wq_mayday_lock);
+		raw_spin_lock_irq(&wq_mayday_lock);
 		wq->rescuer = NULL;
-		spin_unlock_irq(&wq_mayday_lock);
+		raw_spin_unlock_irq(&wq_mayday_lock);
 
 		/* rescuer will empty maydays list before exiting */
 		kthread_stop(rescuer->task);
@@ -4257,7 +4256,7 @@
 		 * The base ref is never dropped on per-cpu pwqs. Directly
 		 * schedule RCU free.
 		 */
-		call_rcu_sched(&wq->rcu, rcu_free_wq);
+		call_rcu(&wq->rcu, rcu_free_wq);
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point. Directly
@@ -4367,7 +4366,8 @@
 	struct pool_workqueue *pwq;
 	bool ret;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
+	preempt_disable();
 
 	if (cpu == WORK_CPU_UNBOUND)
 		cpu = smp_processor_id();
@@ -4378,7 +4378,8 @@
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
 	ret = !list_empty(&pwq->delayed_works);
-	rcu_read_unlock_sched();
+	preempt_enable();
+	rcu_read_unlock();
 
 	return ret;
 }
@@ -4404,15 +4405,15 @@
 	if (work_pending(work))
 		ret |= WORK_BUSY_PENDING;
 
-	local_irq_save(flags);
+	rcu_read_lock();
 	pool = get_work_pool(work);
 	if (pool) {
-		spin_lock(&pool->lock);
+		raw_spin_lock_irqsave(&pool->lock, flags);
 		if (find_worker_executing_work(pool, work))
 			ret |= WORK_BUSY_RUNNING;
-		spin_unlock(&pool->lock);
+		raw_spin_unlock_irqrestore(&pool->lock, flags);
 	}
-	local_irq_restore(flags);
+	rcu_read_unlock();
 
 	return ret;
 }
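work_busy() keeps its external behaviour; only the internal protection changes, with an RCU read-side section pinning the pool and the pool's raw spinlock taken with irqsave since callers may have interrupts enabled. A trivial caller, shown only to make the unchanged contract concrete (the function below is hypothetical; work_busy() and the WORK_BUSY_* flags are the existing public API from <linux/workqueue.h>):

#include <linux/workqueue.h>

/* Hypothetical caller: the public contract of work_busy() is unchanged
 * by this patch. */
static bool my_work_is_active(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	return busy & (WORK_BUSY_PENDING | WORK_BUSY_RUNNING);
}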
@@ -4597,7 +4598,7 @@
 	unsigned long flags;
 	int pi;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	pr_info("Showing busy workqueues and worker pools:\n");
 
@@ -4617,10 +4618,10 @@
 		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
 		for_each_pwq(pwq, wq) {
-			spin_lock_irqsave(&pwq->pool->lock, flags);
+			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
 				show_pwq(pwq);
-			spin_unlock_irqrestore(&pwq->pool->lock, flags);
+			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
 			/*
 			 * We could be printing a lot from atomic context, e.g.
 			 * sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4634,7 +4635,7 @@
 		struct worker *worker;
 		bool first = true;
 
-		spin_lock_irqsave(&pool->lock, flags);
+		raw_spin_lock_irqsave(&pool->lock, flags);
 		if (pool->nr_workers == pool->nr_idle)
 			goto next_pool;
 
@@ -4653,7 +4654,7 @@
 		}
 		pr_cont("\n");
 	next_pool:
-		spin_unlock_irqrestore(&pool->lock, flags);
+		raw_spin_unlock_irqrestore(&pool->lock, flags);
 		/*
 		 * We could be printing a lot from atomic context, e.g.
 		 * sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4662,7 +4663,7 @@
 		touch_nmi_watchdog();
 	}
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 /* used to show worker information through /proc/PID/{comm,stat,status} */
@@ -4683,7 +4684,7 @@
 	struct worker_pool *pool = worker->pool;
 
 	if (pool) {
-		spin_lock_irq(&pool->lock);
+		raw_spin_lock_irq(&pool->lock);
 		/*
 		 * ->desc tracks information (wq name or
 		 * set_worker_desc()) for the latest execution. If
@@ -4697,7 +4698,7 @@
 			scnprintf(buf + off, size - off, "-%s",
 				  worker->desc);
 		}
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 	}
 }
 
@@ -4728,7 +4729,7 @@
 
 	for_each_cpu_worker_pool(pool, cpu) {
 		mutex_lock(&wq_pool_attach_mutex);
-		spin_lock_irq(&pool->lock);
+		raw_spin_lock_irq(&pool->lock);
 
 		/*
 		 * We've blocked all attach/detach operations. Make all workers
@@ -4742,7 +4743,7 @@
 
 		pool->flags |= POOL_DISASSOCIATED;
 
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*
@@ -4768,9 +4769,9 @@
 		 * worker blocking could lead to lengthy stalls. Kick off
 		 * unbound chain execution of currently pending work items.
 		 */
-		spin_lock_irq(&pool->lock);
+		raw_spin_lock_irq(&pool->lock);
 		wake_up_worker(pool);
-		spin_unlock_irq(&pool->lock);
+		raw_spin_unlock_irq(&pool->lock);
 	}
 }
 
@@ -4797,7 +4798,7 @@
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
 						  pool->attrs->cpumask) < 0);
 
-	spin_lock_irq(&pool->lock);
+	raw_spin_lock_irq(&pool->lock);
 
 	pool->flags &= ~POOL_DISASSOCIATED;
 
@@ -4836,7 +4837,7 @@
 		WRITE_ONCE(worker->flags, worker_flags);
 	}
 
-	spin_unlock_irq(&pool->lock);
+	raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
@@ -5049,16 +5050,16 @@
 		 * nr_active is monotonically decreasing. It's safe
 		 * to peek without lock.
 		 */
-		rcu_read_lock_sched();
+		rcu_read_lock();
 		for_each_pwq(pwq, wq) {
 			WARN_ON_ONCE(pwq->nr_active < 0);
 			if (pwq->nr_active) {
 				busy = true;
-				rcu_read_unlock_sched();
+				rcu_read_unlock();
 				goto out_unlock;
 			}
 		}
-		rcu_read_unlock_sched();
+		rcu_read_unlock();
 	}
 out_unlock:
 	mutex_unlock(&wq_pool_mutex);
@@ -5260,7 +5261,8 @@
 	const char *delim = "";
 	int node, written = 0;
 
-	rcu_read_lock_sched();
+	get_online_cpus();
+	rcu_read_lock();
 	for_each_node(node) {
 		written += scnprintf(buf + written, PAGE_SIZE - written,
 				     "%s%d:%d", delim, node,
@@ -5268,7 +5270,8 @@
 		delim = " ";
 	}
 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
+	put_online_cpus();
 
 	return written;
 }
@@ -5293,7 +5296,7 @@
 
 	lockdep_assert_held(&wq_pool_mutex);
 
-	attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	attrs = alloc_workqueue_attrs();
 	if (!attrs)
 		return NULL;
 
@@ -5722,7 +5725,7 @@
 		return;
 	}
 
-	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
+	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
 
 	/*
@@ -5797,7 +5800,7 @@
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
 
-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		BUG_ON(!(attrs = alloc_workqueue_attrs()));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
 
@@ -5806,7 +5809,7 @@
 		 * guaranteed by max_active which is enforced by pwqs.
 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
 		 */
-		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		BUG_ON(!(attrs = alloc_workqueue_attrs()));
 		attrs->nice = std_nice[i];
 		attrs->no_numa = true;
 		ordered_wq_attrs[i] = attrs;