2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
--- a/kernel/kernel/workqueue.c
+++ b/kernel/kernel/workqueue.c
@@ -687,12 +687,17 @@
 	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
+static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
+{
+	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 {
 	unsigned long data = atomic_long_read(&work->data);
 
 	if (data & WORK_STRUCT_PWQ)
-		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
+		return work_struct_pwq(data);
 	else
 		return NULL;
 }
@@ -720,8 +725,7 @@
 	assert_rcu_or_pool_mutex();
 
 	if (data & WORK_STRUCT_PWQ)
-		return ((struct pool_workqueue *)
-			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
+		return work_struct_pwq(data)->pool;
 
 	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 	if (pool_id == WORK_OFFQ_POOL_NONE)
@@ -742,8 +746,7 @@
 	unsigned long data = atomic_long_read(&work->data);
 
 	if (data & WORK_STRUCT_PWQ)
-		return ((struct pool_workqueue *)
-			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+		return work_struct_pwq(data)->pool->id;
 
 	return data >> WORK_OFFQ_POOL_SHIFT;
 }
@@ -4954,10 +4957,6 @@
 		pool->flags |= POOL_DISASSOCIATED;
 
 		raw_spin_unlock_irq(&pool->lock);
-
-		for_each_pool_worker(worker, pool)
-			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
-
 		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*
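
The first three hunks fold the repeated "(struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK)" cast into the new work_struct_pwq() helper. Below is a minimal standalone sketch of that encoding scheme, not kernel code: the flag-bit width, the flag position, and the stubbed pool_workqueue are illustrative assumptions; only the masking pattern mirrors the helper added above.

/*
 * Standalone sketch (not kernel code): work->data packs a pool_workqueue
 * pointer together with low flag bits in a single unsigned long.  The
 * constants below are assumed for illustration only.
 */
#include <stdio.h>

#define WORK_STRUCT_FLAG_BITS		8		/* assumed width of the flag area */
#define WORK_STRUCT_PWQ			(1UL << 2)	/* assumed flag bit position */
#define WORK_STRUCT_WQ_DATA_MASK	(~((1UL << WORK_STRUCT_FLAG_BITS) - 1))

struct pool_workqueue {
	int id;					/* stub; the real struct is much larger */
};

/* Same shape as the helper introduced by the patch: strip the flag bits. */
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

int main(void)
{
	/* Align the object past the flag bits so its low address bits are free. */
	static struct pool_workqueue pwq
		__attribute__((aligned(1 << WORK_STRUCT_FLAG_BITS))) = { .id = 42 };
	unsigned long data = (unsigned long)&pwq | WORK_STRUCT_PWQ;

	if (data & WORK_STRUCT_PWQ)
		printf("pwq id = %d\n", work_struct_pwq(data)->id);

	return 0;
}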