2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/kernel/sched/core.c
@@ -1018,7 +1018,7 @@
         if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
                 return;
 
-        WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+        uclamp_rq_set(rq, clamp_id, clamp_value);
 }
 
 static inline
@@ -1211,8 +1211,8 @@
         if (bucket->tasks == 1 || uc_se->value > bucket->value)
                 bucket->value = uc_se->value;
 
-        if (uc_se->value > READ_ONCE(uc_rq->value))
-                WRITE_ONCE(uc_rq->value, uc_se->value);
+        if (uc_se->value > uclamp_rq_get(rq, clamp_id))
+                uclamp_rq_set(rq, clamp_id, uc_se->value);
 }
 
 /*
@@ -1278,7 +1278,7 @@
         if (likely(bucket->tasks))
                 return;
 
-        rq_clamp = READ_ONCE(uc_rq->value);
+        rq_clamp = uclamp_rq_get(rq, clamp_id);
         /*
          * Defensive programming: this should never happen. If it happens,
          * e.g. due to future modification, warn and fixup the expected value.
@@ -1286,7 +1286,7 @@
         SCHED_WARN_ON(bucket->value > rq_clamp);
         if (bucket->value >= rq_clamp) {
                 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
-                WRITE_ONCE(uc_rq->value, bkt_clamp);
+                uclamp_rq_set(rq, clamp_id, bkt_clamp);
         }
 }
 
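For reference, the uclamp_rq_get()/uclamp_rq_set() accessors these hunks switch to are, in upstream kernel/sched/sched.h, thin READ_ONCE()/WRITE_ONCE() wrappers around rq->uclamp[clamp_id].value. A sketch; the exact definitions in this tree may differ slightly:

static inline unsigned long uclamp_rq_get(struct rq *rq,
                                          enum uclamp_id clamp_id)
{
        return READ_ONCE(rq->uclamp[clamp_id].value);
}

static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
                                 unsigned int value)
{
        WRITE_ONCE(rq->uclamp[clamp_id].value, value);
}
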
@@ -1692,6 +1692,9 @@
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+        if (task_on_rq_migrating(p))
+                flags |= ENQUEUE_MIGRATED;
+
         enqueue_task(rq, p, flags);
 
         p->on_rq = TASK_ON_RQ_QUEUED;
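activate_task() now tags tasks that are in the middle of a CPU migration with ENQUEUE_MIGRATED, so the enqueue path can treat them as migrated rather than as fresh wakeups. The task_on_rq_migrating() test it relies on is, in upstream kernel/sched/sched.h, essentially the following (sketch):

static inline int task_on_rq_migrating(struct task_struct *p)
{
        return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}
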
@@ -4535,8 +4538,7 @@
                 pr_err("Preemption disabled at:");
                 print_ip_sym(KERN_ERR, preempt_disable_ip);
         }
-        if (panic_on_warn)
-                panic("scheduling while atomic\n");
+        check_panic_on_warn("scheduling while atomic");
 
         trace_android_rvh_schedule_bug(prev);
 
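check_panic_on_warn() centralizes the open-coded panic_on_warn test. A minimal sketch of the upstream helper in kernel/panic.c; the real one also enforces the kernel.warn_limit sysctl:

void check_panic_on_warn(const char *origin)
{
        /* Sketch only: honour the panic_on_warn sysctl for this origin. */
        if (panic_on_warn)
                panic("%s: panic_on_warn set ...\n", origin);
}
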
@@ -5480,6 +5482,7 @@
         int reset_on_fork;
         int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
         struct rq *rq;
+        bool cpuset_locked = false;
 
         /* The pi code expects interrupts enabled */
         BUG_ON(pi && in_interrupt());
@@ -5582,6 +5585,15 @@
         }
 
         /*
+         * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
+         * information.
+         */
+        if (dl_policy(policy) || dl_policy(p->policy)) {
+                cpuset_locked = true;
+                cpuset_lock();
+        }
+
+        /*
          * Make sure no PI-waiters arrive (or leave) while we are
          * changing the priority of the task:
          *
@@ -5655,6 +5667,8 @@
         if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                 policy = oldpolicy = -1;
                 task_rq_unlock(rq, p, &rf);
+                if (cpuset_locked)
+                        cpuset_unlock();
                 goto recheck;
         }
 
@@ -5720,8 +5734,11 @@
         preempt_disable();
         task_rq_unlock(rq, p, &rf);
 
-        if (pi)
+        if (pi) {
+                if (cpuset_locked)
+                        cpuset_unlock();
                 rt_mutex_adjust_pi(p);
+        }
 
         /* Run balance callbacks after we've adjusted the PI chain: */
         balance_callback(rq);
@@ -5731,6 +5748,8 @@
 
 unlock:
         task_rq_unlock(rq, p, &rf);
+        if (cpuset_locked)
+                cpuset_unlock();
         return retval;
 }
 
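cpuset_lock()/cpuset_unlock() pin the cpuset topology while __sched_setscheduler() performs SCHED_DEADLINE admission control, and must be dropped on every exit path once taken. In current mainline they are thin wrappers around cpuset_mutex in kernel/cgroup/cpuset.c (sketch; the backport in this tree may take the lock differently):

void cpuset_lock(void)
{
        mutex_lock(&cpuset_mutex);
}

void cpuset_unlock(void)
{
        mutex_unlock(&cpuset_mutex);
}
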
@@ -6344,14 +6363,14 @@
         if (len & (sizeof(unsigned long)-1))
                 return -EINVAL;
 
-        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                 return -ENOMEM;
 
         ret = sched_getaffinity(pid, mask);
         if (ret == 0) {
                 unsigned int retlen = min(len, cpumask_size());
 
-                if (copy_to_user(user_mask_ptr, mask, retlen))
+                if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
                         ret = -EFAULT;
                 else
                         ret = retlen;
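The zeroed allocation matters because the kernel copies out cpumask_size() bytes, which is rounded up past nr_cpu_ids bits; with alloc_cpumask_var() the tail of that buffer could contain uninitialized data. The raw syscall returns the number of bytes it filled in, as the hypothetical userspace sketch below illustrates (the glibc wrapper instead returns 0 on success):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        cpu_set_t set;

        /* Raw syscall: the return value is min(len, cpumask_size()),
         * i.e. how many bytes of the mask the kernel wrote. */
        long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);

        if (n < 0) {
                perror("sched_getaffinity");
                return 1;
        }
        printf("kernel filled %ld bytes of the affinity mask\n", n);
        return 0;
}
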
@@ -6872,8 +6891,7 @@
         return ret;
 }
 
-int task_can_attach(struct task_struct *p,
-                    const struct cpumask *cs_effective_cpus)
+int task_can_attach(struct task_struct *p)
 {
         int ret = 0;
 
@@ -6886,21 +6904,9 @@
          * success of set_cpus_allowed_ptr() on all attached tasks
          * before cpus_mask may be changed.
          */
-        if (p->flags & PF_NO_SETAFFINITY) {
+        if (p->flags & PF_NO_SETAFFINITY)
                 ret = -EINVAL;
-                goto out;
-        }
 
-        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-                                              cs_effective_cpus)) {
-                int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
-
-                if (unlikely(cpu >= nr_cpu_ids))
-                        return -EINVAL;
-                ret = dl_cpu_busy(cpu, p);
-        }
-
-out:
         return ret;
 }
 
@@ -7228,7 +7234,7 @@
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
         if (!cpuhp_tasks_frozen) {
-                int ret = dl_cpu_busy(cpu, NULL);
+                int ret = dl_bw_check_overflow(cpu);
 
                 if (ret)
                         return ret;
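dl_bw_check_overflow() is the renamed "check only" mode of the old dl_cpu_busy(cpu, NULL). Together with the SCHED_DEADLINE admission check dropped from task_can_attach() above, which moves to the cpuset attach path, it belongs to the upstream dl_bw_* interface. In mainline that interface looks roughly like the sketch below, where dl_bw_manage() is the internal worker in kernel/sched/deadline.c; names and details may differ in this tree:

/* Would removing @cpu overflow the root domain's admitted DL bandwidth? */
int dl_bw_check_overflow(int cpu)
{
        return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}

/* Reserve @dl_bw worth of bandwidth on @cpu's root domain (cpuset attach). */
int dl_bw_alloc(int cpu, u64 dl_bw)
{
        return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}

/* Release a reservation taken with dl_bw_alloc(). */
void dl_bw_free(int cpu, u64 dl_bw)
{
        dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}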