@@ -1018,7 +1018,7 @@
 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
 		return;
 
-	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+	uclamp_rq_set(rq, clamp_id, clamp_value);
 }
 
 static inline
@@ -1211,8 +1211,8 @@
 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
 		bucket->value = uc_se->value;
 
-	if (uc_se->value > READ_ONCE(uc_rq->value))
-		WRITE_ONCE(uc_rq->value, uc_se->value);
+	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
+		uclamp_rq_set(rq, clamp_id, uc_se->value);
 }
 
 /*
@@ -1278,7 +1278,7 @@
 	if (likely(bucket->tasks))
 		return;
 
-	rq_clamp = READ_ONCE(uc_rq->value);
+	rq_clamp = uclamp_rq_get(rq, clamp_id);
 	/*
 	 * Defensive programming: this should never happen. If it happens,
 	 * e.g. due to future modification, warn and fixup the expected value.
@@ -1286,7 +1286,7 @@
 	SCHED_WARN_ON(bucket->value > rq_clamp);
 	if (bucket->value >= rq_clamp) {
 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
-		WRITE_ONCE(uc_rq->value, bkt_clamp);
+		uclamp_rq_set(rq, clamp_id, bkt_clamp);
 	}
 }
 
@@ -1692,6 +1692,9 @@
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+	if (task_on_rq_migrating(p))
+		flags |= ENQUEUE_MIGRATED;
+
 	enqueue_task(rq, p, flags);
 
 	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -4535,8 +4538,7 @@
 		pr_err("Preemption disabled at:");
 		print_ip_sym(KERN_ERR, preempt_disable_ip);
 	}
-	if (panic_on_warn)
-		panic("scheduling while atomic\n");
+	check_panic_on_warn("scheduling while atomic");
 
 	trace_android_rvh_schedule_bug(prev);
 
@@ -5480,6 +5482,7 @@
 	int reset_on_fork;
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct rq *rq;
+	bool cpuset_locked = false;
 
 	/* The pi code expects interrupts enabled */
 	BUG_ON(pi && in_interrupt());
@@ -5582,6 +5585,15 @@
 	}
 
 	/*
+	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
+	 * information.
+	 */
+	if (dl_policy(policy) || dl_policy(p->policy)) {
+		cpuset_locked = true;
+		cpuset_lock();
+	}
+
+	/*
 	 * Make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
 	 *
@@ -5655,6 +5667,8 @@
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
 		task_rq_unlock(rq, p, &rf);
+		if (cpuset_locked)
+			cpuset_unlock();
 		goto recheck;
 	}
 
@@ -5720,8 +5734,11 @@
 	preempt_disable();
 	task_rq_unlock(rq, p, &rf);
 
-	if (pi)
+	if (pi) {
+		if (cpuset_locked)
+			cpuset_unlock();
 		rt_mutex_adjust_pi(p);
+	}
 
 	/* Run balance callbacks after we've adjusted the PI chain: */
 	balance_callback(rq);
@@ -5731,6 +5748,8 @@
 
 unlock:
 	task_rq_unlock(rq, p, &rf);
+	if (cpuset_locked)
+		cpuset_unlock();
 	return retval;
 }
 
@@ -6344,14 +6363,14 @@
 	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 		return -ENOMEM;
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
 		unsigned int retlen = min(len, cpumask_size());
 
-		if (copy_to_user(user_mask_ptr, mask, retlen))
+		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
 			ret = -EFAULT;
 		else
 			ret = retlen;
@@ -6872,8 +6891,7 @@
 	return ret;
 }
 
-int task_can_attach(struct task_struct *p,
-		    const struct cpumask *cs_effective_cpus)
+int task_can_attach(struct task_struct *p)
 {
 	int ret = 0;
 
@@ -6886,21 +6904,9 @@
 	 * success of set_cpus_allowed_ptr() on all attached tasks
 	 * before cpus_mask may be changed.
 	 */
-	if (p->flags & PF_NO_SETAFFINITY) {
+	if (p->flags & PF_NO_SETAFFINITY)
 		ret = -EINVAL;
-		goto out;
-	}
 
-	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_effective_cpus)) {
-		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
-
-		if (unlikely(cpu >= nr_cpu_ids))
-			return -EINVAL;
-		ret = dl_cpu_busy(cpu, p);
-	}
-
-out:
 	return ret;
 }
 
@@ -7228,7 +7234,7 @@
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
 	if (!cpuhp_tasks_frozen) {
-		int ret = dl_cpu_busy(cpu, NULL);
+		int ret = dl_bw_check_overflow(cpu);
 
 		if (ret)
 			return ret;
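Finally, dl_cpu_busy(cpu, NULL) is replaced by dl_bw_check_overflow(cpu). The new name reflects what the NULL-task case of the old helper actually did: check whether taking @cpu out of service would overflow the reserved deadline bandwidth on the remaining root-domain CPUs. Assuming the helper follows the upstream rework, it is a thin wrapper over a common bandwidth-management routine, roughly:

```c
/* Assumed sketch of the renamed helper in kernel/sched/deadline.c; the
 * request-based dl_bw_manage() interface is an upstream detail that may
 * differ in this tree. */
int dl_bw_check_overflow(int cpu)
{
	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}
```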