@@ -1705,7 +1705,7 @@
 	 * be incurred if the tasks were swapped.
 	 */
 	/* Skip this swap candidate if cannot move to the source cpu */
-	if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
 		goto unlock;

 	/*
@@ -1803,7 +1803,7 @@

 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
-		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
 			continue;

 		env->dst_cpu = cpu;
@@ -4210,7 +4210,7 @@
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
@@ -4234,7 +4234,7 @@
 		return;

 	if (delta > ideal_runtime)
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 }

 static void
@@ -4376,7 +4376,7 @@
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 		return;
 	}
 	/*
@@ -4510,7 +4510,7 @@
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 }

 static __always_inline
@@ -4703,7 +4703,7 @@
 		struct rq *rq = rq_of(cfs_rq);
 		struct rq_flags rf;

-		rq_lock(rq, &rf);
+		rq_lock_irqsave(rq, &rf);
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;

@@ -4722,7 +4722,7 @@
 			unthrottle_cfs_rq(cfs_rq);

 next:
-		rq_unlock(rq, &rf);
+		rq_unlock_irqrestore(rq, &rf);

 		if (!remaining)
 			break;
@@ -4738,7 +4738,7 @@
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
 	u64 runtime;
 	int throttled;
@@ -4778,10 +4778,10 @@
 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
 		cfs_b->distribute_running = 1;
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime);
-		raw_spin_lock(&cfs_b->lock);
+		raw_spin_lock_irqsave(&cfs_b->lock, flags);

 		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4889,16 +4889,17 @@
 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 {
 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+	unsigned long flags;

 	/* confirm we're still not at a refresh boundary */
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	if (cfs_b->distribute_running) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}

 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}

@@ -4908,17 +4909,17 @@
 	if (runtime)
 		cfs_b->distribute_running = 1;

-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);

 	if (!runtime)
 		return;

 	runtime = distribute_cfs_runtime(cfs_b, runtime);

-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	cfs_b->runtime -= min(runtime, cfs_b->runtime);
 	cfs_b->distribute_running = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }

 /*
@@ -4998,11 +4999,12 @@
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
+	unsigned long flags;
 	int overrun;
 	int idle = 0;
 	int count = 0;

-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	for (;;) {
 		overrun = hrtimer_forward_now(timer, cfs_b->period);
 		if (!overrun)
@@ -5038,11 +5040,11 @@
 			count = 0;
 		}

-		idle = do_sched_cfs_period_timer(cfs_b, overrun);
+		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
 	}
 	if (idle)
 		cfs_b->period_active = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);

 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -5216,7 +5218,7 @@

 	if (delta < 0) {
 		if (rq->curr == p)
-			resched_curr(rq);
+			resched_curr_lazy(rq);
 		return;
 	}
 	hrtick_start(rq, delta);
@@ -6045,7 +6047,7 @@

 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_span(group),
-					&p->cpus_allowed))
+					p->cpus_ptr))
 			continue;

 #ifdef CONFIG_ROCKCHIP_SCHED_PERFORMANCE_BIAS
@@ -6191,7 +6193,7 @@
 		return cpumask_first(sched_group_span(group));

 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
 		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -6231,7 +6233,7 @@
 {
 	int new_cpu = cpu;

-	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
 		return prev_cpu;

 	/*
@@ -6348,7 +6350,7 @@
 	if (!test_idle_cores(target, false))
 		return -1;

-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
@@ -6382,7 +6384,7 @@
 		return -1;

 	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 			continue;
 		if (available_idle_cpu(cpu))
 			return cpu;
@@ -6443,7 +6445,7 @@

 	time = local_clock();

-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

 	for_each_cpu_wrap(cpu, cpus, target) {
 		if (!--nr)
@@ -6483,7 +6485,7 @@
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    available_idle_cpu(recent_used_cpu) &&
-	    cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
@@ -6701,7 +6703,7 @@
 	/* Scan CPUs in all SDs */
 	sg = sd->groups;
 	do {
-		for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
+		for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
 			unsigned long capacity_curr = capacity_curr_of(i);
 			unsigned long capacity_orig = capacity_orig_of(i);
 			unsigned long wake_util, new_util;
@@ -7119,7 +7121,7 @@
 		max_spare_cap = 0;

 		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
-			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 				continue;

 			util = cpu_util_next(cpu, p, cpu);
@@ -7237,7 +7239,7 @@

 	if (sysctl_sched_sync_hint_enable && sync) {
 		cpu = smp_processor_id();
-		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(cpu, p->cpus_ptr))
 			return cpu;
 	}

@@ -7282,7 +7284,7 @@
 		goto unlock;
 	}

-	if (cpumask_test_cpu(prev_cpu, &p->cpus_allowed))
+	if (cpumask_test_cpu(prev_cpu, p->cpus_ptr))
 		prev_energy = best_energy = compute_energy(p, prev_cpu, pd);
 	else
 		prev_energy = best_energy = ULONG_MAX;
@@ -7355,7 +7357,7 @@

 		want_affine = !wake_wide(p, sibling_count_hint) &&
 			      !wake_cap(p, cpu, prev_cpu) &&
-			      cpumask_test_cpu(cpu, &p->cpus_allowed);
+			      cpumask_test_cpu(cpu, p->cpus_ptr);
 	}

 sd_loop:
@@ -7617,7 +7619,7 @@
 		return;

 preempt:
-	resched_curr(rq);
+	resched_curr_lazy(rq);
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
@@ -8113,14 +8115,14 @@
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
 	 * 3) running (obviously), or
 	 * 4) are cache-hot on their current CPU.
 	 */
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;

-	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
 		int cpu;

 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -8140,7 +8142,7 @@

 		/* Prevent to re-select dst_cpu via env's CPUs: */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
@@ -8806,7 +8808,7 @@

 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -9507,7 +9509,7 @@
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assume all things are equal, which typically
-	 * isn't true due to cpus_allowed constraints and the like.
+	 * isn't true due to cpus_ptr constraints and the like.
 	 */
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
@@ -9947,7 +9949,7 @@
 			 * if the curr task on busiest CPU can't be
 			 * moved to this_cpu:
 			 */
-			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
 				env.flags |= LBF_ALL_PINNED;
@@ -10943,7 +10945,7 @@
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
-		resched_curr(rq);
+		resched_curr_lazy(rq);
 	}

 	se->vruntime -= cfs_rq->min_vruntime;
@@ -10967,7 +10969,7 @@
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
-			resched_curr(rq);
+			resched_curr_lazy(rq);
 	} else
 		check_preempt_curr(rq, p, 0);
 }