@@ -351,7 +351,7 @@
 extern bool __checkparam_dl(const struct sched_attr *attr);
 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int dl_cpu_busy(int cpu, struct task_struct *p);
+extern int dl_bw_check_overflow(int cpu);
 
 #ifdef CONFIG_CGROUP_SCHED
 
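The first hunk renames dl_cpu_busy() to dl_bw_check_overflow() and drops the task argument, leaving a pure per-CPU deadline-bandwidth overflow check. A hedged sketch of a call site, assuming the function keeps the old int-errno return convention suggested by the previous prototype (0 on success, a negative errno such as -EBUSY on overflow); the caller name is hypothetical and not part of this hunk:

	/*
	 * Hypothetical caller, NOT from this patch: check whether taking
	 * @cpu offline would overcommit the admitted deadline bandwidth.
	 */
	static int hypothetical_cpu_offline_check(int cpu)
	{
		int ret = dl_bw_check_overflow(cpu);	/* assumed: 0 or -errno */

		if (ret)
			return ret;	/* e.g. -EBUSY: DL bandwidth would overflow */
		return 0;
	}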
@@ -2459,6 +2459,23 @@
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	return READ_ONCE(rq->uclamp[clamp_id].value);
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
+}
+
 /**
  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
  * @rq: The rq to clamp against. Must not be NULL.
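This hunk introduces three rq-level uclamp accessors: uclamp_rq_get() and uclamp_rq_set() wrap the lockless READ_ONCE()/WRITE_ONCE() accesses to rq->uclamp[clamp_id].value, and uclamp_rq_is_idle() wraps the UCLAMP_FLAG_IDLE test, so callers no longer open-code rq internals. A minimal sketch of a reader built on the new getters; the surrounding function is hypothetical, not part of the patch:

	/* Hypothetical reader: fetch both rq-wide clamps via the new API. */
	static unsigned long hypothetical_rq_clamped_util(struct rq *rq,
							  unsigned long util)
	{
		unsigned long min = uclamp_rq_get(rq, UCLAMP_MIN);
		unsigned long max = uclamp_rq_get(rq, UCLAMP_MAX);

		return clamp(util, min, max);
	}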
@@ -2494,12 +2511,12 @@
 		 * Ignore last runnable task's max clamp, as this task will
 		 * reset it. Similarly, no need to read the rq's min clamp.
 		 */
-		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+		if (uclamp_rq_is_idle(rq))
 			goto out;
 	}
 
-	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
-	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
+	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
 out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
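This hunk is a purely mechanical conversion to the new accessors: under CONFIG_UCLAMP_TASK each call inlines to exactly the code it replaces, so behavior is unchanged. For example:

	/* New form ... */
	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
	/* ... inlines, per the definition added above, to the old form: */
	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));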
@@ -2530,6 +2547,15 @@
 	return static_branch_likely(&sched_uclamp_used);
 }
 #else /* CONFIG_UCLAMP_TASK */
+static inline unsigned long uclamp_eff_value(struct task_struct *p,
+					     enum uclamp_id clamp_id)
+{
+	if (clamp_id == UCLAMP_MIN)
+		return 0;
+
+	return SCHED_CAPACITY_SCALE;
+}
+
 static inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 				  struct task_struct *p)
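The !CONFIG_UCLAMP_TASK stub of uclamp_eff_value() returns the no-restriction defaults: 0 for UCLAMP_MIN and SCHED_CAPACITY_SCALE (full capacity) for UCLAMP_MAX, so clamping against the returned bounds is a no-op. A hypothetical illustration:

	/* Hypothetical, NOT from this patch: with the stub above, @util is
	 * unchanged for any value in [0, SCHED_CAPACITY_SCALE] when uclamp
	 * is compiled out. */
	static unsigned long hypothetical_clamped(struct task_struct *p,
						  unsigned long util)
	{
		return clamp(util,
			     uclamp_eff_value(p, UCLAMP_MIN),	/* 0 */
			     uclamp_eff_value(p, UCLAMP_MAX));	/* SCHED_CAPACITY_SCALE */
	}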
@@ -2546,6 +2572,25 @@
 {
 	return false;
 }
+
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+					  enum uclamp_id clamp_id)
+{
+	if (clamp_id == UCLAMP_MIN)
+		return 0;
+
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+				 unsigned int value)
+{
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+	return false;
+}
 #endif /* CONFIG_UCLAMP_TASK */
 
 #ifdef CONFIG_UCLAMP_TASK_GROUP
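Together with the uclamp_eff_value() stub, these no-op versions of uclamp_rq_get(), uclamp_rq_set() and uclamp_rq_is_idle() let common code call the accessors unconditionally, with no #ifdef at the call sites. A hedged, hypothetical example of such an #ifdef-free caller:

	/* Hypothetical caller: compiles the same whether or not
	 * CONFIG_UCLAMP_TASK is set, thanks to the stubs above. */
	static bool hypothetical_rq_boosted(struct rq *rq)
	{
		/* With uclamp compiled out this is always false:
		 * uclamp_rq_is_idle() returns false and uclamp_rq_get()
		 * returns 0 for UCLAMP_MIN. */
		return !uclamp_rq_is_idle(rq) &&
		       uclamp_rq_get(rq, UCLAMP_MIN) > 0;
	}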