.. | ..
351 | 351 | extern bool __checkparam_dl(const struct sched_attr *attr);
352 | 352 | extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
353 | 353 | extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
354 | | -extern int dl_cpu_busy(int cpu, struct task_struct *p);
| 354 | +extern int dl_bw_check_overflow(int cpu);
355 | 355 |
356 | 356 | #ifdef CONFIG_CGROUP_SCHED
357 | 357 |
.. | ..
996 | 996 | unsigned long cpu_capacity_orig;
997 | 997 |
998 | 998 | struct callback_head *balance_callback;
999 | | - unsigned char balance_flags;
1000 | 999 |
1001 | 1000 | unsigned char nohz_idle_balance;
1002 | 1001 | unsigned char idle_balance;
.. | ..
1027 | 1026 |
1028 | 1027 | /* This is used to determine avg_idle's max value */
1029 | 1028 | u64 max_idle_balance_cost;
1030 | | -
1031 | | -#ifdef CONFIG_HOTPLUG_CPU
1032 | | - struct rcuwait hotplug_wait;
1033 | | -#endif
1034 | 1029 | #endif /* CONFIG_SMP */
1035 | 1030 |
1036 | 1031 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING
.. | ..
1082 | 1077 | /* Must be inspected within a rcu lock section */
1083 | 1078 | struct cpuidle_state *idle_state;
1084 | 1079 | #endif
1085 | | -
1086 | | -#ifdef CONFIG_SMP
1087 | | - unsigned int nr_pinned;
1088 | | -#endif
1089 | | - unsigned int push_busy;
1090 | | - struct cpu_stop_work push_work;
1091 | 1080 |
1092 | 1081 | ANDROID_VENDOR_DATA_ARRAY(1, 96);
1093 | 1082 | ANDROID_OEM_DATA_ARRAY(1, 16);
.. | ..
1286 | 1275 | rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1287 | 1276 | rf->clock_update_flags = 0;
1288 | 1277 | #endif
1289 | | -#ifdef CONFIG_SMP
1290 | | - SCHED_WARN_ON(rq->balance_callback);
1291 | | -#endif
1292 | 1278 | }
1293 | 1279 |
1294 | 1280 | static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
.. | ..
1448 | 1434 |
1449 | 1435 | #ifdef CONFIG_SMP
1450 | 1436 |
1451 | | -#define BALANCE_WORK 0x01
1452 | | -#define BALANCE_PUSH 0x02
1453 | | -
1454 | 1437 | extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1455 | 1438 | int cpu, int scpu);
1456 | 1439 | static inline void
.. | ..
1460 | 1443 | {
1461 | 1444 | lockdep_assert_held(&rq->lock);
1462 | 1445 |
1463 | | - if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
| 1446 | + if (unlikely(head->next))
1464 | 1447 | return;
1465 | 1448 |
1466 | 1449 | head->func = (void (*)(struct callback_head *))func;
1467 | 1450 | head->next = rq->balance_callback;
1468 | 1451 | rq->balance_callback = head;
1469 | | - rq->balance_flags |= BALANCE_WORK;
1470 | 1452 | }
1471 | 1453 |
1472 | 1454 | #define rcu_dereference_check_sched_domain(p) \
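With `BALANCE_WORK`/`BALANCE_PUSH` gone, `queue_balance_callback()` above is guarded only by the check that the head is already linked (`head->next` set). For context, this is roughly how the RT class queues its push callback in mainline `kernel/sched/rt.c` of this kernel vintage; `has_pushable_tasks()`, `push_rt_tasks()` and the per-CPU `rt_push_head` are outside this diff, so treat the sketch as illustrative rather than part of this change:

```c
/* Sketch (not part of this diff): caller side in kernel/sched/rt.c. */
static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	/* An already-linked head (head->next set) is ignored by
	 * queue_balance_callback(), so calling this repeatedly under
	 * rq->lock is safe. */
	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
```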
.. | ..
1795 | 1777 | #define WF_FORK 0x02 /* Child wakeup after fork */
1796 | 1778 | #define WF_MIGRATED 0x04 /* Internal use, task got migrated */
1797 | 1779 | #define WF_ON_CPU 0x08 /* Wakee is on_cpu */
1798 | | -#define WF_LOCK_SLEEPER 0x10 /* Wakeup spinlock "sleeper" */
1799 | 1780 | #define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */
1800 | 1781 |
1801 | 1782 | /*
.. | ..
1880 | 1861 | void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1881 | 1862 |
1882 | 1863 | void (*set_cpus_allowed)(struct task_struct *p,
1883 | | - const struct cpumask *newmask,
1884 | | - u32 flags);
| 1864 | + const struct cpumask *newmask);
1885 | 1865 |
1886 | 1866 | void (*rq_online)(struct rq *rq);
1887 | 1867 | void (*rq_offline)(struct rq *rq);
1888 | | -
1889 | | - struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
1890 | 1868 | #endif
1891 | 1869 |
1892 | 1870 | void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
.. | ..
1970 | 1948 | extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1971 | 1949 | extern struct task_struct *pick_next_task_idle(struct rq *rq);
1972 | 1950 |
1973 | | -#define SCA_CHECK 0x01
1974 | | -#define SCA_MIGRATE_DISABLE 0x02
1975 | | -#define SCA_MIGRATE_ENABLE 0x04
1976 | | -
1977 | 1951 | #ifdef CONFIG_SMP
1978 | 1952 |
1979 | 1953 | extern void update_group_capacity(struct sched_domain *sd, int cpu);
1980 | 1954 |
1981 | 1955 | extern void trigger_load_balance(struct rq *rq);
1982 | 1956 |
1983 | | -extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
1984 | | -
1985 | | -static inline struct task_struct *get_push_task(struct rq *rq)
1986 | | -{
1987 | | - struct task_struct *p = rq->curr;
1988 | | -
1989 | | - lockdep_assert_held(&rq->lock);
1990 | | -
1991 | | - if (rq->push_busy)
1992 | | - return NULL;
1993 | | -
1994 | | - if (p->nr_cpus_allowed == 1)
1995 | | - return NULL;
1996 | | -
1997 | | - if (p->migration_disabled)
1998 | | - return NULL;
1999 | | -
2000 | | - rq->push_busy = true;
2001 | | - return get_task_struct(p);
2002 | | -}
2003 | | -
2004 | | -extern int push_cpu_stop(void *arg);
| 1957 | +extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
2005 | 1958 |
2006 | 1959 | extern unsigned long __read_mostly max_load_balance_interval;
2007 | 1960 | #endif
.. | ..
2045 | 1998 |
2046 | 1999 | extern void resched_curr(struct rq *rq);
2047 | 2000 | extern void resched_cpu(int cpu);
2048 | | -
2049 | | -#ifdef CONFIG_PREEMPT_LAZY
2050 | | -extern void resched_curr_lazy(struct rq *rq);
2051 | | -#else
2052 | | -static inline void resched_curr_lazy(struct rq *rq)
2053 | | -{
2054 | | - resched_curr(rq);
2055 | | -}
2056 | | -#endif
2057 | 2001 |
2058 | 2002 | extern struct rt_bandwidth def_rt_bandwidth;
2059 | 2003 | extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
.. | ..
2417 | 2361 | static inline void nohz_balance_exit_idle(struct rq *rq) { }
2418 | 2362 | #endif
2419 | 2363 |
2420 | | -#define MDF_PUSH 0x01
2421 | | -
2422 | | -static inline bool is_migration_disabled(struct task_struct *p)
2423 | | -{
2424 | | -#ifdef CONFIG_SMP
2425 | | - return p->migration_disabled;
2426 | | -#else
2427 | | - return false;
2428 | | -#endif
2429 | | -}
2430 | 2364 |
2431 | 2365 | #ifdef CONFIG_SMP
2432 | 2366 | static inline
.. | ..
2525 | 2459 | #ifdef CONFIG_UCLAMP_TASK
2526 | 2460 | unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
2527 | 2461 |
| 2462 | +static inline unsigned long uclamp_rq_get(struct rq *rq,
| 2463 | + enum uclamp_id clamp_id)
| 2464 | +{
| 2465 | + return READ_ONCE(rq->uclamp[clamp_id].value);
| 2466 | +}
| 2467 | +
| 2468 | +static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
| 2469 | + unsigned int value)
| 2470 | +{
| 2471 | + WRITE_ONCE(rq->uclamp[clamp_id].value, value);
| 2472 | +}
| 2473 | +
| 2474 | +static inline bool uclamp_rq_is_idle(struct rq *rq)
| 2475 | +{
| 2476 | + return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
| 2477 | +}
| 2478 | +
2528 | 2479 | /**
2529 | 2480 | * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
2530 | 2481 | * @rq: The rq to clamp against. Must not be NULL.
.. | ..
2560 | 2511 | * Ignore last runnable task's max clamp, as this task will
2561 | 2512 | * reset it. Similarly, no need to read the rq's min clamp.
2562 | 2513 | */
2563 | | - if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
| 2514 | + if (uclamp_rq_is_idle(rq))
2564 | 2515 | goto out;
2565 | 2516 | }
2566 | 2517 |
2567 | | - min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
2568 | | - max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
| 2518 | + min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN))
| 2519 | + max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
2569 | 2520 | out:
2570 | 2521 | /*
2571 | 2522 | * Since CPU's {min,max}_util clamps are MAX aggregated considering
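The tail of `uclamp_rq_util_with()` falls outside this excerpt, so here is a minimal, runnable userspace model of the clamp math performed with the values read via `uclamp_rq_get()` above, including the min-wins inversion fix that the trailing comment describes. `struct rq_model`, `clamp_util()` and the sample values are invented for illustration and skip the per-task and idle special cases:

```c
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

/* Invented stand-in for the two per-rq clamp values read by uclamp_rq_get(). */
struct rq_model { unsigned long min_clamp, max_clamp; };

/* Models the tail of uclamp_rq_util_with(): because the rq clamps are MAX
 * aggregated over runnable tasks with different clamps, min can exceed max;
 * in that inverted case the min clamp wins. */
static unsigned long clamp_util(const struct rq_model *rq, unsigned long util)
{
	unsigned long min_util = rq->min_clamp;	/* uclamp_rq_get(rq, UCLAMP_MIN) */
	unsigned long max_util = rq->max_clamp;	/* uclamp_rq_get(rq, UCLAMP_MAX) */

	if (min_util >= max_util)
		return min_util;

	if (util < min_util)
		return min_util;
	if (util > max_util)
		return max_util;
	return util;
}

int main(void)
{
	struct rq_model rq = { .min_clamp = 200, .max_clamp = 800 };

	printf("%lu\n", clamp_util(&rq, 100));	/* boosted to 200 */
	printf("%lu\n", clamp_util(&rq, 500));	/* in range: stays 500 */
	printf("%lu\n", clamp_util(&rq, 1000));	/* capped to 800 */
	return 0;
}
```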
.. | ..
2596 | 2547 | return static_branch_likely(&sched_uclamp_used);
2597 | 2548 | }
2598 | 2549 | #else /* CONFIG_UCLAMP_TASK */
| 2550 | +static inline unsigned long uclamp_eff_value(struct task_struct *p,
| 2551 | + enum uclamp_id clamp_id)
| 2552 | +{
| 2553 | + if (clamp_id == UCLAMP_MIN)
| 2554 | + return 0;
| 2555 | +
| 2556 | + return SCHED_CAPACITY_SCALE;
| 2557 | +}
| 2558 | +
2599 | 2559 | static inline
2600 | 2560 | unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
2601 | 2561 | struct task_struct *p)
.. | ..
2612 | 2572 | {
2613 | 2573 | return false;
2614 | 2574 | }
| 2575 | +
| 2576 | +static inline unsigned long uclamp_rq_get(struct rq *rq,
| 2577 | + enum uclamp_id clamp_id)
| 2578 | +{
| 2579 | + if (clamp_id == UCLAMP_MIN)
| 2580 | + return 0;
| 2581 | +
| 2582 | + return SCHED_CAPACITY_SCALE;
| 2583 | +}
| 2584 | +
| 2585 | +static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
| 2586 | + unsigned int value)
| 2587 | +{
| 2588 | +}
| 2589 | +
| 2590 | +static inline bool uclamp_rq_is_idle(struct rq *rq)
| 2591 | +{
| 2592 | + return false;
| 2593 | +}
2615 | 2594 | #endif /* CONFIG_UCLAMP_TASK */
2616 | 2595 |
2617 | 2596 | #ifdef CONFIG_UCLAMP_TASK_GROUP
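One observation on the `!CONFIG_UCLAMP_TASK` stubs added above: they return the clamp identity values (0 for `UCLAMP_MIN`, `SCHED_CAPACITY_SCALE` for `UCLAMP_MAX`), so generic code can call the accessors unconditionally, without `#ifdef`s, and clamping collapses to a pass-through. Reusing the illustrative model from the earlier sketch:

```c
/* With the stub values the clamp range is [0, SCHED_CAPACITY_SCALE], so
 * clamp_util() returns its input unchanged for any in-range utilization. */
struct rq_model stub_rq = { .min_clamp = 0, .max_clamp = SCHED_CAPACITY_SCALE };
/* clamp_util(&stub_rq, u) == u for all u in [0, SCHED_CAPACITY_SCALE] */
```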