.. | .. |
---|
59 | 59 | #include <linux/psi.h> |
---|
60 | 60 | #include <linux/rcupdate_wait.h> |
---|
61 | 61 | #include <linux/security.h> |
---|
62 | | -#include <linux/stackprotector.h> |
---|
63 | 62 | #include <linux/stop_machine.h> |
---|
64 | 63 | #include <linux/suspend.h> |
---|
65 | 64 | #include <linux/swait.h> |
---|
66 | 65 | #include <linux/syscalls.h> |
---|
67 | 66 | #include <linux/task_work.h> |
---|
68 | 67 | #include <linux/tsacct_kern.h> |
---|
| 68 | +#include <linux/android_vendor.h> |
---|
| 69 | +#include <linux/android_kabi.h> |
---|
69 | 70 | |
---|
70 | 71 | #include <asm/tlb.h> |
---|
| 72 | +#include <asm-generic/vmlinux.lds.h> |
---|
| 73 | +#include <soc/rockchip/rockchip_performance.h> |
---|
71 | 74 | |
---|
72 | 75 | #ifdef CONFIG_PARAVIRT |
---|
73 | 76 | # include <asm/paravirt.h> |
---|
.. | .. |
---|
76 | 79 | #include "cpupri.h" |
---|
77 | 80 | #include "cpudeadline.h" |
---|
78 | 81 | |
---|
| 82 | +#include <trace/events/sched.h> |
---|
| 83 | + |
---|
79 | 84 | #ifdef CONFIG_SCHED_DEBUG |
---|
80 | 85 | # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) |
---|
81 | 86 | #else |
---|
82 | 87 | # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) |
---|
83 | 88 | #endif |
---|
84 | | - |
---|
85 | | -#include "tune.h" |
---|
86 | 89 | |
---|
87 | 90 | struct rq; |
---|
88 | 91 | struct cpuidle_state; |
---|
.. | .. |
---|
99 | 102 | extern void calc_global_load_tick(struct rq *this_rq); |
---|
100 | 103 | extern long calc_load_fold_active(struct rq *this_rq, long adjust); |
---|
101 | 104 | |
---|
102 | | -#ifdef CONFIG_SMP |
---|
103 | | -extern void cpu_load_update_active(struct rq *this_rq); |
---|
104 | | -#else |
---|
105 | | -static inline void cpu_load_update_active(struct rq *this_rq) { } |
---|
106 | | -#endif |
---|
107 | | - |
---|
| 105 | +extern void call_trace_sched_update_nr_running(struct rq *rq, int count); |
---|
108 | 106 | /* |
---|
109 | 107 | * Helpers for converting nanosecond timing to jiffy resolution |
---|
110 | 108 | */ |
---|
.. | .. |
---|
187 | 185 | rt_policy(policy) || dl_policy(policy); |
---|
188 | 186 | } |
---|
189 | 187 | |
---|
| 188 | +static inline int task_has_idle_policy(struct task_struct *p) |
---|
| 189 | +{ |
---|
| 190 | + return idle_policy(p->policy); |
---|
| 191 | +} |
---|
| 192 | + |
---|
190 | 193 | static inline int task_has_rt_policy(struct task_struct *p) |
---|
191 | 194 | { |
---|
192 | 195 | return rt_policy(p->policy); |
---|
.. | .. |
---|
198 | 201 | } |
---|
199 | 202 | |
---|
200 | 203 | #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) |
---|
| 204 | + |
---|
| 205 | +static inline void update_avg(u64 *avg, u64 sample) |
---|
| 206 | +{ |
---|
| 207 | + s64 diff = sample - *avg; |
---|
| 208 | + *avg += diff / 8; |
---|
| 209 | +} |
---|
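The update_avg() helper added here is a simple exponentially weighted moving average: each new sample moves the estimate 1/8 of the way toward it. A minimal standalone sketch of the same arithmetic (plain userspace C, sample values made up):

```c
#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's update_avg(): move the running
 * average one eighth of the remaining gap toward each new sample. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;

	*avg += diff / 8;
}

int main(void)
{
	uint64_t avg = 0;

	/* Feed a constant sample of 800: the estimate climbs toward it,
	 * gaining 1/8 of the remaining gap on every step. */
	for (int i = 1; i <= 24; i++) {
		update_avg(&avg, 800);
		if (i % 8 == 0)
			printf("after %2d samples: avg = %llu\n",
			       i, (unsigned long long)avg);
	}
	return 0;
}
```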
| 210 | + |
---|
| 211 | +/* |
---|
| 212 | + * Shifting a value by an exponent greater *or equal* to the size of said value |
---|
| 213 | + * is UB; cap at size-1. |
---|
| 214 | + */ |
---|
| 215 | +#define shr_bound(val, shift) \ |
---|
| 216 | + (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) |
---|
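shr_bound() exists because shifting a value by its full bit-width (or more) is undefined behaviour in C, so the shift count is capped at width - 1. A standalone sketch of the same capping idiom; BITS_PER_TYPE() and min_t() are kernel macros, so the toy version open-codes them:

```c
#include <stdio.h>
#include <stdint.h>

/* Toy equivalent of shr_bound(): never let the shift count reach the
 * bit-width of the value, which would be undefined behaviour. */
#define BITS_PER(v)		(8 * sizeof(v))
#define SHR_BOUND(val, shift)	\
	((val) >> ((shift) < BITS_PER(val) - 1 ? (shift) : BITS_PER(val) - 1))

int main(void)
{
	uint32_t v = 0x80000000u;

	printf("shift  4: %#x\n", (unsigned int)SHR_BOUND(v, 4));  /* 0x8000000 */
	printf("shift 40: %#x\n", (unsigned int)SHR_BOUND(v, 40)); /* capped at 31 -> 0x1 */
	return 0;
}
```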
201 | 217 | |
---|
202 | 218 | /* |
---|
203 | 219 | * !! For sched_setattr_nocheck() (kernel) only !! |
---|
.. | .. |
---|
304 | 320 | __dl_update(dl_b, -((s32)tsk_bw / cpus)); |
---|
305 | 321 | } |
---|
306 | 322 | |
---|
307 | | -static inline |
---|
308 | | -bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) |
---|
| 323 | +static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap, |
---|
| 324 | + u64 old_bw, u64 new_bw) |
---|
309 | 325 | { |
---|
310 | 326 | return dl_b->bw != -1 && |
---|
311 | | - dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; |
---|
| 327 | + cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; |
---|
312 | 328 | } |
---|
313 | 329 | |
---|
314 | | -extern void dl_change_utilization(struct task_struct *p, u64 new_bw); |
---|
| 330 | +/* |
---|
| 331 | + * Verify the fitness of task @p to run on @cpu taking into account the |
---|
| 332 | + * CPU original capacity and the runtime/deadline ratio of the task. |
---|
| 333 | + * |
---|
| 334 | + * The function will return true if the CPU original capacity of the |
---|
| 335 | + * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the |
---|
| 336 | + * task and false otherwise. |
---|
| 337 | + */ |
---|
| 338 | +static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) |
---|
| 339 | +{ |
---|
| 340 | + unsigned long cap = arch_scale_cpu_capacity(cpu); |
---|
| 341 | + |
---|
| 342 | + return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime; |
---|
| 343 | +} |
---|
| 344 | + |
---|
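dl_task_fits_capacity() above compares the task's bandwidth (runtime/deadline) against the CPU's original capacity, normalised to SCHED_CAPACITY_SCALE (1024), and rewrites the test so no division is needed. A worked sketch with made-up numbers:

```c
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)
#define cap_scale(v, s)		((v) * (s) >> SCHED_CAPACITY_SHIFT)

/* Mirrors dl_task_fits_capacity(): "cap/1024 >= runtime/deadline",
 * rewritten as "deadline * cap / 1024 >= runtime" to avoid a division. */
static int dl_fits(uint64_t runtime_ns, uint64_t deadline_ns, unsigned long cap)
{
	return cap_scale(deadline_ns, cap) >= runtime_ns;
}

int main(void)
{
	/* A task needing 4 ms of runtime every 16 ms uses 25% of a
	 * full-capacity CPU, i.e. at least 256 units of capacity. */
	uint64_t runtime = 4000000, deadline = 16000000;

	printf("cap 1024: %s\n", dl_fits(runtime, deadline, 1024) ? "fits" : "does not fit");
	printf("cap  160: %s\n", dl_fits(runtime, deadline,  160) ? "fits" : "does not fit");
	return 0;
}
```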
315 | 345 | extern void init_dl_bw(struct dl_bw *dl_b); |
---|
316 | 346 | extern int sched_dl_global_validate(void); |
---|
317 | 347 | extern void sched_dl_do_global(void); |
---|
.. | .. |
---|
320 | 350 | extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); |
---|
321 | 351 | extern bool __checkparam_dl(const struct sched_attr *attr); |
---|
322 | 352 | extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); |
---|
323 | | -extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); |
---|
324 | 353 | extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
---|
325 | | -extern bool dl_cpu_busy(unsigned int cpu); |
---|
| 354 | +extern int dl_cpu_busy(int cpu, struct task_struct *p); |
---|
326 | 355 | |
---|
327 | 356 | #ifdef CONFIG_CGROUP_SCHED |
---|
328 | 357 | |
---|
.. | .. |
---|
342 | 371 | u64 runtime; |
---|
343 | 372 | s64 hierarchical_quota; |
---|
344 | 373 | |
---|
345 | | - short idle; |
---|
346 | | - short period_active; |
---|
| 374 | + u8 idle; |
---|
| 375 | + u8 period_active; |
---|
| 376 | + u8 slack_started; |
---|
347 | 377 | struct hrtimer period_timer; |
---|
348 | 378 | struct hrtimer slack_timer; |
---|
349 | 379 | struct list_head throttled_cfs_rq; |
---|
.. | .. |
---|
352 | 382 | int nr_periods; |
---|
353 | 383 | int nr_throttled; |
---|
354 | 384 | u64 throttled_time; |
---|
355 | | - |
---|
356 | | - bool distribute_running; |
---|
357 | 385 | #endif |
---|
358 | 386 | }; |
---|
359 | 387 | |
---|
.. | .. |
---|
407 | 435 | struct uclamp_se uclamp[UCLAMP_CNT]; |
---|
408 | 436 | /* Latency-sensitive flag used for a task group */ |
---|
409 | 437 | unsigned int latency_sensitive; |
---|
| 438 | + |
---|
| 439 | + ANDROID_VENDOR_DATA_ARRAY(1, 4); |
---|
410 | 440 | #endif |
---|
411 | 441 | |
---|
| 442 | + ANDROID_KABI_RESERVE(1); |
---|
| 443 | + ANDROID_KABI_RESERVE(2); |
---|
| 444 | + ANDROID_KABI_RESERVE(3); |
---|
| 445 | + ANDROID_KABI_RESERVE(4); |
---|
412 | 446 | }; |
---|
413 | 447 | |
---|
414 | 448 | #ifdef CONFIG_FAIR_GROUP_SCHED |
---|
.. | .. |
---|
497 | 531 | /* CFS-related fields in a runqueue */ |
---|
498 | 532 | struct cfs_rq { |
---|
499 | 533 | struct load_weight load; |
---|
500 | | - unsigned long runnable_weight; |
---|
501 | 534 | unsigned int nr_running; |
---|
502 | | - unsigned int h_nr_running; |
---|
| 535 | + unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ |
---|
| 536 | + unsigned int idle_h_nr_running; /* SCHED_IDLE */ |
---|
503 | 537 | |
---|
504 | 538 | u64 exec_clock; |
---|
505 | 539 | u64 min_vruntime; |
---|
.. | .. |
---|
535 | 569 | int nr; |
---|
536 | 570 | unsigned long load_avg; |
---|
537 | 571 | unsigned long util_avg; |
---|
538 | | - unsigned long runnable_sum; |
---|
| 572 | + unsigned long runnable_avg; |
---|
539 | 573 | } removed; |
---|
540 | 574 | |
---|
541 | 575 | #ifdef CONFIG_FAIR_GROUP_SCHED |
---|
.. | .. |
---|
575 | 609 | s64 runtime_remaining; |
---|
576 | 610 | |
---|
577 | 611 | u64 throttled_clock; |
---|
578 | | - u64 throttled_clock_task; |
---|
579 | | - u64 throttled_clock_task_time; |
---|
| 612 | + u64 throttled_clock_pelt; |
---|
| 613 | + u64 throttled_clock_pelt_time; |
---|
580 | 614 | int throttled; |
---|
581 | 615 | int throttle_count; |
---|
582 | 616 | struct list_head throttled_list; |
---|
583 | 617 | #endif /* CONFIG_CFS_BANDWIDTH */ |
---|
| 618 | + |
---|
| 619 | + ANDROID_VENDOR_DATA_ARRAY(1, 16); |
---|
584 | 620 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
---|
585 | 621 | }; |
---|
586 | 622 | |
---|
.. | .. |
---|
646 | 682 | /* |
---|
647 | 683 | * Deadline values of the currently executing and the |
---|
648 | 684 | * earliest ready task on this rq. Caching these facilitates |
---|
649 | | - * the decision wether or not a ready but not running task |
---|
| 685 | + * the decision whether or not a ready but not running task |
---|
650 | 686 | * should migrate somewhere else. |
---|
651 | 687 | */ |
---|
652 | 688 | struct { |
---|
.. | .. |
---|
695 | 731 | #ifdef CONFIG_FAIR_GROUP_SCHED |
---|
696 | 732 | /* An entity is a task if it doesn't "own" a runqueue */ |
---|
697 | 733 | #define entity_is_task(se) (!se->my_q) |
---|
| 734 | + |
---|
| 735 | +static inline void se_update_runnable(struct sched_entity *se) |
---|
| 736 | +{ |
---|
| 737 | + if (!entity_is_task(se)) |
---|
| 738 | + se->runnable_weight = se->my_q->h_nr_running; |
---|
| 739 | +} |
---|
| 740 | + |
---|
| 741 | +static inline long se_runnable(struct sched_entity *se) |
---|
| 742 | +{ |
---|
| 743 | + if (entity_is_task(se)) |
---|
| 744 | + return !!se->on_rq; |
---|
| 745 | + else |
---|
| 746 | + return se->runnable_weight; |
---|
| 747 | +} |
---|
| 748 | + |
---|
698 | 749 | #else |
---|
699 | 750 | #define entity_is_task(se) 1 |
---|
| 751 | + |
---|
| 752 | +static inline void se_update_runnable(struct sched_entity *se) {} |
---|
| 753 | + |
---|
| 754 | +static inline long se_runnable(struct sched_entity *se) |
---|
| 755 | +{ |
---|
| 756 | + return !!se->on_rq; |
---|
| 757 | +} |
---|
700 | 758 | #endif |
---|
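With the runnable_avg rework, a group entity's runnable contribution is the number of runnable tasks below it (kept in runnable_weight by se_update_runnable()), while a task entity contributes 0 or 1 according to on_rq. A toy illustration of the two cases; the structs are stand-ins and the helper folds se_update_runnable() into the read for brevity:

```c
#include <stdio.h>

/* Toy stand-ins for the two kinds of scheduling entities. */
struct cfs_rq_toy { unsigned int h_nr_running; };

struct sched_entity_toy {
	int on_rq;			/* task entity: queued or not    */
	struct cfs_rq_toy *my_q;	/* group entity: owns a runqueue */
};

static long se_runnable(const struct sched_entity_toy *se)
{
	/* A task contributes at most 1; a group contributes however many
	 * runnable tasks sit below it. */
	return se->my_q ? (long)se->my_q->h_nr_running : !!se->on_rq;
}

int main(void)
{
	struct cfs_rq_toy group_rq = { .h_nr_running = 3 };
	struct sched_entity_toy task  = { .on_rq = 1, .my_q = NULL };
	struct sched_entity_toy group = { .on_rq = 0, .my_q = &group_rq };

	printf("task  entity runnable: %ld\n", se_runnable(&task));  /* 1 */
	printf("group entity runnable: %ld\n", se_runnable(&group)); /* 3 */
	return 0;
}
```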
701 | 759 | |
---|
702 | 760 | #ifdef CONFIG_SMP |
---|
.. | .. |
---|
708 | 766 | return scale_load_down(se->load.weight); |
---|
709 | 767 | } |
---|
710 | 768 | |
---|
711 | | -static inline long se_runnable(struct sched_entity *se) |
---|
712 | | -{ |
---|
713 | | - return scale_load_down(se->runnable_weight); |
---|
714 | | -} |
---|
715 | 769 | |
---|
716 | 770 | static inline bool sched_asym_prefer(int a, int b) |
---|
717 | 771 | { |
---|
.. | .. |
---|
722 | 776 | struct em_perf_domain *em_pd; |
---|
723 | 777 | struct perf_domain *next; |
---|
724 | 778 | struct rcu_head rcu; |
---|
725 | | -}; |
---|
726 | | - |
---|
727 | | -struct max_cpu_capacity { |
---|
728 | | - raw_spinlock_t lock; |
---|
729 | | - unsigned long val; |
---|
730 | | - int cpu; |
---|
731 | 779 | }; |
---|
732 | 780 | |
---|
733 | 781 | /* Scheduling group status flags */ |
---|
.. | .. |
---|
788 | 836 | cpumask_var_t rto_mask; |
---|
789 | 837 | struct cpupri cpupri; |
---|
790 | 838 | |
---|
791 | | - /* Maximum cpu capacity in the system. */ |
---|
792 | | - struct max_cpu_capacity max_cpu_capacity; |
---|
| 839 | + unsigned long max_cpu_capacity; |
---|
793 | 840 | |
---|
794 | 841 | /* |
---|
795 | 842 | * NULL-terminated list of performance domains intersecting with the |
---|
796 | 843 | * CPUs of the rd. Protected by RCU. |
---|
797 | 844 | */ |
---|
798 | | - struct perf_domain *pd; |
---|
| 845 | + struct perf_domain __rcu *pd; |
---|
799 | 846 | |
---|
800 | | - /* Vendor fields. */ |
---|
801 | | - /* First cpu with maximum and minimum original capacity */ |
---|
802 | | - int max_cap_orig_cpu, min_cap_orig_cpu; |
---|
803 | | - /* First cpu with mid capacity */ |
---|
804 | | - int mid_cap_orig_cpu; |
---|
| 847 | + ANDROID_VENDOR_DATA_ARRAY(1, 4); |
---|
| 848 | + |
---|
| 849 | + ANDROID_KABI_RESERVE(1); |
---|
| 850 | + ANDROID_KABI_RESERVE(2); |
---|
| 851 | + ANDROID_KABI_RESERVE(3); |
---|
| 852 | + ANDROID_KABI_RESERVE(4); |
---|
805 | 853 | }; |
---|
806 | 854 | |
---|
807 | | -extern struct root_domain def_root_domain; |
---|
808 | | -extern struct mutex sched_domains_mutex; |
---|
809 | | - |
---|
810 | 855 | extern void init_defrootdomain(void); |
---|
811 | | -extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc); |
---|
812 | 856 | extern int sched_init_domains(const struct cpumask *cpu_map); |
---|
813 | 857 | extern void rq_attach_root(struct rq *rq, struct root_domain *rd); |
---|
814 | 858 | extern void sched_get_rd(struct root_domain *rd); |
---|
.. | .. |
---|
817 | 861 | #ifdef HAVE_RT_PUSH_IPI |
---|
818 | 862 | extern void rto_push_irq_work_func(struct irq_work *work); |
---|
819 | 863 | #endif |
---|
| 864 | +extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu); |
---|
820 | 865 | #endif /* CONFIG_SMP */ |
---|
821 | 866 | |
---|
822 | 867 | #ifdef CONFIG_UCLAMP_TASK |
---|
.. | .. |
---|
859 | 904 | unsigned int value; |
---|
860 | 905 | struct uclamp_bucket bucket[UCLAMP_BUCKETS]; |
---|
861 | 906 | }; |
---|
| 907 | + |
---|
| 908 | +DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); |
---|
862 | 909 | #endif /* CONFIG_UCLAMP_TASK */ |
---|
863 | 910 | |
---|
864 | 911 | /* |
---|
.. | .. |
---|
882 | 929 | unsigned int nr_preferred_running; |
---|
883 | 930 | unsigned int numa_migrate_on; |
---|
884 | 931 | #endif |
---|
885 | | - #define CPU_LOAD_IDX_MAX 5 |
---|
886 | | - unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
---|
887 | 932 | #ifdef CONFIG_NO_HZ_COMMON |
---|
888 | 933 | #ifdef CONFIG_SMP |
---|
889 | | - unsigned long last_load_update_tick; |
---|
890 | 934 | unsigned long last_blocked_load_update_tick; |
---|
891 | 935 | unsigned int has_blocked_load; |
---|
| 936 | + call_single_data_t nohz_csd; |
---|
892 | 937 | #endif /* CONFIG_SMP */ |
---|
893 | 938 | unsigned int nohz_tick_stopped; |
---|
894 | | - atomic_t nohz_flags; |
---|
| 939 | + atomic_t nohz_flags; |
---|
895 | 940 | #endif /* CONFIG_NO_HZ_COMMON */ |
---|
896 | 941 | |
---|
897 | | - /* capture load from *all* tasks on this CPU: */ |
---|
898 | | - struct load_weight load; |
---|
899 | | - unsigned long nr_load_updates; |
---|
| 942 | +#ifdef CONFIG_SMP |
---|
| 943 | + unsigned int ttwu_pending; |
---|
| 944 | +#endif |
---|
900 | 945 | u64 nr_switches; |
---|
901 | 946 | |
---|
902 | 947 | #ifdef CONFIG_UCLAMP_TASK |
---|
.. | .. |
---|
924 | 969 | */ |
---|
925 | 970 | unsigned long nr_uninterruptible; |
---|
926 | 971 | |
---|
927 | | - struct task_struct *curr; |
---|
| 972 | + struct task_struct __rcu *curr; |
---|
928 | 973 | struct task_struct *idle; |
---|
929 | 974 | struct task_struct *stop; |
---|
930 | 975 | unsigned long next_balance; |
---|
.. | .. |
---|
939 | 984 | |
---|
940 | 985 | atomic_t nr_iowait; |
---|
941 | 986 | |
---|
| 987 | +#ifdef CONFIG_MEMBARRIER |
---|
| 988 | + int membarrier_state; |
---|
| 989 | +#endif |
---|
| 990 | + |
---|
942 | 991 | #ifdef CONFIG_SMP |
---|
943 | | - struct root_domain *rd; |
---|
944 | | - struct sched_domain *sd; |
---|
| 992 | + struct root_domain *rd; |
---|
| 993 | + struct sched_domain __rcu *sd; |
---|
945 | 994 | |
---|
946 | 995 | unsigned long cpu_capacity; |
---|
947 | 996 | unsigned long cpu_capacity_orig; |
---|
948 | 997 | |
---|
949 | 998 | struct callback_head *balance_callback; |
---|
950 | 999 | |
---|
| 1000 | + unsigned char nohz_idle_balance; |
---|
951 | 1001 | unsigned char idle_balance; |
---|
952 | 1002 | |
---|
953 | 1003 | unsigned long misfit_task_load; |
---|
.. | .. |
---|
968 | 1018 | #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
---|
969 | 1019 | struct sched_avg avg_irq; |
---|
970 | 1020 | #endif |
---|
| 1021 | +#ifdef CONFIG_SCHED_THERMAL_PRESSURE |
---|
| 1022 | + struct sched_avg avg_thermal; |
---|
| 1023 | +#endif |
---|
971 | 1024 | u64 idle_stamp; |
---|
972 | 1025 | u64 avg_idle; |
---|
973 | 1026 | |
---|
974 | 1027 | /* This is used to determine avg_idle's max value */ |
---|
975 | 1028 | u64 max_idle_balance_cost; |
---|
976 | | -#endif |
---|
| 1029 | +#endif /* CONFIG_SMP */ |
---|
977 | 1030 | |
---|
978 | 1031 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
---|
979 | 1032 | u64 prev_irq_time; |
---|
.. | .. |
---|
991 | 1044 | |
---|
992 | 1045 | #ifdef CONFIG_SCHED_HRTICK |
---|
993 | 1046 | #ifdef CONFIG_SMP |
---|
994 | | - int hrtick_csd_pending; |
---|
995 | 1047 | call_single_data_t hrtick_csd; |
---|
996 | 1048 | #endif |
---|
997 | 1049 | struct hrtimer hrtick_timer; |
---|
| 1050 | + ktime_t hrtick_time; |
---|
998 | 1051 | #endif |
---|
999 | 1052 | |
---|
1000 | 1053 | #ifdef CONFIG_SCHEDSTATS |
---|
.. | .. |
---|
1015 | 1068 | unsigned int ttwu_local; |
---|
1016 | 1069 | #endif |
---|
1017 | 1070 | |
---|
1018 | | -#ifdef CONFIG_SMP |
---|
1019 | | - struct llist_head wake_list; |
---|
| 1071 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 1072 | + struct cpu_stop_work drain; |
---|
| 1073 | + struct cpu_stop_done drain_done; |
---|
1020 | 1074 | #endif |
---|
1021 | 1075 | |
---|
1022 | 1076 | #ifdef CONFIG_CPU_IDLE |
---|
1023 | 1077 | /* Must be inspected within a rcu lock section */ |
---|
1024 | 1078 | struct cpuidle_state *idle_state; |
---|
1025 | | - int idle_state_idx; |
---|
1026 | 1079 | #endif |
---|
| 1080 | + |
---|
| 1081 | + ANDROID_VENDOR_DATA_ARRAY(1, 96); |
---|
| 1082 | + ANDROID_OEM_DATA_ARRAY(1, 16); |
---|
| 1083 | + |
---|
| 1084 | + ANDROID_KABI_RESERVE(1); |
---|
| 1085 | + ANDROID_KABI_RESERVE(2); |
---|
| 1086 | + ANDROID_KABI_RESERVE(3); |
---|
| 1087 | + ANDROID_KABI_RESERVE(4); |
---|
1027 | 1088 | }; |
---|
1028 | 1089 | |
---|
1029 | 1090 | #ifdef CONFIG_FAIR_GROUP_SCHED |
---|
.. | .. |
---|
1132 | 1193 | return rq->clock_task; |
---|
1133 | 1194 | } |
---|
1134 | 1195 | |
---|
| 1196 | +#ifdef CONFIG_SMP |
---|
| 1197 | +DECLARE_PER_CPU(u64, clock_task_mult); |
---|
| 1198 | + |
---|
| 1199 | +static inline u64 rq_clock_task_mult(struct rq *rq) |
---|
| 1200 | +{ |
---|
| 1201 | + lockdep_assert_held(&rq->lock); |
---|
| 1202 | + assert_clock_updated(rq); |
---|
| 1203 | + |
---|
| 1204 | + return per_cpu(clock_task_mult, cpu_of(rq)); |
---|
| 1205 | +} |
---|
| 1206 | +#else |
---|
| 1207 | +static inline u64 rq_clock_task_mult(struct rq *rq) |
---|
| 1208 | +{ |
---|
| 1209 | + return rq_clock_task(rq); |
---|
| 1210 | +} |
---|
| 1211 | +#endif |
---|
| 1212 | + |
---|
| 1213 | +/** |
---|
| 1214 | + * By default the decay is the default pelt decay period. |
---|
| 1215 | + * The decay shift can change the decay period in |
---|
| 1216 | + * multiples of 32. |
---|
| 1217 | + * Decay shift Decay period(ms) |
---|
| 1218 | + * 0 32 |
---|
| 1219 | + * 1 64 |
---|
| 1220 | + * 2 128 |
---|
| 1221 | + * 3 256 |
---|
| 1222 | + * 4 512 |
---|
| 1223 | + */ |
---|
| 1224 | +extern int sched_thermal_decay_shift; |
---|
| 1225 | + |
---|
| 1226 | +static inline u64 rq_clock_thermal(struct rq *rq) |
---|
| 1227 | +{ |
---|
| 1228 | + return rq_clock_task(rq) >> sched_thermal_decay_shift; |
---|
| 1229 | +} |
---|
| 1230 | + |
---|
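rq_clock_thermal() slows down the clock fed to the thermal-pressure PELT signal: shifting the task clock right by sched_thermal_decay_shift makes time look 2^shift times slower to the averaging code, which stretches the decay period exactly as the table above describes. A quick arithmetic sketch, assuming the default 32 ms PELT decay period:

```c
#include <stdio.h>
#include <stdint.h>

#define PELT_BASE_PERIOD_MS	32	/* default PELT decay period */

int main(void)
{
	/* Each extra shift halves the apparent clock rate, doubling the
	 * effective decay period -- the table in the comment above. */
	for (int shift = 0; shift <= 4; shift++) {
		uint64_t one_ms_of_clock = 1000000ULL >> shift;	/* ns */

		printf("shift %d: decay period %3d ms, 1 ms of rq clock reads as %7llu ns\n",
		       shift, PELT_BASE_PERIOD_MS << shift,
		       (unsigned long long)one_ms_of_clock);
	}
	return 0;
}
```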
1135 | 1231 | static inline void rq_clock_skip_update(struct rq *rq) |
---|
1136 | 1232 | { |
---|
1137 | 1233 | lockdep_assert_held(&rq->lock); |
---|
.. | .. |
---|
1161 | 1257 | #endif |
---|
1162 | 1258 | }; |
---|
1163 | 1259 | |
---|
| 1260 | +/* |
---|
| 1261 | + * Lockdep annotation that avoids accidental unlocks; it's like a |
---|
| 1262 | + * sticky/continuous lockdep_assert_held(). |
---|
| 1263 | + * |
---|
| 1264 | + * This avoids code that has access to 'struct rq *rq' (basically everything in |
---|
| 1265 | + * the scheduler) from accidentally unlocking the rq if they do not also have a |
---|
| 1266 | + * copy of the (on-stack) 'struct rq_flags rf'. |
---|
| 1267 | + * |
---|
| 1268 | + * Also see Documentation/locking/lockdep-design.rst. |
---|
| 1269 | + */ |
---|
1164 | 1270 | static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) |
---|
1165 | 1271 | { |
---|
1166 | 1272 | rf->cookie = lockdep_pin_lock(&rq->lock); |
---|
.. | .. |
---|
1294 | 1400 | extern enum numa_topology_type sched_numa_topology_type; |
---|
1295 | 1401 | extern int sched_max_numa_distance; |
---|
1296 | 1402 | extern bool find_numa_distance(int distance); |
---|
1297 | | -#endif |
---|
1298 | | - |
---|
1299 | | -#ifdef CONFIG_NUMA |
---|
1300 | 1403 | extern void sched_init_numa(void); |
---|
1301 | 1404 | extern void sched_domains_numa_masks_set(unsigned int cpu); |
---|
1302 | 1405 | extern void sched_domains_numa_masks_clear(unsigned int cpu); |
---|
| 1406 | +extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); |
---|
1303 | 1407 | #else |
---|
1304 | 1408 | static inline void sched_init_numa(void) { } |
---|
1305 | 1409 | static inline void sched_domains_numa_masks_set(unsigned int cpu) { } |
---|
1306 | 1410 | static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } |
---|
| 1411 | +static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) |
---|
| 1412 | +{ |
---|
| 1413 | + return nr_cpu_ids; |
---|
| 1414 | +} |
---|
1307 | 1415 | #endif |
---|
1308 | 1416 | |
---|
1309 | 1417 | #ifdef CONFIG_NUMA_BALANCING |
---|
.. | .. |
---|
1316 | 1424 | }; |
---|
1317 | 1425 | extern void sched_setnuma(struct task_struct *p, int node); |
---|
1318 | 1426 | extern int migrate_task_to(struct task_struct *p, int cpu); |
---|
1319 | | -extern int migrate_swap(struct task_struct *p, struct task_struct *t, |
---|
1320 | | - int cpu, int scpu); |
---|
1321 | 1427 | extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); |
---|
1322 | 1428 | #else |
---|
1323 | 1429 | static inline void |
---|
.. | .. |
---|
1328 | 1434 | |
---|
1329 | 1435 | #ifdef CONFIG_SMP |
---|
1330 | 1436 | |
---|
| 1437 | +extern int migrate_swap(struct task_struct *p, struct task_struct *t, |
---|
| 1438 | + int cpu, int scpu); |
---|
1331 | 1439 | static inline void |
---|
1332 | 1440 | queue_balance_callback(struct rq *rq, |
---|
1333 | 1441 | struct callback_head *head, |
---|
.. | .. |
---|
1343 | 1451 | rq->balance_callback = head; |
---|
1344 | 1452 | } |
---|
1345 | 1453 | |
---|
1346 | | -extern void sched_ttwu_pending(void); |
---|
1347 | | - |
---|
1348 | 1454 | #define rcu_dereference_check_sched_domain(p) \ |
---|
1349 | 1455 | rcu_dereference_check((p), \ |
---|
1350 | 1456 | lockdep_is_held(&sched_domains_mutex)) |
---|
1351 | 1457 | |
---|
1352 | 1458 | /* |
---|
1353 | 1459 | * The domain tree (rq->sd) is protected by RCU's quiescent state transition. |
---|
1354 | | - * See detach_destroy_domains: synchronize_sched for details. |
---|
| 1460 | + * See destroy_sched_domains: call_rcu for details. |
---|
1355 | 1461 | * |
---|
1356 | 1462 | * The domain tree of any CPU may only be accessed from within |
---|
1357 | 1463 | * preempt-disabled sections. |
---|
.. | .. |
---|
1359 | 1465 | #define for_each_domain(cpu, __sd) \ |
---|
1360 | 1466 | for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ |
---|
1361 | 1467 | __sd; __sd = __sd->parent) |
---|
1362 | | - |
---|
1363 | | -#define for_each_lower_domain(sd) for (; sd; sd = sd->child) |
---|
1364 | 1468 | |
---|
1365 | 1469 | /** |
---|
1366 | 1470 | * highest_flag_domain - Return highest sched_domain containing flag. |
---|
.. | .. |
---|
1396 | 1500 | return sd; |
---|
1397 | 1501 | } |
---|
1398 | 1502 | |
---|
1399 | | -DECLARE_PER_CPU(struct sched_domain *, sd_llc); |
---|
| 1503 | +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); |
---|
1400 | 1504 | DECLARE_PER_CPU(int, sd_llc_size); |
---|
1401 | 1505 | DECLARE_PER_CPU(int, sd_llc_id); |
---|
1402 | | -DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); |
---|
1403 | | -DECLARE_PER_CPU(struct sched_domain *, sd_numa); |
---|
1404 | | -DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing); |
---|
1405 | | -DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); |
---|
| 1506 | +DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); |
---|
| 1507 | +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); |
---|
| 1508 | +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); |
---|
| 1509 | +DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); |
---|
1406 | 1510 | extern struct static_key_false sched_asym_cpucapacity; |
---|
1407 | 1511 | |
---|
1408 | 1512 | struct sched_group_capacity { |
---|
.. | .. |
---|
1421 | 1525 | int id; |
---|
1422 | 1526 | #endif |
---|
1423 | 1527 | |
---|
1424 | | - unsigned long cpumask[0]; /* Balance mask */ |
---|
| 1528 | + unsigned long cpumask[]; /* Balance mask */ |
---|
1425 | 1529 | }; |
---|
1426 | 1530 | |
---|
1427 | 1531 | struct sched_group { |
---|
.. | .. |
---|
1439 | 1543 | * by attaching extra space to the end of the structure, |
---|
1440 | 1544 | * depending on how many CPUs the kernel has booted up with) |
---|
1441 | 1545 | */ |
---|
1442 | | - unsigned long cpumask[0]; |
---|
| 1546 | + unsigned long cpumask[]; |
---|
1443 | 1547 | }; |
---|
1444 | 1548 | |
---|
1445 | 1549 | static inline struct cpumask *sched_group_span(struct sched_group *sg) |
---|
.. | .. |
---|
1482 | 1586 | } |
---|
1483 | 1587 | #endif |
---|
1484 | 1588 | |
---|
1485 | | -#else |
---|
| 1589 | +extern void flush_smp_call_function_from_idle(void); |
---|
1486 | 1590 | |
---|
1487 | | -static inline void sched_ttwu_pending(void) { } |
---|
1488 | | - |
---|
1489 | | -#endif /* CONFIG_SMP */ |
---|
| 1591 | +#else /* !CONFIG_SMP: */ |
---|
| 1592 | +static inline void flush_smp_call_function_from_idle(void) { } |
---|
| 1593 | +#endif |
---|
1490 | 1594 | |
---|
1491 | 1595 | #include "stats.h" |
---|
1492 | 1596 | #include "autogroup.h" |
---|
.. | .. |
---|
1546 | 1650 | #ifdef CONFIG_SMP |
---|
1547 | 1651 | /* |
---|
1548 | 1652 | * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
---|
1549 | | - * successfuly executed on another CPU. We must ensure that updates of |
---|
| 1653 | + * successfully executed on another CPU. We must ensure that updates of |
---|
1550 | 1654 | * per-task data have been completed by this moment. |
---|
1551 | 1655 | */ |
---|
1552 | 1656 | smp_wmb(); |
---|
.. | .. |
---|
1598 | 1702 | #undef SCHED_FEAT |
---|
1599 | 1703 | |
---|
1600 | 1704 | extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
---|
| 1705 | +extern const char * const sched_feat_names[__SCHED_FEAT_NR]; |
---|
| 1706 | + |
---|
1601 | 1707 | #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
---|
1602 | 1708 | |
---|
1603 | 1709 | #else /* !CONFIG_JUMP_LABEL */ |
---|
.. | .. |
---|
1669 | 1775 | */ |
---|
1670 | 1776 | #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ |
---|
1671 | 1777 | #define WF_FORK 0x02 /* Child wakeup after fork */ |
---|
1672 | | -#define WF_MIGRATED 0x4 /* Internal use, task got migrated */ |
---|
| 1778 | +#define WF_MIGRATED 0x04 /* Internal use, task got migrated */ |
---|
| 1779 | +#define WF_ON_CPU 0x08 /* Wakee is on_cpu */ |
---|
| 1780 | +#define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */ |
---|
1673 | 1781 | |
---|
1674 | 1782 | /* |
---|
1675 | 1783 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
---|
.. | .. |
---|
1723 | 1831 | #define ENQUEUE_MIGRATED 0x00 |
---|
1724 | 1832 | #endif |
---|
1725 | 1833 | |
---|
| 1834 | +#define ENQUEUE_WAKEUP_SYNC 0x80 |
---|
| 1835 | + |
---|
1726 | 1836 | #define RETRY_TASK ((void *)-1UL) |
---|
1727 | 1837 | |
---|
1728 | 1838 | struct sched_class { |
---|
1729 | | - const struct sched_class *next; |
---|
1730 | 1839 | |
---|
1731 | 1840 | #ifdef CONFIG_UCLAMP_TASK |
---|
1732 | 1841 | int uclamp_enabled; |
---|
.. | .. |
---|
1735 | 1844 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
---|
1736 | 1845 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
---|
1737 | 1846 | void (*yield_task) (struct rq *rq); |
---|
1738 | | - bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); |
---|
| 1847 | + bool (*yield_to_task)(struct rq *rq, struct task_struct *p); |
---|
1739 | 1848 | |
---|
1740 | 1849 | void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); |
---|
1741 | 1850 | |
---|
1742 | | - /* |
---|
1743 | | - * It is the responsibility of the pick_next_task() method that will |
---|
1744 | | - * return the next task to call put_prev_task() on the @prev task or |
---|
1745 | | - * something equivalent. |
---|
1746 | | - * |
---|
1747 | | - * May return RETRY_TASK when it finds a higher prio class has runnable |
---|
1748 | | - * tasks. |
---|
1749 | | - */ |
---|
1750 | | - struct task_struct * (*pick_next_task)(struct rq *rq, |
---|
1751 | | - struct task_struct *prev, |
---|
1752 | | - struct rq_flags *rf); |
---|
| 1851 | + struct task_struct *(*pick_next_task)(struct rq *rq); |
---|
| 1852 | + |
---|
1753 | 1853 | void (*put_prev_task)(struct rq *rq, struct task_struct *p); |
---|
| 1854 | + void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); |
---|
1754 | 1855 | |
---|
1755 | 1856 | #ifdef CONFIG_SMP |
---|
1756 | | - int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags, |
---|
1757 | | - int sibling_count_hint); |
---|
| 1857 | + int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
---|
| 1858 | + int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); |
---|
1758 | 1859 | void (*migrate_task_rq)(struct task_struct *p, int new_cpu); |
---|
1759 | 1860 | |
---|
1760 | 1861 | void (*task_woken)(struct rq *this_rq, struct task_struct *task); |
---|
.. | .. |
---|
1766 | 1867 | void (*rq_offline)(struct rq *rq); |
---|
1767 | 1868 | #endif |
---|
1768 | 1869 | |
---|
1769 | | - void (*set_curr_task)(struct rq *rq); |
---|
1770 | 1870 | void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); |
---|
1771 | 1871 | void (*task_fork)(struct task_struct *p); |
---|
1772 | 1872 | void (*task_dead)(struct task_struct *p); |
---|
.. | .. |
---|
1792 | 1892 | #ifdef CONFIG_FAIR_GROUP_SCHED |
---|
1793 | 1893 | void (*task_change_group)(struct task_struct *p, int type); |
---|
1794 | 1894 | #endif |
---|
1795 | | -}; |
---|
| 1895 | +} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */ |
---|
1796 | 1896 | |
---|
1797 | 1897 | static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
---|
1798 | 1898 | { |
---|
| 1899 | + WARN_ON_ONCE(rq->curr != prev); |
---|
1799 | 1900 | prev->sched_class->put_prev_task(rq, prev); |
---|
1800 | 1901 | } |
---|
1801 | 1902 | |
---|
1802 | | -static inline void set_curr_task(struct rq *rq, struct task_struct *curr) |
---|
| 1903 | +static inline void set_next_task(struct rq *rq, struct task_struct *next) |
---|
1803 | 1904 | { |
---|
1804 | | - curr->sched_class->set_curr_task(rq); |
---|
| 1905 | + WARN_ON_ONCE(rq->curr != next); |
---|
| 1906 | + next->sched_class->set_next_task(rq, next, false); |
---|
1805 | 1907 | } |
---|
1806 | 1908 | |
---|
1807 | | -#ifdef CONFIG_SMP |
---|
1808 | | -#define sched_class_highest (&stop_sched_class) |
---|
1809 | | -#else |
---|
1810 | | -#define sched_class_highest (&dl_sched_class) |
---|
1811 | | -#endif |
---|
| 1909 | +/* Defined in include/asm-generic/vmlinux.lds.h */ |
---|
| 1910 | +extern struct sched_class __begin_sched_classes[]; |
---|
| 1911 | +extern struct sched_class __end_sched_classes[]; |
---|
| 1912 | + |
---|
| 1913 | +#define sched_class_highest (__end_sched_classes - 1) |
---|
| 1914 | +#define sched_class_lowest (__begin_sched_classes - 1) |
---|
| 1915 | + |
---|
| 1916 | +#define for_class_range(class, _from, _to) \ |
---|
| 1917 | + for (class = (_from); class != (_to); class--) |
---|
| 1918 | + |
---|
1812 | 1919 | #define for_each_class(class) \ |
---|
1813 | | - for (class = sched_class_highest; class; class = class->next) |
---|
| 1920 | + for_class_range(class, sched_class_highest, sched_class_lowest) |
---|
1814 | 1921 | |
---|
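With the ->next list removed, the scheduling classes are laid out by the linker as one contiguous, priority-ordered array between __begin_sched_classes and __end_sched_classes, and for_class_range() walks it from highest to lowest using one-before-begin as the loop terminator. A userspace analogy of that iteration pattern; the array, its contents and their order are stand-ins for the linker-section layout:

```c
#include <stdio.h>

struct sched_class_toy { const char *name; };

/* Stand-in for the linker-built section: lowest priority first,
 * highest priority last, the order for_each_class() relies on. */
static struct sched_class_toy classes[] = {
	{ "idle" }, { "fair" }, { "rt" }, { "dl" }, { "stop" },
};

#define class_highest	(classes + sizeof(classes) / sizeof(classes[0]) - 1)
#define class_lowest	(classes - 1)	/* one-before-begin terminator,
					 * like sched_class_lowest */

#define for_class_range(c, from, to) \
	for (c = (from); c != (to); c--)

#define for_each_class(c) \
	for_class_range(c, class_highest, class_lowest)

int main(void)
{
	struct sched_class_toy *c;

	for_each_class(c)
		printf("%s\n", c->name);	/* stop, dl, rt, fair, idle */
	return 0;
}
```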
1815 | 1922 | extern const struct sched_class stop_sched_class; |
---|
1816 | 1923 | extern const struct sched_class dl_sched_class; |
---|
.. | .. |
---|
1818 | 1925 | extern const struct sched_class fair_sched_class; |
---|
1819 | 1926 | extern const struct sched_class idle_sched_class; |
---|
1820 | 1927 | |
---|
| 1928 | +static inline bool sched_stop_runnable(struct rq *rq) |
---|
| 1929 | +{ |
---|
| 1930 | + return rq->stop && task_on_rq_queued(rq->stop); |
---|
| 1931 | +} |
---|
| 1932 | + |
---|
| 1933 | +static inline bool sched_dl_runnable(struct rq *rq) |
---|
| 1934 | +{ |
---|
| 1935 | + return rq->dl.dl_nr_running > 0; |
---|
| 1936 | +} |
---|
| 1937 | + |
---|
| 1938 | +static inline bool sched_rt_runnable(struct rq *rq) |
---|
| 1939 | +{ |
---|
| 1940 | + return rq->rt.rt_queued > 0; |
---|
| 1941 | +} |
---|
| 1942 | + |
---|
| 1943 | +static inline bool sched_fair_runnable(struct rq *rq) |
---|
| 1944 | +{ |
---|
| 1945 | + return rq->cfs.nr_running > 0; |
---|
| 1946 | +} |
---|
| 1947 | + |
---|
| 1948 | +extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
---|
| 1949 | +extern struct task_struct *pick_next_task_idle(struct rq *rq); |
---|
1821 | 1950 | |
---|
1822 | 1951 | #ifdef CONFIG_SMP |
---|
1823 | 1952 | |
---|
.. | .. |
---|
1827 | 1956 | |
---|
1828 | 1957 | extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); |
---|
1829 | 1958 | |
---|
| 1959 | +extern unsigned long __read_mostly max_load_balance_interval; |
---|
1830 | 1960 | #endif |
---|
1831 | 1961 | |
---|
1832 | 1962 | #ifdef CONFIG_CPU_IDLE |
---|
.. | .. |
---|
1842 | 1972 | |
---|
1843 | 1973 | return rq->idle_state; |
---|
1844 | 1974 | } |
---|
1845 | | - |
---|
1846 | | -static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
---|
1847 | | -{ |
---|
1848 | | - rq->idle_state_idx = idle_state_idx; |
---|
1849 | | -} |
---|
1850 | | - |
---|
1851 | | -static inline int idle_get_state_idx(struct rq *rq) |
---|
1852 | | -{ |
---|
1853 | | - WARN_ON(!rcu_read_lock_held()); |
---|
1854 | | - return rq->idle_state_idx; |
---|
1855 | | -} |
---|
1856 | 1975 | #else |
---|
1857 | 1976 | static inline void idle_set_state(struct rq *rq, |
---|
1858 | 1977 | struct cpuidle_state *idle_state) |
---|
.. | .. |
---|
1862 | 1981 | static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
---|
1863 | 1982 | { |
---|
1864 | 1983 | return NULL; |
---|
1865 | | -} |
---|
1866 | | - |
---|
1867 | | -static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) |
---|
1868 | | -{ |
---|
1869 | | -} |
---|
1870 | | - |
---|
1871 | | -static inline int idle_get_state_idx(struct rq *rq) |
---|
1872 | | -{ |
---|
1873 | | - return -1; |
---|
1874 | 1984 | } |
---|
1875 | 1985 | #endif |
---|
1876 | 1986 | |
---|
.. | .. |
---|
1896 | 2006 | extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); |
---|
1897 | 2007 | extern void init_dl_task_timer(struct sched_dl_entity *dl_se); |
---|
1898 | 2008 | extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); |
---|
1899 | | -extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); |
---|
1900 | 2009 | |
---|
1901 | 2010 | #define BW_SHIFT 20 |
---|
1902 | 2011 | #define BW_UNIT (1 << BW_SHIFT) |
---|
1903 | 2012 | #define RATIO_SHIFT 8 |
---|
| 2013 | +#define MAX_BW_BITS (64 - BW_SHIFT) |
---|
| 2014 | +#define MAX_BW ((1ULL << MAX_BW_BITS) - 1) |
---|
1904 | 2015 | unsigned long to_ratio(u64 period, u64 runtime); |
---|
1905 | 2016 | |
---|
1906 | 2017 | extern void init_entity_runnable_average(struct sched_entity *se); |
---|
1907 | | -extern void post_init_entity_util_avg(struct sched_entity *se); |
---|
| 2018 | +extern void post_init_entity_util_avg(struct task_struct *p); |
---|
1908 | 2019 | |
---|
1909 | 2020 | #ifdef CONFIG_NO_HZ_FULL |
---|
1910 | 2021 | extern bool sched_can_stop_tick(struct rq *rq); |
---|
.. | .. |
---|
1917 | 2028 | */ |
---|
1918 | 2029 | static inline void sched_update_tick_dependency(struct rq *rq) |
---|
1919 | 2030 | { |
---|
1920 | | - int cpu; |
---|
1921 | | - |
---|
1922 | | - if (!tick_nohz_full_enabled()) |
---|
1923 | | - return; |
---|
1924 | | - |
---|
1925 | | - cpu = cpu_of(rq); |
---|
| 2031 | + int cpu = cpu_of(rq); |
---|
1926 | 2032 | |
---|
1927 | 2033 | if (!tick_nohz_full_cpu(cpu)) |
---|
1928 | 2034 | return; |
---|
.. | .. |
---|
1942 | 2048 | unsigned prev_nr = rq->nr_running; |
---|
1943 | 2049 | |
---|
1944 | 2050 | rq->nr_running = prev_nr + count; |
---|
| 2051 | + if (trace_sched_update_nr_running_tp_enabled()) { |
---|
| 2052 | + call_trace_sched_update_nr_running(rq, count); |
---|
| 2053 | + } |
---|
1945 | 2054 | |
---|
1946 | | - if (prev_nr < 2 && rq->nr_running >= 2) { |
---|
1947 | 2055 | #ifdef CONFIG_SMP |
---|
| 2056 | + if (prev_nr < 2 && rq->nr_running >= 2) { |
---|
1948 | 2057 | if (!READ_ONCE(rq->rd->overload)) |
---|
1949 | 2058 | WRITE_ONCE(rq->rd->overload, 1); |
---|
1950 | | -#endif |
---|
1951 | 2059 | } |
---|
| 2060 | +#endif |
---|
1952 | 2061 | |
---|
1953 | 2062 | sched_update_tick_dependency(rq); |
---|
1954 | 2063 | } |
---|
.. | .. |
---|
1956 | 2065 | static inline void sub_nr_running(struct rq *rq, unsigned count) |
---|
1957 | 2066 | { |
---|
1958 | 2067 | rq->nr_running -= count; |
---|
| 2068 | + if (trace_sched_update_nr_running_tp_enabled()) { |
---|
| 2069 | + call_trace_sched_update_nr_running(rq, -count); |
---|
| 2070 | + } |
---|
| 2071 | + |
---|
1959 | 2072 | /* Check if we still need preemption */ |
---|
1960 | 2073 | sched_update_tick_dependency(rq); |
---|
1961 | 2074 | } |
---|
.. | .. |
---|
1995 | 2108 | |
---|
1996 | 2109 | #endif /* CONFIG_SCHED_HRTICK */ |
---|
1997 | 2110 | |
---|
| 2111 | +#ifndef arch_scale_freq_tick |
---|
| 2112 | +static __always_inline |
---|
| 2113 | +void arch_scale_freq_tick(void) |
---|
| 2114 | +{ |
---|
| 2115 | +} |
---|
| 2116 | +#endif |
---|
| 2117 | + |
---|
1998 | 2118 | #ifndef arch_scale_freq_capacity |
---|
| 2119 | +/** |
---|
| 2120 | + * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. |
---|
| 2121 | + * @cpu: the CPU in question. |
---|
| 2122 | + * |
---|
| 2123 | + * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. |
---|
| 2124 | + * |
---|
| 2125 | + * f_curr |
---|
| 2126 | + * ------ * SCHED_CAPACITY_SCALE |
---|
| 2127 | + * f_max |
---|
| 2128 | + */ |
---|
1999 | 2129 | static __always_inline |
---|
2000 | 2130 | unsigned long arch_scale_freq_capacity(int cpu) |
---|
2001 | 2131 | { |
---|
.. | .. |
---|
2003 | 2133 | } |
---|
2004 | 2134 | #endif |
---|
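The new kerneldoc defines the frequency scale factor as f_curr / f_max normalised to SCHED_CAPACITY_SCALE. A worked example of that formula, with made-up frequencies:

```c
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* f_curr / f_max scaled to SCHED_CAPACITY_SCALE, per the kerneldoc. */
static unsigned long freq_scale(unsigned long f_curr_khz, unsigned long f_max_khz)
{
	return f_curr_khz * SCHED_CAPACITY_SCALE / f_max_khz;
}

int main(void)
{
	/* Running at 1.2 GHz out of a 2.4 GHz maximum yields 512,
	 * i.e. half of SCHED_CAPACITY_SCALE. */
	printf("%lu\n", freq_scale(1200000, 2400000));	/* 512  */
	printf("%lu\n", freq_scale(2400000, 2400000));	/* 1024 */
	return 0;
}
```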
2005 | 2135 | |
---|
2006 | | -#ifndef arch_scale_max_freq_capacity |
---|
2007 | | -struct sched_domain; |
---|
2008 | | -static __always_inline |
---|
2009 | | -unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu) |
---|
2010 | | -{ |
---|
2011 | | - return SCHED_CAPACITY_SCALE; |
---|
2012 | | -} |
---|
2013 | | -#endif |
---|
2014 | | - |
---|
2015 | 2136 | #ifdef CONFIG_SMP |
---|
2016 | | -#ifdef CONFIG_PREEMPT |
---|
| 2137 | +#ifdef CONFIG_PREEMPTION |
---|
2017 | 2138 | |
---|
2018 | 2139 | static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); |
---|
2019 | 2140 | |
---|
.. | .. |
---|
2065 | 2186 | return ret; |
---|
2066 | 2187 | } |
---|
2067 | 2188 | |
---|
2068 | | -#endif /* CONFIG_PREEMPT */ |
---|
| 2189 | +#endif /* CONFIG_PREEMPTION */ |
---|
2069 | 2190 | |
---|
2070 | 2191 | /* |
---|
2071 | 2192 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
---|
.. | .. |
---|
2298 | 2419 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
---|
2299 | 2420 | |
---|
2300 | 2421 | #ifdef CONFIG_CPU_FREQ |
---|
2301 | | -DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); |
---|
| 2422 | +DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); |
---|
2302 | 2423 | |
---|
2303 | 2424 | /** |
---|
2304 | 2425 | * cpufreq_update_util - Take a note about CPU utilization changes. |
---|
.. | .. |
---|
2338 | 2459 | #ifdef CONFIG_UCLAMP_TASK |
---|
2339 | 2460 | unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); |
---|
2340 | 2461 | |
---|
| 2462 | +/** |
---|
| 2463 | + * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. |
---|
| 2464 | + * @rq: The rq to clamp against. Must not be NULL. |
---|
| 2465 | + * @util: The util value to clamp. |
---|
| 2466 | + * @p: The task to clamp against. Can be NULL if you want to clamp |
---|
| 2467 | + * against @rq only. |
---|
| 2468 | + * |
---|
| 2469 | + * Clamps the passed @util to the max(@rq, @p) effective uclamp values. |
---|
| 2470 | + * |
---|
| 2471 | + * If sched_uclamp_used static key is disabled, then just return the util |
---|
| 2472 | + * without any clamping since uclamp aggregation at the rq level in the fast |
---|
| 2473 | + * path is disabled, rendering this operation a NOP. |
---|
| 2474 | + * |
---|
| 2475 | + * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It |
---|
| 2476 | + * will return the correct effective uclamp value of the task even if the |
---|
| 2477 | + * static key is disabled. |
---|
| 2478 | + */ |
---|
2341 | 2479 | static __always_inline |
---|
2342 | 2480 | unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, |
---|
2343 | 2481 | struct task_struct *p) |
---|
2344 | 2482 | { |
---|
2345 | | - unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); |
---|
2346 | | - unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); |
---|
| 2483 | + unsigned long min_util = 0; |
---|
| 2484 | + unsigned long max_util = 0; |
---|
| 2485 | + |
---|
| 2486 | + if (!static_branch_likely(&sched_uclamp_used)) |
---|
| 2487 | + return util; |
---|
2347 | 2488 | |
---|
2348 | 2489 | if (p) { |
---|
2349 | | - min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); |
---|
2350 | | - max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); |
---|
| 2490 | + min_util = uclamp_eff_value(p, UCLAMP_MIN); |
---|
| 2491 | + max_util = uclamp_eff_value(p, UCLAMP_MAX); |
---|
| 2492 | + |
---|
| 2493 | + /* |
---|
| 2494 | + * Ignore last runnable task's max clamp, as this task will |
---|
| 2495 | + * reset it. Similarly, no need to read the rq's min clamp. |
---|
| 2496 | + */ |
---|
| 2497 | + if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) |
---|
| 2498 | + goto out; |
---|
2351 | 2499 | } |
---|
2352 | 2500 | |
---|
| 2501 | + min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); |
---|
| 2502 | + max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); |
---|
| 2503 | +out: |
---|
2353 | 2504 | /* |
---|
2354 | 2505 | * Since CPU's {min,max}_util clamps are MAX aggregated considering |
---|
2355 | 2506 | * RUNNABLE tasks with _different_ clamps, we can end up with an |
---|
.. | .. |
---|
2360 | 2511 | |
---|
2361 | 2512 | return clamp(util, min_util, max_util); |
---|
2362 | 2513 | } |
---|
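uclamp_rq_util_with() clamps a utilization value to the max-aggregated min/max clamps of the rq and, optionally, of a task about to run on it. A simplified standalone sketch of just the clamping step; it omits the sched_uclamp_used static key and the UCLAMP_FLAG_IDLE shortcut handled above, and all values are invented:

```c
#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Simplified uclamp_rq_util_with(): max-aggregate the rq clamps with
 * the task's effective clamps, then clamp the utilization. */
static unsigned long rq_util_with(unsigned long util,
				  unsigned long rq_min, unsigned long rq_max,
				  unsigned long p_min, unsigned long p_max)
{
	unsigned long min_util = max_ul(rq_min, p_min);
	unsigned long max_util = max_ul(rq_max, p_max);

	return clamp_ul(util, min_util, max_util);
}

int main(void)
{
	/* A util of 100 is boosted to the waking task's 256 floor ... */
	printf("%lu\n", rq_util_with(100, 0, 1024, 256, 1024));	/* 256 */
	/* ... while a util of 800 is capped by a 512 max clamp. */
	printf("%lu\n", rq_util_with(800, 0, 512, 0, 512));	/* 512 */
	return 0;
}
```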
| 2514 | + |
---|
| 2515 | +static inline bool uclamp_boosted(struct task_struct *p) |
---|
| 2516 | +{ |
---|
| 2517 | + return uclamp_eff_value(p, UCLAMP_MIN) > 0; |
---|
| 2518 | +} |
---|
| 2519 | + |
---|
| 2520 | +/* |
---|
| 2521 | + * When uclamp is compiled in, the aggregation at rq level is 'turned off' |
---|
| 2522 | + * by default in the fast path and only gets turned on once userspace performs |
---|
| 2523 | + * an operation that requires it. |
---|
| 2524 | + * |
---|
| 2525 | + * Returns true if userspace opted-in to use uclamp and aggregation at rq level |
---|
| 2526 | + * hence is active. |
---|
| 2527 | + */ |
---|
| 2528 | +static inline bool uclamp_is_used(void) |
---|
| 2529 | +{ |
---|
| 2530 | + return static_branch_likely(&sched_uclamp_used); |
---|
| 2531 | +} |
---|
2363 | 2532 | #else /* CONFIG_UCLAMP_TASK */ |
---|
2364 | 2533 | static inline |
---|
2365 | 2534 | unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, |
---|
.. | .. |
---|
2367 | 2536 | { |
---|
2368 | 2537 | return util; |
---|
2369 | 2538 | } |
---|
| 2539 | + |
---|
| 2540 | +static inline bool uclamp_boosted(struct task_struct *p) |
---|
| 2541 | +{ |
---|
| 2542 | + return false; |
---|
| 2543 | +} |
---|
| 2544 | + |
---|
| 2545 | +static inline bool uclamp_is_used(void) |
---|
| 2546 | +{ |
---|
| 2547 | + return false; |
---|
| 2548 | +} |
---|
2370 | 2549 | #endif /* CONFIG_UCLAMP_TASK */ |
---|
2371 | 2550 | |
---|
2372 | | -unsigned long task_util_est(struct task_struct *p); |
---|
2373 | | -unsigned int uclamp_task(struct task_struct *p); |
---|
2374 | | -bool uclamp_latency_sensitive(struct task_struct *p); |
---|
2375 | | -bool uclamp_boosted(struct task_struct *p); |
---|
| 2551 | +#ifdef CONFIG_UCLAMP_TASK_GROUP |
---|
| 2552 | +static inline bool uclamp_latency_sensitive(struct task_struct *p) |
---|
| 2553 | +{ |
---|
| 2554 | + struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id); |
---|
| 2555 | + struct task_group *tg; |
---|
| 2556 | + |
---|
| 2557 | + if (!css) |
---|
| 2558 | + return false; |
---|
| 2559 | + tg = container_of(css, struct task_group, css); |
---|
| 2560 | + |
---|
| 2561 | + return tg->latency_sensitive; |
---|
| 2562 | +} |
---|
| 2563 | +#else |
---|
| 2564 | +static inline bool uclamp_latency_sensitive(struct task_struct *p) |
---|
| 2565 | +{ |
---|
| 2566 | + return false; |
---|
| 2567 | +} |
---|
| 2568 | +#endif /* CONFIG_UCLAMP_TASK_GROUP */ |
---|
2376 | 2569 | |
---|
2377 | 2570 | #ifdef arch_scale_freq_capacity |
---|
2378 | 2571 | # ifndef arch_scale_freq_invariant |
---|
.. | .. |
---|
2404 | 2597 | ENERGY_UTIL, |
---|
2405 | 2598 | }; |
---|
2406 | 2599 | |
---|
2407 | | -#ifdef CONFIG_SMP |
---|
2408 | | -static inline unsigned long cpu_util_cfs(struct rq *rq) |
---|
2409 | | -{ |
---|
2410 | | - unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); |
---|
2411 | | - |
---|
2412 | | - if (sched_feat(UTIL_EST)) { |
---|
2413 | | - util = max_t(unsigned long, util, |
---|
2414 | | - READ_ONCE(rq->cfs.avg.util_est.enqueued)); |
---|
2415 | | - } |
---|
2416 | | - |
---|
2417 | | - return util; |
---|
2418 | | -} |
---|
2419 | | -#endif |
---|
2420 | | - |
---|
2421 | 2600 | #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL |
---|
2422 | 2601 | |
---|
2423 | 2602 | unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, |
---|
.. | .. |
---|
2434 | 2613 | return READ_ONCE(rq->avg_dl.util_avg); |
---|
2435 | 2614 | } |
---|
2436 | 2615 | |
---|
| 2616 | +static inline unsigned long cpu_util_cfs(struct rq *rq) |
---|
| 2617 | +{ |
---|
| 2618 | + unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); |
---|
| 2619 | + |
---|
| 2620 | + if (sched_feat(UTIL_EST)) { |
---|
| 2621 | + util = max_t(unsigned long, util, |
---|
| 2622 | + READ_ONCE(rq->cfs.avg.util_est.enqueued)); |
---|
| 2623 | + } |
---|
| 2624 | + |
---|
| 2625 | + return util; |
---|
| 2626 | +} |
---|
| 2627 | + |
---|
2437 | 2628 | static inline unsigned long cpu_util_rt(struct rq *rq) |
---|
2438 | 2629 | { |
---|
2439 | 2630 | return READ_ONCE(rq->avg_rt.util_avg); |
---|
2440 | 2631 | } |
---|
2441 | | - |
---|
2442 | 2632 | #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ |
---|
2443 | 2633 | static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, |
---|
2444 | 2634 | unsigned long max, enum schedutil_type type, |
---|
.. | .. |
---|
2476 | 2666 | } |
---|
2477 | 2667 | #endif |
---|
2478 | 2668 | |
---|
2479 | | -#ifdef CONFIG_SMP |
---|
2480 | | -#ifdef CONFIG_ENERGY_MODEL |
---|
| 2669 | +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) |
---|
| 2670 | + |
---|
2481 | 2671 | #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) |
---|
2482 | | -#else |
---|
| 2672 | + |
---|
| 2673 | +DECLARE_STATIC_KEY_FALSE(sched_energy_present); |
---|
| 2674 | + |
---|
| 2675 | +static inline bool sched_energy_enabled(void) |
---|
| 2676 | +{ |
---|
| 2677 | + return static_branch_unlikely(&sched_energy_present); |
---|
| 2678 | +} |
---|
| 2679 | + |
---|
| 2680 | +#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ |
---|
| 2681 | + |
---|
2483 | 2682 | #define perf_domain_span(pd) NULL |
---|
2484 | | -#endif |
---|
| 2683 | +static inline bool sched_energy_enabled(void) { return false; } |
---|
| 2684 | + |
---|
| 2685 | +#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ |
---|
| 2686 | + |
---|
| 2687 | +#ifdef CONFIG_MEMBARRIER |
---|
| 2688 | +/* |
---|
| 2689 | + * The scheduler provides memory barriers required by membarrier between: |
---|
| 2690 | + * - prior user-space memory accesses and store to rq->membarrier_state, |
---|
| 2691 | + * - store to rq->membarrier_state and following user-space memory accesses. |
---|
| 2692 | + * In the same way it provides those guarantees around store to rq->curr. |
---|
| 2693 | + */ |
---|
| 2694 | +static inline void membarrier_switch_mm(struct rq *rq, |
---|
| 2695 | + struct mm_struct *prev_mm, |
---|
| 2696 | + struct mm_struct *next_mm) |
---|
| 2697 | +{ |
---|
| 2698 | + int membarrier_state; |
---|
| 2699 | + |
---|
| 2700 | + if (prev_mm == next_mm) |
---|
| 2701 | + return; |
---|
| 2702 | + |
---|
| 2703 | + membarrier_state = atomic_read(&next_mm->membarrier_state); |
---|
| 2704 | + if (READ_ONCE(rq->membarrier_state) == membarrier_state) |
---|
| 2705 | + return; |
---|
| 2706 | + |
---|
| 2707 | + WRITE_ONCE(rq->membarrier_state, membarrier_state); |
---|
| 2708 | +} |
---|
| 2709 | +#else |
---|
| 2710 | +static inline void membarrier_switch_mm(struct rq *rq, |
---|
| 2711 | + struct mm_struct *prev_mm, |
---|
| 2712 | + struct mm_struct *next_mm) |
---|
| 2713 | +{ |
---|
| 2714 | +} |
---|
2485 | 2715 | #endif |
---|
2486 | 2716 | |
---|
2487 | 2717 | #ifdef CONFIG_SMP |
---|
2488 | | -extern struct static_key_false sched_energy_present; |
---|
| 2718 | +static inline bool is_per_cpu_kthread(struct task_struct *p) |
---|
| 2719 | +{ |
---|
| 2720 | + if (!(p->flags & PF_KTHREAD)) |
---|
| 2721 | + return false; |
---|
| 2722 | + |
---|
| 2723 | + if (p->nr_cpus_allowed != 1) |
---|
| 2724 | + return false; |
---|
| 2725 | + |
---|
| 2726 | + return true; |
---|
| 2727 | +} |
---|
2489 | 2728 | #endif |
---|
| 2729 | + |
---|
| 2730 | +void swake_up_all_locked(struct swait_queue_head *q); |
---|
| 2731 | +void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); |
---|
| 2732 | + |
---|
| 2733 | +/* |
---|
| 2734 | + * task_may_not_preempt - check whether a task may not be preemptible soon |
---|
| 2735 | + */ |
---|
| 2736 | +#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION |
---|
| 2737 | +extern bool task_may_not_preempt(struct task_struct *task, int cpu); |
---|
| 2738 | +#else |
---|
| 2739 | +static inline bool task_may_not_preempt(struct task_struct *task, int cpu) |
---|
| 2740 | +{ |
---|
| 2741 | + return false; |
---|
| 2742 | +} |
---|
| 2743 | +#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */ |
---|