hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/sched/sched.h
@@ -59,15 +59,18 @@
 #include <linux/psi.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/security.h>
-#include <linux/stackprotector.h>
 #include <linux/stop_machine.h>
 #include <linux/suspend.h>
 #include <linux/swait.h>
 #include <linux/syscalls.h>
 #include <linux/task_work.h>
 #include <linux/tsacct_kern.h>
+#include <linux/android_vendor.h>
+#include <linux/android_kabi.h>

 #include <asm/tlb.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <soc/rockchip/rockchip_performance.h>

 #ifdef CONFIG_PARAVIRT
 # include <asm/paravirt.h>
@@ -76,13 +79,13 @@
 #include "cpupri.h"
 #include "cpudeadline.h"

+#include <trace/events/sched.h>
+
 #ifdef CONFIG_SCHED_DEBUG
 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
 #else
 # define SCHED_WARN_ON(x) ({ (void)(x), 0; })
 #endif
-
-#include "tune.h"

 struct rq;
 struct cpuidle_state;
@@ -99,12 +102,7 @@
 extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq, long adjust);

-#ifdef CONFIG_SMP
-extern void cpu_load_update_active(struct rq *this_rq);
-#else
-static inline void cpu_load_update_active(struct rq *this_rq) { }
-#endif
-
+extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -187,6 +185,11 @@
 		rt_policy(policy) || dl_policy(policy);
 }

+static inline int task_has_idle_policy(struct task_struct *p)
+{
+	return idle_policy(p->policy);
+}
+
 static inline int task_has_rt_policy(struct task_struct *p)
 {
 	return rt_policy(p->policy);
@@ -198,6 +201,19 @@
 }

 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+static inline void update_avg(u64 *avg, u64 sample)
+{
+	s64 diff = sample - *avg;
+	*avg += diff / 8;
+}
+
+/*
+ * Shifting a value by an exponent greater *or equal* to the size of said value
+ * is UB; cap at size-1.
+ */
+#define shr_bound(val, shift) \
+	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))

 /*
  * !! For sched_setattr_nocheck() (kernel) only !!
@@ -304,14 +320,28 @@
 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 }

-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
+				 u64 old_bw, u64 new_bw)
 {
 	return dl_b->bw != -1 &&
-	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
 }

-extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
+/*
+ * Verify the fitness of task @p to run on @cpu taking into account the
+ * CPU original capacity and the runtime/deadline ratio of the task.
+ *
+ * The function will return true if the CPU original capacity of the
+ * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
+ * task and false otherwise.
+ */
+static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
+{
+	unsigned long cap = arch_scale_cpu_capacity(cpu);
+
+	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
+}
+
 extern void init_dl_bw(struct dl_bw *dl_b);
 extern int sched_dl_global_validate(void);
 extern void sched_dl_do_global(void);
@@ -320,9 +350,8 @@
 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
 extern bool __checkparam_dl(const struct sched_attr *attr);
 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
-extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern bool dl_cpu_busy(unsigned int cpu);
+extern int dl_cpu_busy(int cpu, struct task_struct *p);

 #ifdef CONFIG_CGROUP_SCHED

@@ -342,8 +371,9 @@
 	u64 runtime;
 	s64 hierarchical_quota;

-	short idle;
-	short period_active;
+	u8 idle;
+	u8 period_active;
+	u8 slack_started;
 	struct hrtimer period_timer;
 	struct hrtimer slack_timer;
 	struct list_head throttled_cfs_rq;
@@ -352,8 +382,6 @@
 	int nr_periods;
 	int nr_throttled;
 	u64 throttled_time;
-
-	bool distribute_running;
 #endif
 };

@@ -407,8 +435,14 @@
 	struct uclamp_se uclamp[UCLAMP_CNT];
 	/* Latency-sensitive flag used for a task group */
 	unsigned int latency_sensitive;
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
 #endif

+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
+	ANDROID_KABI_RESERVE(3);
+	ANDROID_KABI_RESERVE(4);
 };

 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -497,9 +531,9 @@
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
-	unsigned long runnable_weight;
 	unsigned int nr_running;
-	unsigned int h_nr_running;
+	unsigned int h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int idle_h_nr_running; /* SCHED_IDLE */

 	u64 exec_clock;
 	u64 min_vruntime;
@@ -535,7 +569,7 @@
 		int nr;
 		unsigned long load_avg;
 		unsigned long util_avg;
-		unsigned long runnable_sum;
+		unsigned long runnable_avg;
 	} removed;

 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -575,12 +609,14 @@
 	s64 runtime_remaining;

 	u64 throttled_clock;
-	u64 throttled_clock_task;
-	u64 throttled_clock_task_time;
+	u64 throttled_clock_pelt;
+	u64 throttled_clock_pelt_time;
 	int throttled;
 	int throttle_count;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 16);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 };

@@ -646,7 +682,7 @@
 	/*
 	 * Deadline values of the currently executing and the
 	 * earliest ready task on this rq. Caching these facilitates
-	 * the decision wether or not a ready but not running task
+	 * the decision whether or not a ready but not running task
 	 * should migrate somewhere else.
 	 */
 	struct {
@@ -695,8 +731,30 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se) (!se->my_q)
+
+static inline void se_update_runnable(struct sched_entity *se)
+{
+	if (!entity_is_task(se))
+		se->runnable_weight = se->my_q->h_nr_running;
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+	if (entity_is_task(se))
+		return !!se->on_rq;
+	else
+		return se->runnable_weight;
+}
+
 #else
 #define entity_is_task(se) 1
+
+static inline void se_update_runnable(struct sched_entity *se) {}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+	return !!se->on_rq;
+}
 #endif

 #ifdef CONFIG_SMP
@@ -708,10 +766,6 @@
 	return scale_load_down(se->load.weight);
 }

-static inline long se_runnable(struct sched_entity *se)
-{
-	return scale_load_down(se->runnable_weight);
-}

 static inline bool sched_asym_prefer(int a, int b)
 {
@@ -722,12 +776,6 @@
 	struct em_perf_domain *em_pd;
 	struct perf_domain *next;
 	struct rcu_head rcu;
-};
-
-struct max_cpu_capacity {
-	raw_spinlock_t lock;
-	unsigned long val;
-	int cpu;
 };

 /* Scheduling group status flags */
@@ -788,27 +836,23 @@
 	cpumask_var_t rto_mask;
 	struct cpupri cpupri;

-	/* Maximum cpu capacity in the system. */
-	struct max_cpu_capacity max_cpu_capacity;
+	unsigned long max_cpu_capacity;

 	/*
 	 * NULL-terminated list of performance domains intersecting with the
 	 * CPUs of the rd. Protected by RCU.
 	 */
-	struct perf_domain *pd;
+	struct perf_domain __rcu *pd;

-	/* Vendor fields. */
-	/* First cpu with maximum and minimum original capacity */
-	int max_cap_orig_cpu, min_cap_orig_cpu;
-	/* First cpu with mid capacity */
-	int mid_cap_orig_cpu;
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
+	ANDROID_KABI_RESERVE(3);
+	ANDROID_KABI_RESERVE(4);
 };

-extern struct root_domain def_root_domain;
-extern struct mutex sched_domains_mutex;
-
 extern void init_defrootdomain(void);
-extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 extern void sched_get_rd(struct root_domain *rd);
@@ -817,6 +861,7 @@
 #ifdef HAVE_RT_PUSH_IPI
 extern void rto_push_irq_work_func(struct irq_work *work);
 #endif
+extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
 #endif /* CONFIG_SMP */

 #ifdef CONFIG_UCLAMP_TASK
@@ -859,6 +904,8 @@
 	unsigned int value;
 	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
 };
+
+DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
 #endif /* CONFIG_UCLAMP_TASK */

 /*
@@ -882,21 +929,19 @@
 	unsigned int nr_preferred_running;
 	unsigned int numa_migrate_on;
 #endif
-	#define CPU_LOAD_IDX_MAX 5
-	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
-	unsigned long last_load_update_tick;
 	unsigned long last_blocked_load_update_tick;
 	unsigned int has_blocked_load;
+	call_single_data_t nohz_csd;
 #endif /* CONFIG_SMP */
 	unsigned int nohz_tick_stopped;
-	atomic_t nohz_flags;
+	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */

-	/* capture load from *all* tasks on this CPU: */
-	struct load_weight load;
-	unsigned long nr_load_updates;
+#ifdef CONFIG_SMP
+	unsigned int ttwu_pending;
+#endif
 	u64 nr_switches;

 #ifdef CONFIG_UCLAMP_TASK
@@ -924,7 +969,7 @@
 	 */
 	unsigned long nr_uninterruptible;

-	struct task_struct *curr;
+	struct task_struct __rcu *curr;
 	struct task_struct *idle;
 	struct task_struct *stop;
 	unsigned long next_balance;
@@ -939,15 +984,20 @@

 	atomic_t nr_iowait;

+#ifdef CONFIG_MEMBARRIER
+	int membarrier_state;
+#endif
+
 #ifdef CONFIG_SMP
-	struct root_domain *rd;
-	struct sched_domain *sd;
+	struct root_domain *rd;
+	struct sched_domain __rcu *sd;

 	unsigned long cpu_capacity;
 	unsigned long cpu_capacity_orig;

 	struct callback_head *balance_callback;

+	unsigned char nohz_idle_balance;
 	unsigned char idle_balance;

 	unsigned long misfit_task_load;
@@ -968,12 +1018,15 @@
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	struct sched_avg avg_irq;
 #endif
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+	struct sched_avg avg_thermal;
+#endif
 	u64 idle_stamp;
 	u64 avg_idle;

 	/* This is used to determine avg_idle's max value */
 	u64 max_idle_balance_cost;
-#endif
+#endif /* CONFIG_SMP */

 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
@@ -991,10 +1044,10 @@

 #ifdef CONFIG_SCHED_HRTICK
 #ifdef CONFIG_SMP
-	int hrtick_csd_pending;
 	call_single_data_t hrtick_csd;
 #endif
 	struct hrtimer hrtick_timer;
+	ktime_t hrtick_time;
 #endif

 #ifdef CONFIG_SCHEDSTATS
@@ -1015,15 +1068,23 @@
 	unsigned int ttwu_local;
 #endif

-#ifdef CONFIG_SMP
-	struct llist_head wake_list;
+#ifdef CONFIG_HOTPLUG_CPU
+	struct cpu_stop_work drain;
+	struct cpu_stop_done drain_done;
 #endif

 #ifdef CONFIG_CPU_IDLE
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state *idle_state;
-	int idle_state_idx;
 #endif
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 96);
+	ANDROID_OEM_DATA_ARRAY(1, 16);
+
+	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_RESERVE(2);
+	ANDROID_KABI_RESERVE(3);
+	ANDROID_KABI_RESERVE(4);
 };

 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1132,6 +1193,41 @@
 	return rq->clock_task;
 }

+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(u64, clock_task_mult);
+
+static inline u64 rq_clock_task_mult(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	assert_clock_updated(rq);
+
+	return per_cpu(clock_task_mult, cpu_of(rq));
+}
+#else
+static inline u64 rq_clock_task_mult(struct rq *rq)
+{
+	return rq_clock_task(rq);
+}
+#endif
+
+/**
+ * By default the decay is the default pelt decay period.
+ * The decay shift can change the decay period in
+ * multiples of 32.
+ *  Decay shift	Decay period(ms)
+ *	0		32
+ *	1		64
+ *	2		128
+ *	3		256
+ *	4		512
+ */
+extern int sched_thermal_decay_shift;
+
+static inline u64 rq_clock_thermal(struct rq *rq)
+{
+	return rq_clock_task(rq) >> sched_thermal_decay_shift;
+}
+
 static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
@@ -1161,6 +1257,16 @@
 #endif
 };

+/*
+ * Lockdep annotation that avoids accidental unlocks; it's like a
+ * sticky/continuous lockdep_assert_held().
+ *
+ * This avoids code that has access to 'struct rq *rq' (basically everything in
+ * the scheduler) from accidentally unlocking the rq if they do not also have a
+ * copy of the (on-stack) 'struct rq_flags rf'.
+ *
+ * Also see Documentation/locking/lockdep-design.rst.
+ */
 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 {
 	rf->cookie = lockdep_pin_lock(&rq->lock);
@@ -1294,16 +1400,18 @@
 extern enum numa_topology_type sched_numa_topology_type;
 extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
-#endif
-
-#ifdef CONFIG_NUMA
 extern void sched_init_numa(void);
 extern void sched_domains_numa_masks_set(unsigned int cpu);
 extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
 #else
 static inline void sched_init_numa(void) { }
 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+	return nr_cpu_ids;
+}
 #endif

 #ifdef CONFIG_NUMA_BALANCING
@@ -1316,8 +1424,6 @@
 };
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
-			int cpu, int scpu);
 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
 #else
 static inline void
@@ -1328,6 +1434,8 @@

 #ifdef CONFIG_SMP

+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+			int cpu, int scpu);
 static inline void
 queue_balance_callback(struct rq *rq,
 		       struct callback_head *head,
@@ -1343,15 +1451,13 @@
 	rq->balance_callback = head;
 }

-extern void sched_ttwu_pending(void);
-
 #define rcu_dereference_check_sched_domain(p) \
 	rcu_dereference_check((p), \
 			      lockdep_is_held(&sched_domains_mutex))

 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
- * See detach_destroy_domains: synchronize_sched for details.
+ * See destroy_sched_domains: call_rcu for details.
  *
  * The domain tree of any CPU may only be accessed from within
  * preempt-disabled sections.
@@ -1359,8 +1465,6 @@
 #define for_each_domain(cpu, __sd) \
 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 			__sd; __sd = __sd->parent)
-
-#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

 /**
  * highest_flag_domain - Return highest sched_domain containing flag.
@@ -1396,13 +1500,13 @@
 	return sd;
 }

-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;

 struct sched_group_capacity {
@@ -1421,7 +1525,7 @@
 	int id;
 #endif

-	unsigned long cpumask[0]; /* Balance mask */
+	unsigned long cpumask[]; /* Balance mask */
 };

 struct sched_group {
@@ -1439,7 +1543,7 @@
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
 	 */
-	unsigned long cpumask[0];
+	unsigned long cpumask[];
 };

 static inline struct cpumask *sched_group_span(struct sched_group *sg)
@@ -1482,11 +1586,11 @@
 }
 #endif

-#else
+extern void flush_smp_call_function_from_idle(void);

-static inline void sched_ttwu_pending(void) { }
-
-#endif /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
+static inline void flush_smp_call_function_from_idle(void) { }
+#endif

 #include "stats.h"
 #include "autogroup.h"
@@ -1546,7 +1650,7 @@
 #ifdef CONFIG_SMP
 	/*
 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
+	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
 	 */
 	smp_wmb();
@@ -1598,6 +1702,8 @@
 #undef SCHED_FEAT

 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
+
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

 #else /* !CONFIG_JUMP_LABEL */
@@ -1669,7 +1775,9 @@
  */
 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
 #define WF_FORK 0x02 /* Child wakeup after fork */
-#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
+#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
+#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
+#define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */

 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1723,10 +1831,11 @@
 #define ENQUEUE_MIGRATED 0x00
 #endif

+#define ENQUEUE_WAKEUP_SYNC 0x80
+
 #define RETRY_TASK ((void *)-1UL)

 struct sched_class {
-	const struct sched_class *next;

 #ifdef CONFIG_UCLAMP_TASK
 	int uclamp_enabled;
@@ -1735,26 +1844,18 @@
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
+	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

-	/*
-	 * It is the responsibility of the pick_next_task() method that will
-	 * return the next task to call put_prev_task() on the @prev task or
-	 * something equivalent.
-	 *
-	 * May return RETRY_TASK when it finds a higher prio class has runnable
-	 * tasks.
-	 */
-	struct task_struct * (*pick_next_task)(struct rq *rq,
-					       struct task_struct *prev,
-					       struct rq_flags *rf);
+	struct task_struct *(*pick_next_task)(struct rq *rq);
+
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
-			      int subling_count_hint);
+	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -1766,7 +1867,6 @@
 	void (*rq_offline)(struct rq *rq);
 #endif

-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1792,25 +1892,32 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_change_group)(struct task_struct *p, int type);
 #endif
-};
+} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */

 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	WARN_ON_ONCE(rq->curr != prev);
 	prev->sched_class->put_prev_task(rq, prev);
 }

-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next, false);
 }

-#ifdef CONFIG_SMP
-#define sched_class_highest (&stop_sched_class)
-#else
-#define sched_class_highest (&dl_sched_class)
-#endif
+/* Defined in include/asm-generic/vmlinux.lds.h */
+extern struct sched_class __begin_sched_classes[];
+extern struct sched_class __end_sched_classes[];
+
+#define sched_class_highest (__end_sched_classes - 1)
+#define sched_class_lowest (__begin_sched_classes - 1)
+
+#define for_class_range(class, _from, _to) \
+	for (class = (_from); class != (_to); class--)
+
 #define for_each_class(class) \
-	for (class = sched_class_highest; class; class = class->next)
+	for_class_range(class, sched_class_highest, sched_class_lowest)

 extern const struct sched_class stop_sched_class;
 extern const struct sched_class dl_sched_class;
@@ -1818,6 +1925,28 @@
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;

+static inline bool sched_stop_runnable(struct rq *rq)
+{
+	return rq->stop && task_on_rq_queued(rq->stop);
+}
+
+static inline bool sched_dl_runnable(struct rq *rq)
+{
+	return rq->dl.dl_nr_running > 0;
+}
+
+static inline bool sched_rt_runnable(struct rq *rq)
+{
+	return rq->rt.rt_queued > 0;
+}
+
+static inline bool sched_fair_runnable(struct rq *rq)
+{
+	return rq->cfs.nr_running > 0;
+}
+
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+extern struct task_struct *pick_next_task_idle(struct rq *rq);

 #ifdef CONFIG_SMP

@@ -1827,6 +1956,7 @@

 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

+extern unsigned long __read_mostly max_load_balance_interval;
 #endif

 #ifdef CONFIG_CPU_IDLE
@@ -1842,17 +1972,6 @@

 	return rq->idle_state;
 }
-
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
-{
-	rq->idle_state_idx = idle_state_idx;
-}
-
-static inline int idle_get_state_idx(struct rq *rq)
-{
-	WARN_ON(!rcu_read_lock_held());
-	return rq->idle_state_idx;
-}
 #else
 static inline void idle_set_state(struct rq *rq,
 				  struct cpuidle_state *idle_state)
@@ -1862,15 +1981,6 @@
 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 {
 	return NULL;
-}
-
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
-{
-}
-
-static inline int idle_get_state_idx(struct rq *rq)
-{
-	return -1;
 }
 #endif

@@ -1896,15 +2006,16 @@
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
-extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

 #define BW_SHIFT 20
 #define BW_UNIT (1 << BW_SHIFT)
 #define RATIO_SHIFT 8
+#define MAX_BW_BITS (64 - BW_SHIFT)
+#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
 unsigned long to_ratio(u64 period, u64 runtime);

 extern void init_entity_runnable_average(struct sched_entity *se);
-extern void post_init_entity_util_avg(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct task_struct *p);

 #ifdef CONFIG_NO_HZ_FULL
 extern bool sched_can_stop_tick(struct rq *rq);
@@ -1917,12 +2028,7 @@
  */
 static inline void sched_update_tick_dependency(struct rq *rq)
 {
-	int cpu;
-
-	if (!tick_nohz_full_enabled())
-		return;
-
-	cpu = cpu_of(rq);
+	int cpu = cpu_of(rq);

 	if (!tick_nohz_full_cpu(cpu))
 		return;
@@ -1942,13 +2048,16 @@
 	unsigned prev_nr = rq->nr_running;

 	rq->nr_running = prev_nr + count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, count);
+	}

-	if (prev_nr < 2 && rq->nr_running >= 2) {
 #ifdef CONFIG_SMP
+	if (prev_nr < 2 && rq->nr_running >= 2) {
 		if (!READ_ONCE(rq->rd->overload))
 			WRITE_ONCE(rq->rd->overload, 1);
-#endif
 	}
+#endif

 	sched_update_tick_dependency(rq);
 }
@@ -1956,6 +2065,10 @@
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, -count);
+	}
+
 	/* Check if we still need preemption */
 	sched_update_tick_dependency(rq);
 }
@@ -1995,7 +2108,24 @@

 #endif /* CONFIG_SCHED_HRTICK */

+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
 #ifndef arch_scale_freq_capacity
+/**
+ * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
+ *
+ *     f_curr
+ *     ------ * SCHED_CAPACITY_SCALE
+ *     f_max
+ */
 static __always_inline
 unsigned long arch_scale_freq_capacity(int cpu)
 {
@@ -2003,17 +2133,8 @@
 }
 #endif

-#ifndef arch_scale_max_freq_capacity
-struct sched_domain;
-static __always_inline
-unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-#endif
-
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION

 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

@@ -2065,7 +2186,7 @@
 	return ret;
 }

-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */

 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -2298,7 +2419,7 @@
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */

 #ifdef CONFIG_CPU_FREQ
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
@@ -2338,18 +2459,48 @@
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);

+/**
+ * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
+ * @rq: The rq to clamp against. Must not be NULL.
+ * @util: The util value to clamp.
+ * @p: The task to clamp against. Can be NULL if you want to clamp
+ *     against @rq only.
+ *
+ * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
+ *
+ * If sched_uclamp_used static key is disabled, then just return the util
+ * without any clamping since uclamp aggregation at the rq level in the fast
+ * path is disabled, rendering this operation a NOP.
+ *
+ * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
+ * will return the correct effective uclamp value of the task even if the
+ * static key is disabled.
+ */
 static __always_inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 				  struct task_struct *p)
 {
-	unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
-	unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
+	unsigned long min_util = 0;
+	unsigned long max_util = 0;
+
+	if (!static_branch_likely(&sched_uclamp_used))
+		return util;

 	if (p) {
-		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
-		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+		min_util = uclamp_eff_value(p, UCLAMP_MIN);
+		max_util = uclamp_eff_value(p, UCLAMP_MAX);
+
+		/*
+		 * Ignore last runnable task's max clamp, as this task will
+		 * reset it. Similarly, no need to read the rq's min clamp.
+		 */
+		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+			goto out;
 	}

+	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+out:
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
 	 * RUNNABLE tasks with _different_ clamps, we can end up with an
@@ -2360,6 +2511,24 @@

 	return clamp(util, min_util, max_util);
 }
+
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return uclamp_eff_value(p, UCLAMP_MIN) > 0;
+}
+
+/*
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
+ * by default in the fast path and only gets turned on once userspace performs
+ * an operation that requires it.
+ *
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
+ * hence is active.
+ */
+static inline bool uclamp_is_used(void)
+{
+	return static_branch_likely(&sched_uclamp_used);
+}
 #else /* CONFIG_UCLAMP_TASK */
 static inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
@@ -2367,12 +2536,36 @@
 {
 	return util;
 }
+
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return false;
+}
+
+static inline bool uclamp_is_used(void)
+{
+	return false;
+}
 #endif /* CONFIG_UCLAMP_TASK */

-unsigned long task_util_est(struct task_struct *p);
-unsigned int uclamp_task(struct task_struct *p);
-bool uclamp_latency_sensitive(struct task_struct *p);
-bool uclamp_boosted(struct task_struct *p);
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
+	struct task_group *tg;
+
+	if (!css)
+		return false;
+	tg = container_of(css, struct task_group, css);
+
+	return tg->latency_sensitive;
+}
+#else
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	return false;
+}
+#endif /* CONFIG_UCLAMP_TASK_GROUP */

 #ifdef arch_scale_freq_capacity
 # ifndef arch_scale_freq_invariant
@@ -2404,20 +2597,6 @@
 	ENERGY_UTIL,
 };

-#ifdef CONFIG_SMP
-static inline unsigned long cpu_util_cfs(struct rq *rq)
-{
-	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
-
-	if (sched_feat(UTIL_EST)) {
-		util = max_t(unsigned long, util,
-			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
-	}
-
-	return util;
-}
-#endif
-
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL

 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
@@ -2434,11 +2613,22 @@
 	return READ_ONCE(rq->avg_dl.util_avg);
 }

+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
+
+	if (sched_feat(UTIL_EST)) {
+		util = max_t(unsigned long, util,
+			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
+	}
+
+	return util;
+}
+
 static inline unsigned long cpu_util_rt(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_rt.util_avg);
 }
-
 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 					       unsigned long max, enum schedutil_type type,
@@ -2476,14 +2666,78 @@
 }
 #endif

-#ifdef CONFIG_SMP
-#ifdef CONFIG_ENERGY_MODEL
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+
 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
-#else
+
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
+
+static inline bool sched_energy_enabled(void)
+{
+	return static_branch_unlikely(&sched_energy_present);
+}
+
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+
 #define perf_domain_span(pd) NULL
-#endif
+static inline bool sched_energy_enabled(void) { return false; }
+
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+
+#ifdef CONFIG_MEMBARRIER
+/*
+ * The scheduler provides memory barriers required by membarrier between:
+ * - prior user-space memory accesses and store to rq->membarrier_state,
+ * - store to rq->membarrier_state and following user-space memory accesses.
+ * In the same way it provides those guarantees around store to rq->curr.
+ */
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+	int membarrier_state;
+
+	if (prev_mm == next_mm)
+		return;
+
+	membarrier_state = atomic_read(&next_mm->membarrier_state);
+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
+		return;
+
+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
+}
+#else
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+}
 #endif

 #ifdef CONFIG_SMP
-extern struct static_key_false sched_energy_present;
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
 #endif
+
+void swake_up_all_locked(struct swait_queue_head *q);
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+#else
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	return false;
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
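
Not part of the patch: a minimal userspace sketch of the clamping idea behind the new shr_bound() macro added above. BITS_PER_TYPE() and min_t() are kernel macros; the stand-ins below are hypothetical equivalents for illustration only (gcc/clang, since typeof is used).

```c
/* Hedged sketch: mimic shr_bound() outside the kernel. */
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's helpers (assumptions, not the real macros). */
#define BITS_PER_TYPE(type)	(sizeof(type) * 8)
#define min_t(type, a, b)	((type)(a) < (type)(b) ? (type)(a) : (type)(b))

/* Same shape as the patch: cap the shift at width-1 so the shift is never UB. */
#define shr_bound(val, shift) \
	((val) >> min_t(typeof(shift), (shift), BITS_PER_TYPE(typeof(val)) - 1))

int main(void)
{
	uint32_t v = 0x80000000u;

	/* A shift of 40 would be undefined for a 32-bit value; it is clamped to 31. */
	printf("%u\n", (unsigned int)shr_bound(v, 40));	/* prints 1 */
	return 0;
}
```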