forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2023-12-09
Commit: b22da3d8526a935aa31e086e63f60ff3246cb61c
File:   kernel/kernel/sched/sched.h
....@@ -59,15 +59,18 @@
5959 #include <linux/psi.h>
6060 #include <linux/rcupdate_wait.h>
6161 #include <linux/security.h>
62
-#include <linux/stackprotector.h>
6362 #include <linux/stop_machine.h>
6463 #include <linux/suspend.h>
6564 #include <linux/swait.h>
6665 #include <linux/syscalls.h>
6766 #include <linux/task_work.h>
6867 #include <linux/tsacct_kern.h>
68
+#include <linux/android_vendor.h>
69
+#include <linux/android_kabi.h>
6970
7071 #include <asm/tlb.h>
72
+#include <asm-generic/vmlinux.lds.h>
73
+#include <soc/rockchip/rockchip_performance.h>
7174
7275 #ifdef CONFIG_PARAVIRT
7376 # include <asm/paravirt.h>
....@@ -76,13 +79,13 @@
7679 #include "cpupri.h"
7780 #include "cpudeadline.h"
7881
82
+#include <trace/events/sched.h>
83
+
7984 #ifdef CONFIG_SCHED_DEBUG
8085 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
8186 #else
8287 # define SCHED_WARN_ON(x) ({ (void)(x), 0; })
8388 #endif
84
-
85
-#include "tune.h"
8689
8790 struct rq;
8891 struct cpuidle_state;
....@@ -99,12 +102,7 @@
99102 extern void calc_global_load_tick(struct rq *this_rq);
100103 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
101104
102
-#ifdef CONFIG_SMP
103
-extern void cpu_load_update_active(struct rq *this_rq);
104
-#else
105
-static inline void cpu_load_update_active(struct rq *this_rq) { }
106
-#endif
107
-
105
+extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
108106 /*
109107 * Helpers for converting nanosecond timing to jiffy resolution
110108 */
....@@ -187,6 +185,11 @@
187185 rt_policy(policy) || dl_policy(policy);
188186 }
189187
188
+static inline int task_has_idle_policy(struct task_struct *p)
189
+{
190
+ return idle_policy(p->policy);
191
+}
192
+
190193 static inline int task_has_rt_policy(struct task_struct *p)
191194 {
192195 return rt_policy(p->policy);
....@@ -198,6 +201,19 @@
198201 }
199202
200203 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
204
+
205
+static inline void update_avg(u64 *avg, u64 sample)
206
+{
207
+ s64 diff = sample - *avg;
208
+ *avg += diff / 8;
209
+}
210
+
211
+/*
212
+ * Shifting a value by an exponent greater *or equal* to the size of said value
213
+ * is UB; cap at size-1.
214
+ */
215
+#define shr_bound(val, shift) \
216
+ (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
201217
202218 /*
203219 * !! For sched_setattr_nocheck() (kernel) only !!
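
The two helpers added in this hunk are small fixed-point utilities: update_avg() keeps an exponentially weighted moving average with a weight of 1/8, and shr_bound() caps the shift count so the expression stays defined for any shift. A worked example of what they compute (illustrative values, not code from this tree):

/*
 * update_avg(): feeding a constant sample of 800 into an average that
 * starts at 0 moves it by 1/8 of the error on each call:
 *
 *   0 -> 100 -> 187 -> 263 -> 330 -> ...   (converging on 800)
 *
 * shr_bound(): "val >> 70" on a u64 is undefined behaviour, so the
 * shift is clamped to BITS_PER_TYPE(u64) - 1 = 63:
 *
 *   shr_bound((u64)1 << 40, 70) == ((u64)1 << 40) >> 63 == 0
 */
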
....@@ -304,14 +320,28 @@
304320 __dl_update(dl_b, -((s32)tsk_bw / cpus));
305321 }
306322
307
-static inline
308
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
323
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
324
+ u64 old_bw, u64 new_bw)
309325 {
310326 return dl_b->bw != -1 &&
311
- dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
327
+ cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
312328 }
313329
314
-extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
330
+/*
331
+ * Verify the fitness of task @p to run on @cpu taking into account the
332
+ * CPU original capacity and the runtime/deadline ratio of the task.
333
+ *
334
+ * The function will return true if the CPU original capacity of the
335
+ * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
336
+ * task and false otherwise.
337
+ */
338
+static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
339
+{
340
+ unsigned long cap = arch_scale_cpu_capacity(cpu);
341
+
342
+ return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
343
+}
344
+
315345 extern void init_dl_bw(struct dl_bw *dl_b);
316346 extern int sched_dl_global_validate(void);
317347 extern void sched_dl_do_global(void);
....@@ -320,9 +350,8 @@
320350 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
321351 extern bool __checkparam_dl(const struct sched_attr *attr);
322352 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
323
-extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
324353 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
325
-extern bool dl_cpu_busy(unsigned int cpu);
354
+extern int dl_cpu_busy(int cpu, struct task_struct *p);
326355
327356 #ifdef CONFIG_CGROUP_SCHED
328357
....@@ -342,8 +371,9 @@
342371 u64 runtime;
343372 s64 hierarchical_quota;
344373
345
- short idle;
346
- short period_active;
374
+ u8 idle;
375
+ u8 period_active;
376
+ u8 slack_started;
347377 struct hrtimer period_timer;
348378 struct hrtimer slack_timer;
349379 struct list_head throttled_cfs_rq;
....@@ -352,8 +382,6 @@
352382 int nr_periods;
353383 int nr_throttled;
354384 u64 throttled_time;
355
-
356
- bool distribute_running;
357385 #endif
358386 };
359387
....@@ -407,8 +435,14 @@
407435 struct uclamp_se uclamp[UCLAMP_CNT];
408436 /* Latency-sensitive flag used for a task group */
409437 unsigned int latency_sensitive;
438
+
439
+ ANDROID_VENDOR_DATA_ARRAY(1, 4);
410440 #endif
411441
442
+ ANDROID_KABI_RESERVE(1);
443
+ ANDROID_KABI_RESERVE(2);
444
+ ANDROID_KABI_RESERVE(3);
445
+ ANDROID_KABI_RESERVE(4);
412446 };
413447
414448 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -497,9 +531,9 @@
497531 /* CFS-related fields in a runqueue */
498532 struct cfs_rq {
499533 struct load_weight load;
500
- unsigned long runnable_weight;
501534 unsigned int nr_running;
502
- unsigned int h_nr_running;
535
+ unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
536
+ unsigned int idle_h_nr_running; /* SCHED_IDLE */
503537
504538 u64 exec_clock;
505539 u64 min_vruntime;
....@@ -535,7 +569,7 @@
535569 int nr;
536570 unsigned long load_avg;
537571 unsigned long util_avg;
538
- unsigned long runnable_sum;
572
+ unsigned long runnable_avg;
539573 } removed;
540574
541575 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -575,12 +609,14 @@
575609 s64 runtime_remaining;
576610
577611 u64 throttled_clock;
578
- u64 throttled_clock_task;
579
- u64 throttled_clock_task_time;
612
+ u64 throttled_clock_pelt;
613
+ u64 throttled_clock_pelt_time;
580614 int throttled;
581615 int throttle_count;
582616 struct list_head throttled_list;
583617 #endif /* CONFIG_CFS_BANDWIDTH */
618
+
619
+ ANDROID_VENDOR_DATA_ARRAY(1, 16);
584620 #endif /* CONFIG_FAIR_GROUP_SCHED */
585621 };
586622
....@@ -646,7 +682,7 @@
646682 /*
647683 * Deadline values of the currently executing and the
648684 * earliest ready task on this rq. Caching these facilitates
649
- * the decision wether or not a ready but not running task
685
+ * the decision whether or not a ready but not running task
650686 * should migrate somewhere else.
651687 */
652688 struct {
....@@ -695,8 +731,30 @@
695731 #ifdef CONFIG_FAIR_GROUP_SCHED
696732 /* An entity is a task if it doesn't "own" a runqueue */
697733 #define entity_is_task(se) (!se->my_q)
734
+
735
+static inline void se_update_runnable(struct sched_entity *se)
736
+{
737
+ if (!entity_is_task(se))
738
+ se->runnable_weight = se->my_q->h_nr_running;
739
+}
740
+
741
+static inline long se_runnable(struct sched_entity *se)
742
+{
743
+ if (entity_is_task(se))
744
+ return !!se->on_rq;
745
+ else
746
+ return se->runnable_weight;
747
+}
748
+
698749 #else
699750 #define entity_is_task(se) 1
751
+
752
+static inline void se_update_runnable(struct sched_entity *se) {}
753
+
754
+static inline long se_runnable(struct sched_entity *se)
755
+{
756
+ return !!se->on_rq;
757
+}
700758 #endif
701759
702760 #ifdef CONFIG_SMP
....@@ -708,10 +766,6 @@
708766 return scale_load_down(se->load.weight);
709767 }
710768
711
-static inline long se_runnable(struct sched_entity *se)
712
-{
713
- return scale_load_down(se->runnable_weight);
714
-}
715769
716770 static inline bool sched_asym_prefer(int a, int b)
717771 {
....@@ -722,12 +776,6 @@
722776 struct em_perf_domain *em_pd;
723777 struct perf_domain *next;
724778 struct rcu_head rcu;
725
-};
726
-
727
-struct max_cpu_capacity {
728
- raw_spinlock_t lock;
729
- unsigned long val;
730
- int cpu;
731779 };
732780
733781 /* Scheduling group status flags */
....@@ -788,27 +836,23 @@
788836 cpumask_var_t rto_mask;
789837 struct cpupri cpupri;
790838
791
- /* Maximum cpu capacity in the system. */
792
- struct max_cpu_capacity max_cpu_capacity;
839
+ unsigned long max_cpu_capacity;
793840
794841 /*
795842 * NULL-terminated list of performance domains intersecting with the
796843 * CPUs of the rd. Protected by RCU.
797844 */
798
- struct perf_domain *pd;
845
+ struct perf_domain __rcu *pd;
799846
800
- /* Vendor fields. */
801
- /* First cpu with maximum and minimum original capacity */
802
- int max_cap_orig_cpu, min_cap_orig_cpu;
803
- /* First cpu with mid capacity */
804
- int mid_cap_orig_cpu;
847
+ ANDROID_VENDOR_DATA_ARRAY(1, 4);
848
+
849
+ ANDROID_KABI_RESERVE(1);
850
+ ANDROID_KABI_RESERVE(2);
851
+ ANDROID_KABI_RESERVE(3);
852
+ ANDROID_KABI_RESERVE(4);
805853 };
806854
807
-extern struct root_domain def_root_domain;
808
-extern struct mutex sched_domains_mutex;
809
-
810855 extern void init_defrootdomain(void);
811
-extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
812856 extern int sched_init_domains(const struct cpumask *cpu_map);
813857 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
814858 extern void sched_get_rd(struct root_domain *rd);
....@@ -817,6 +861,7 @@
817861 #ifdef HAVE_RT_PUSH_IPI
818862 extern void rto_push_irq_work_func(struct irq_work *work);
819863 #endif
864
+extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
820865 #endif /* CONFIG_SMP */
821866
822867 #ifdef CONFIG_UCLAMP_TASK
....@@ -859,6 +904,8 @@
859904 unsigned int value;
860905 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
861906 };
907
+
908
+DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
862909 #endif /* CONFIG_UCLAMP_TASK */
863910
864911 /*
....@@ -882,21 +929,19 @@
882929 unsigned int nr_preferred_running;
883930 unsigned int numa_migrate_on;
884931 #endif
885
- #define CPU_LOAD_IDX_MAX 5
886
- unsigned long cpu_load[CPU_LOAD_IDX_MAX];
887932 #ifdef CONFIG_NO_HZ_COMMON
888933 #ifdef CONFIG_SMP
889
- unsigned long last_load_update_tick;
890934 unsigned long last_blocked_load_update_tick;
891935 unsigned int has_blocked_load;
936
+ call_single_data_t nohz_csd;
892937 #endif /* CONFIG_SMP */
893938 unsigned int nohz_tick_stopped;
894
- atomic_t nohz_flags;
939
+ atomic_t nohz_flags;
895940 #endif /* CONFIG_NO_HZ_COMMON */
896941
897
- /* capture load from *all* tasks on this CPU: */
898
- struct load_weight load;
899
- unsigned long nr_load_updates;
942
+#ifdef CONFIG_SMP
943
+ unsigned int ttwu_pending;
944
+#endif
900945 u64 nr_switches;
901946
902947 #ifdef CONFIG_UCLAMP_TASK
....@@ -924,7 +969,7 @@
924969 */
925970 unsigned long nr_uninterruptible;
926971
927
- struct task_struct *curr;
972
+ struct task_struct __rcu *curr;
928973 struct task_struct *idle;
929974 struct task_struct *stop;
930975 unsigned long next_balance;
....@@ -939,15 +984,21 @@
939984
940985 atomic_t nr_iowait;
941986
987
+#ifdef CONFIG_MEMBARRIER
988
+ int membarrier_state;
989
+#endif
990
+
942991 #ifdef CONFIG_SMP
943
- struct root_domain *rd;
944
- struct sched_domain *sd;
992
+ struct root_domain *rd;
993
+ struct sched_domain __rcu *sd;
945994
946995 unsigned long cpu_capacity;
947996 unsigned long cpu_capacity_orig;
948997
949998 struct callback_head *balance_callback;
999
+ unsigned char balance_flags;
9501000
1001
+ unsigned char nohz_idle_balance;
9511002 unsigned char idle_balance;
9521003
9531004 unsigned long misfit_task_load;
....@@ -968,12 +1019,19 @@
9681019 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
9691020 struct sched_avg avg_irq;
9701021 #endif
1022
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
1023
+ struct sched_avg avg_thermal;
1024
+#endif
9711025 u64 idle_stamp;
9721026 u64 avg_idle;
9731027
9741028 /* This is used to determine avg_idle's max value */
9751029 u64 max_idle_balance_cost;
1030
+
1031
+#ifdef CONFIG_HOTPLUG_CPU
1032
+ struct rcuwait hotplug_wait;
9761033 #endif
1034
+#endif /* CONFIG_SMP */
9771035
9781036 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
9791037 u64 prev_irq_time;
....@@ -991,10 +1049,10 @@
9911049
9921050 #ifdef CONFIG_SCHED_HRTICK
9931051 #ifdef CONFIG_SMP
994
- int hrtick_csd_pending;
9951052 call_single_data_t hrtick_csd;
9961053 #endif
9971054 struct hrtimer hrtick_timer;
1055
+ ktime_t hrtick_time;
9981056 #endif
9991057
10001058 #ifdef CONFIG_SCHEDSTATS
....@@ -1015,19 +1073,29 @@
10151073 unsigned int ttwu_local;
10161074 #endif
10171075
1018
-#ifdef CONFIG_SMP
1019
- struct llist_head wake_list;
1076
+#ifdef CONFIG_HOTPLUG_CPU
1077
+ struct cpu_stop_work drain;
1078
+ struct cpu_stop_done drain_done;
10201079 #endif
10211080
10221081 #ifdef CONFIG_CPU_IDLE
10231082 /* Must be inspected within a rcu lock section */
10241083 struct cpuidle_state *idle_state;
1025
- int idle_state_idx;
10261084 #endif
10271085
1028
-#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP)
1029
- int nr_pinned;
1086
+#ifdef CONFIG_SMP
1087
+ unsigned int nr_pinned;
10301088 #endif
1089
+ unsigned int push_busy;
1090
+ struct cpu_stop_work push_work;
1091
+
1092
+ ANDROID_VENDOR_DATA_ARRAY(1, 96);
1093
+ ANDROID_OEM_DATA_ARRAY(1, 16);
1094
+
1095
+ ANDROID_KABI_RESERVE(1);
1096
+ ANDROID_KABI_RESERVE(2);
1097
+ ANDROID_KABI_RESERVE(3);
1098
+ ANDROID_KABI_RESERVE(4);
10311099 };
10321100
10331101 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -1136,6 +1204,41 @@
11361204 return rq->clock_task;
11371205 }
11381206
1207
+#ifdef CONFIG_SMP
1208
+DECLARE_PER_CPU(u64, clock_task_mult);
1209
+
1210
+static inline u64 rq_clock_task_mult(struct rq *rq)
1211
+{
1212
+ lockdep_assert_held(&rq->lock);
1213
+ assert_clock_updated(rq);
1214
+
1215
+ return per_cpu(clock_task_mult, cpu_of(rq));
1216
+}
1217
+#else
1218
+static inline u64 rq_clock_task_mult(struct rq *rq)
1219
+{
1220
+ return rq_clock_task(rq);
1221
+}
1222
+#endif
1223
+
1224
+/**
1225
+ * By default the decay is the default pelt decay period.
1226
+ * The decay shift can change the decay period in
1227
+ * multiples of 32.
1228
+ * Decay shift Decay period(ms)
1229
+ * 0 32
1230
+ * 1 64
1231
+ * 2 128
1232
+ * 3 256
1233
+ * 4 512
1234
+ */
1235
+extern int sched_thermal_decay_shift;
1236
+
1237
+static inline u64 rq_clock_thermal(struct rq *rq)
1238
+{
1239
+ return rq_clock_task(rq) >> sched_thermal_decay_shift;
1240
+}
1241
+
11391242 static inline void rq_clock_skip_update(struct rq *rq)
11401243 {
11411244 lockdep_assert_held(&rq->lock);
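
rq_clock_thermal() implements the table above by right-shifting the task clock: each increment of sched_thermal_decay_shift halves how fast the thermal-pressure PELT signal sees time pass, doubling its effective decay period. Checking the arithmetic with illustrative values:

/*
 * With sched_thermal_decay_shift == 2, 128 ms of task-clock time is
 * presented to the thermal PELT signal as 128 >> 2 = 32 ms, i.e. one
 * default PELT period -- hence the "128" row in the table above:
 *
 *   rq_clock_task(rq)    = 128000000 ns
 *   rq_clock_thermal(rq) = 128000000 >> 2 = 32000000 ns
 */
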
....@@ -1165,6 +1268,16 @@
11651268 #endif
11661269 };
11671270
1271
+/*
1272
+ * Lockdep annotation that avoids accidental unlocks; it's like a
1273
+ * sticky/continuous lockdep_assert_held().
1274
+ *
1275
+ * This avoids code that has access to 'struct rq *rq' (basically everything in
1276
+ * the scheduler) from accidentally unlocking the rq if they do not also have a
1277
+ * copy of the (on-stack) 'struct rq_flags rf'.
1278
+ *
1279
+ * Also see Documentation/locking/lockdep-design.rst.
1280
+ */
11681281 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
11691282 {
11701283 rf->cookie = lockdep_pin_lock(&rq->lock);
....@@ -1172,6 +1285,9 @@
11721285 #ifdef CONFIG_SCHED_DEBUG
11731286 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
11741287 rf->clock_update_flags = 0;
1288
+#endif
1289
+#ifdef CONFIG_SMP
1290
+ SCHED_WARN_ON(rq->balance_callback);
11751291 #endif
11761292 }
11771293
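
The pin/unpin machinery documented above turns lockdep_assert_held() into a sticky assertion: once rq->lock is pinned into the rq_flags cookie, any unlock before the matching unpin is reported. A minimal sketch of the usage pattern, assuming the rq_lock()/rq_unlock() wrappers defined later in this file:

/* Illustrative only. */
static void pinned_section_sketch(struct rq *rq)
{
        struct rq_flags rf;

        rq_lock(rq, &rf);               /* takes rq->lock, pins it via rf.cookie */

        /*
         * Dropping rq->lock here without first calling rq_unpin_lock(rq, &rf)
         * (e.g. a stray raw_spin_unlock(&rq->lock)) now triggers a lockdep
         * report, even where a point-in-time lockdep_assert_held() would
         * have been satisfied.
         */

        rq_unlock(rq, &rf);             /* unpins, then releases rq->lock */
}
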
....@@ -1298,16 +1414,18 @@
12981414 extern enum numa_topology_type sched_numa_topology_type;
12991415 extern int sched_max_numa_distance;
13001416 extern bool find_numa_distance(int distance);
1301
-#endif
1302
-
1303
-#ifdef CONFIG_NUMA
13041417 extern void sched_init_numa(void);
13051418 extern void sched_domains_numa_masks_set(unsigned int cpu);
13061419 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1420
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
13071421 #else
13081422 static inline void sched_init_numa(void) { }
13091423 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
13101424 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1425
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1426
+{
1427
+ return nr_cpu_ids;
1428
+}
13111429 #endif
13121430
13131431 #ifdef CONFIG_NUMA_BALANCING
....@@ -1320,8 +1438,6 @@
13201438 };
13211439 extern void sched_setnuma(struct task_struct *p, int node);
13221440 extern int migrate_task_to(struct task_struct *p, int cpu);
1323
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1324
- int cpu, int scpu);
13251441 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
13261442 #else
13271443 static inline void
....@@ -1332,6 +1448,11 @@
13321448
13331449 #ifdef CONFIG_SMP
13341450
1451
+#define BALANCE_WORK 0x01
1452
+#define BALANCE_PUSH 0x02
1453
+
1454
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1455
+ int cpu, int scpu);
13351456 static inline void
13361457 queue_balance_callback(struct rq *rq,
13371458 struct callback_head *head,
....@@ -1339,15 +1460,14 @@
13391460 {
13401461 lockdep_assert_held(&rq->lock);
13411462
1342
- if (unlikely(head->next))
1463
+ if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
13431464 return;
13441465
13451466 head->func = (void (*)(struct callback_head *))func;
13461467 head->next = rq->balance_callback;
13471468 rq->balance_callback = head;
1469
+ rq->balance_flags |= BALANCE_WORK;
13481470 }
1349
-
1350
-extern void sched_ttwu_pending(void);
13511471
13521472 #define rcu_dereference_check_sched_domain(p) \
13531473 rcu_dereference_check((p), \
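
queue_balance_callback() now refuses to queue work once BALANCE_PUSH is set (the CPU is being emptied for hotplug) and tags the runqueue with BALANCE_WORK otherwise. The typical callers are the RT and deadline classes; a hedged sketch of the calling pattern (the names below are illustrative, not from this tree):

static DEFINE_PER_CPU(struct callback_head, my_push_head);

static void my_push_tasks(struct rq *rq)
{
        /* ... push overloaded tasks away from this rq ... */
}

static inline void my_queue_push_tasks(struct rq *rq)
{
        /* No-op if already queued or if the rq is in BALANCE_PUSH mode. */
        queue_balance_callback(rq, &per_cpu(my_push_head, cpu_of(rq)),
                               my_push_tasks);
}
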
....@@ -1355,7 +1475,7 @@
13551475
13561476 /*
13571477 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1358
- * See detach_destroy_domains: synchronize_sched for details.
1478
+ * See destroy_sched_domains: call_rcu for details.
13591479 *
13601480 * The domain tree of any CPU may only be accessed from within
13611481 * preempt-disabled sections.
....@@ -1363,8 +1483,6 @@
13631483 #define for_each_domain(cpu, __sd) \
13641484 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
13651485 __sd; __sd = __sd->parent)
1366
-
1367
-#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
13681486
13691487 /**
13701488 * highest_flag_domain - Return highest sched_domain containing flag.
....@@ -1400,13 +1518,13 @@
14001518 return sd;
14011519 }
14021520
1403
-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
1521
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
14041522 DECLARE_PER_CPU(int, sd_llc_size);
14051523 DECLARE_PER_CPU(int, sd_llc_id);
1406
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1407
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
1408
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
1409
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
1524
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1525
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1526
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1527
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
14101528 extern struct static_key_false sched_asym_cpucapacity;
14111529
14121530 struct sched_group_capacity {
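
The __rcu annotations on these per-CPU sched_domain pointers let sparse verify that readers go through the RCU accessors under rcu_read_lock(). An illustrative read side, consistent with how rq->sd is already dereferenced in this file:

/* Illustrative: look up the LLC-domain size of @cpu under RCU. */
static inline int llc_size_of(int cpu)
{
        struct sched_domain *sd;
        int size = 1;

        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_llc, cpu));
        if (sd)
                size = per_cpu(sd_llc_size, cpu);
        rcu_read_unlock();

        return size;
}
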
....@@ -1425,7 +1543,7 @@
14251543 int id;
14261544 #endif
14271545
1428
- unsigned long cpumask[0]; /* Balance mask */
1546
+ unsigned long cpumask[]; /* Balance mask */
14291547 };
14301548
14311549 struct sched_group {
....@@ -1443,7 +1561,7 @@
14431561 * by attaching extra space to the end of the structure,
14441562 * depending on how many CPUs the kernel has booted up with)
14451563 */
1446
- unsigned long cpumask[0];
1564
+ unsigned long cpumask[];
14471565 };
14481566
14491567 static inline struct cpumask *sched_group_span(struct sched_group *sg)
....@@ -1486,11 +1604,11 @@
14861604 }
14871605 #endif
14881606
1489
-#else
1607
+extern void flush_smp_call_function_from_idle(void);
14901608
1491
-static inline void sched_ttwu_pending(void) { }
1492
-
1493
-#endif /* CONFIG_SMP */
1609
+#else /* !CONFIG_SMP: */
1610
+static inline void flush_smp_call_function_from_idle(void) { }
1611
+#endif
14941612
14951613 #include "stats.h"
14961614 #include "autogroup.h"
....@@ -1550,7 +1668,7 @@
15501668 #ifdef CONFIG_SMP
15511669 /*
15521670 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1553
- * successfuly executed on another CPU. We must ensure that updates of
1671
+ * successfully executed on another CPU. We must ensure that updates of
15541672 * per-task data have been completed by this moment.
15551673 */
15561674 smp_wmb();
....@@ -1602,6 +1720,8 @@
16021720 #undef SCHED_FEAT
16031721
16041722 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1723
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
1724
+
16051725 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
16061726
16071727 #else /* !CONFIG_JUMP_LABEL */
....@@ -1673,8 +1793,10 @@
16731793 */
16741794 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
16751795 #define WF_FORK 0x02 /* Child wakeup after fork */
1676
-#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
1677
-#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
1796
+#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
1797
+#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
1798
+#define WF_LOCK_SLEEPER 0x10 /* Wakeup spinlock "sleeper" */
1799
+#define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */
16781800
16791801 /*
16801802 * To aid in avoiding the subversion of "niceness" due to uneven distribution
....@@ -1728,10 +1850,11 @@
17281850 #define ENQUEUE_MIGRATED 0x00
17291851 #endif
17301852
1853
+#define ENQUEUE_WAKEUP_SYNC 0x80
1854
+
17311855 #define RETRY_TASK ((void *)-1UL)
17321856
17331857 struct sched_class {
1734
- const struct sched_class *next;
17351858
17361859 #ifdef CONFIG_UCLAMP_TASK
17371860 int uclamp_enabled;
....@@ -1740,38 +1863,32 @@
17401863 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
17411864 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
17421865 void (*yield_task) (struct rq *rq);
1743
- bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1866
+ bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
17441867
17451868 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
17461869
1747
- /*
1748
- * It is the responsibility of the pick_next_task() method that will
1749
- * return the next task to call put_prev_task() on the @prev task or
1750
- * something equivalent.
1751
- *
1752
- * May return RETRY_TASK when it finds a higher prio class has runnable
1753
- * tasks.
1754
- */
1755
- struct task_struct * (*pick_next_task)(struct rq *rq,
1756
- struct task_struct *prev,
1757
- struct rq_flags *rf);
1870
+ struct task_struct *(*pick_next_task)(struct rq *rq);
1871
+
17581872 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1873
+ void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
17591874
17601875 #ifdef CONFIG_SMP
1761
- int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
1762
- int subling_count_hint);
1876
+ int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1877
+ int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
17631878 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
17641879
17651880 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
17661881
17671882 void (*set_cpus_allowed)(struct task_struct *p,
1768
- const struct cpumask *newmask);
1883
+ const struct cpumask *newmask,
1884
+ u32 flags);
17691885
17701886 void (*rq_online)(struct rq *rq);
17711887 void (*rq_offline)(struct rq *rq);
1888
+
1889
+ struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
17721890 #endif
17731891
1774
- void (*set_curr_task)(struct rq *rq);
17751892 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
17761893 void (*task_fork)(struct task_struct *p);
17771894 void (*task_dead)(struct task_struct *p);
....@@ -1797,25 +1914,32 @@
17971914 #ifdef CONFIG_FAIR_GROUP_SCHED
17981915 void (*task_change_group)(struct task_struct *p, int type);
17991916 #endif
1800
-};
1917
+} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
18011918
18021919 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
18031920 {
1921
+ WARN_ON_ONCE(rq->curr != prev);
18041922 prev->sched_class->put_prev_task(rq, prev);
18051923 }
18061924
1807
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1925
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
18081926 {
1809
- curr->sched_class->set_curr_task(rq);
1927
+ WARN_ON_ONCE(rq->curr != next);
1928
+ next->sched_class->set_next_task(rq, next, false);
18101929 }
18111930
1812
-#ifdef CONFIG_SMP
1813
-#define sched_class_highest (&stop_sched_class)
1814
-#else
1815
-#define sched_class_highest (&dl_sched_class)
1816
-#endif
1931
+/* Defined in include/asm-generic/vmlinux.lds.h */
1932
+extern struct sched_class __begin_sched_classes[];
1933
+extern struct sched_class __end_sched_classes[];
1934
+
1935
+#define sched_class_highest (__end_sched_classes - 1)
1936
+#define sched_class_lowest (__begin_sched_classes - 1)
1937
+
1938
+#define for_class_range(class, _from, _to) \
1939
+ for (class = (_from); class != (_to); class--)
1940
+
18171941 #define for_each_class(class) \
1818
- for (class = sched_class_highest; class; class = class->next)
1942
+ for_class_range(class, sched_class_highest, sched_class_lowest)
18191943
18201944 extern const struct sched_class stop_sched_class;
18211945 extern const struct sched_class dl_sched_class;
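
With the ->next pointer removed from struct sched_class, the class instances are laid out by the linker as a contiguous, STRUCT_ALIGNMENT-aligned array between __begin_sched_classes and __end_sched_classes (lowest priority first), and for_each_class() walks that array downwards from the stop class to the idle class. An illustrative pick loop written only against the macros above:

/* Illustrative sketch of a pick loop over the class array. */
static struct task_struct *pick_task_sketch(struct rq *rq)
{
        const struct sched_class *class;
        struct task_struct *p;

        for_each_class(class) {
                p = class->pick_next_task(rq);  /* new single-argument hook */
                if (p)
                        return p;
        }

        BUG();  /* the idle class always has a runnable task */
}
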
....@@ -1823,6 +1947,32 @@
18231947 extern const struct sched_class fair_sched_class;
18241948 extern const struct sched_class idle_sched_class;
18251949
1950
+static inline bool sched_stop_runnable(struct rq *rq)
1951
+{
1952
+ return rq->stop && task_on_rq_queued(rq->stop);
1953
+}
1954
+
1955
+static inline bool sched_dl_runnable(struct rq *rq)
1956
+{
1957
+ return rq->dl.dl_nr_running > 0;
1958
+}
1959
+
1960
+static inline bool sched_rt_runnable(struct rq *rq)
1961
+{
1962
+ return rq->rt.rt_queued > 0;
1963
+}
1964
+
1965
+static inline bool sched_fair_runnable(struct rq *rq)
1966
+{
1967
+ return rq->cfs.nr_running > 0;
1968
+}
1969
+
1970
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1971
+extern struct task_struct *pick_next_task_idle(struct rq *rq);
1972
+
1973
+#define SCA_CHECK 0x01
1974
+#define SCA_MIGRATE_DISABLE 0x02
1975
+#define SCA_MIGRATE_ENABLE 0x04
18261976
18271977 #ifdef CONFIG_SMP
18281978
....@@ -1830,8 +1980,30 @@
18301980
18311981 extern void trigger_load_balance(struct rq *rq);
18321982
1833
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1983
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
18341984
1985
+static inline struct task_struct *get_push_task(struct rq *rq)
1986
+{
1987
+ struct task_struct *p = rq->curr;
1988
+
1989
+ lockdep_assert_held(&rq->lock);
1990
+
1991
+ if (rq->push_busy)
1992
+ return NULL;
1993
+
1994
+ if (p->nr_cpus_allowed == 1)
1995
+ return NULL;
1996
+
1997
+ if (p->migration_disabled)
1998
+ return NULL;
1999
+
2000
+ rq->push_busy = true;
2001
+ return get_task_struct(p);
2002
+}
2003
+
2004
+extern int push_cpu_stop(void *arg);
2005
+
2006
+extern unsigned long __read_mostly max_load_balance_interval;
18352007 #endif
18362008
18372009 #ifdef CONFIG_CPU_IDLE
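
get_push_task() hands out a reference to the currently running task and marks the runqueue push_busy, so that a class can ask the CPU stopper to migrate it; push_cpu_stop() is the stopper callback declared above. A hedged sketch of the calling side (names and structure are illustrative, loosely modelled on an overloaded RT/DL runqueue):

static void try_push_current_sketch(struct rq *rq)
{
        struct task_struct *p = get_push_task(rq);      /* takes a task reference */

        if (!p)         /* pinned, migration-disabled, or a push already pending */
                return;

        /*
         * Drop rq->lock before involving the stopper; push_cpu_stop() runs
         * on cpu_of(rq), migrates @p if still appropriate, then releases
         * the reference and clears rq->push_busy.
         */
        raw_spin_unlock(&rq->lock);
        stop_one_cpu_nowait(cpu_of(rq), push_cpu_stop, p, &rq->push_work);
        raw_spin_lock(&rq->lock);
}
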
....@@ -1847,17 +2019,6 @@
18472019
18482020 return rq->idle_state;
18492021 }
1850
-
1851
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1852
-{
1853
- rq->idle_state_idx = idle_state_idx;
1854
-}
1855
-
1856
-static inline int idle_get_state_idx(struct rq *rq)
1857
-{
1858
- WARN_ON(!rcu_read_lock_held());
1859
- return rq->idle_state_idx;
1860
-}
18612022 #else
18622023 static inline void idle_set_state(struct rq *rq,
18632024 struct cpuidle_state *idle_state)
....@@ -1867,15 +2028,6 @@
18672028 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
18682029 {
18692030 return NULL;
1870
-}
1871
-
1872
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1873
-{
1874
-}
1875
-
1876
-static inline int idle_get_state_idx(struct rq *rq)
1877
-{
1878
- return -1;
18792031 }
18802032 #endif
18812033
....@@ -1910,15 +2062,16 @@
19102062 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
19112063 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
19122064 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1913
-extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
19142065
19152066 #define BW_SHIFT 20
19162067 #define BW_UNIT (1 << BW_SHIFT)
19172068 #define RATIO_SHIFT 8
2069
+#define MAX_BW_BITS (64 - BW_SHIFT)
2070
+#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
19182071 unsigned long to_ratio(u64 period, u64 runtime);
19192072
19202073 extern void init_entity_runnable_average(struct sched_entity *se);
1921
-extern void post_init_entity_util_avg(struct sched_entity *se);
2074
+extern void post_init_entity_util_avg(struct task_struct *p);
19222075
19232076 #ifdef CONFIG_NO_HZ_FULL
19242077 extern bool sched_can_stop_tick(struct rq *rq);
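
BW_SHIFT/BW_UNIT define the fixed-point format (20 fractional bits) used for deadline bandwidth, and the new MAX_BW bounds user-supplied values so that shifting by BW_SHIFT cannot overflow a u64. A worked example of the conversion done by to_ratio(), declared above and implemented in the scheduler core:

/*
 * Illustrative: a task asking for runtime = 10 ms out of every period = 100 ms.
 *
 *   to_ratio(100 ms, 10 ms) ~= (10000000 << 20) / 100000000
 *                            = 104857          (~0.1 * BW_UNIT)
 *
 * MAX_BW = (1ULL << (64 - BW_SHIFT)) - 1, so any value at or below it can
 * be shifted left by BW_SHIFT without overflowing 64 bits.
 */
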
....@@ -1931,12 +2084,7 @@
19312084 */
19322085 static inline void sched_update_tick_dependency(struct rq *rq)
19332086 {
1934
- int cpu;
1935
-
1936
- if (!tick_nohz_full_enabled())
1937
- return;
1938
-
1939
- cpu = cpu_of(rq);
2087
+ int cpu = cpu_of(rq);
19402088
19412089 if (!tick_nohz_full_cpu(cpu))
19422090 return;
....@@ -1956,13 +2104,16 @@
19562104 unsigned prev_nr = rq->nr_running;
19572105
19582106 rq->nr_running = prev_nr + count;
2107
+ if (trace_sched_update_nr_running_tp_enabled()) {
2108
+ call_trace_sched_update_nr_running(rq, count);
2109
+ }
19592110
1960
- if (prev_nr < 2 && rq->nr_running >= 2) {
19612111 #ifdef CONFIG_SMP
2112
+ if (prev_nr < 2 && rq->nr_running >= 2) {
19622113 if (!READ_ONCE(rq->rd->overload))
19632114 WRITE_ONCE(rq->rd->overload, 1);
1964
-#endif
19652115 }
2116
+#endif
19662117
19672118 sched_update_tick_dependency(rq);
19682119 }
....@@ -1970,6 +2121,10 @@
19702121 static inline void sub_nr_running(struct rq *rq, unsigned count)
19712122 {
19722123 rq->nr_running -= count;
2124
+ if (trace_sched_update_nr_running_tp_enabled()) {
2125
+ call_trace_sched_update_nr_running(rq, -count);
2126
+ }
2127
+
19732128 /* Check if we still need preemption */
19742129 sched_update_tick_dependency(rq);
19752130 }
....@@ -2009,7 +2164,24 @@
20092164
20102165 #endif /* CONFIG_SCHED_HRTICK */
20112166
2167
+#ifndef arch_scale_freq_tick
2168
+static __always_inline
2169
+void arch_scale_freq_tick(void)
2170
+{
2171
+}
2172
+#endif
2173
+
20122174 #ifndef arch_scale_freq_capacity
2175
+/**
2176
+ * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2177
+ * @cpu: the CPU in question.
2178
+ *
2179
+ * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
2180
+ *
2181
+ * f_curr
2182
+ * ------ * SCHED_CAPACITY_SCALE
2183
+ * f_max
2184
+ */
20132185 static __always_inline
20142186 unsigned long arch_scale_freq_capacity(int cpu)
20152187 {
....@@ -2017,17 +2189,8 @@
20172189 }
20182190 #endif
20192191
2020
-#ifndef arch_scale_max_freq_capacity
2021
-struct sched_domain;
2022
-static __always_inline
2023
-unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
2024
-{
2025
- return SCHED_CAPACITY_SCALE;
2026
-}
2027
-#endif
2028
-
20292192 #ifdef CONFIG_SMP
2030
-#ifdef CONFIG_PREEMPT
2193
+#ifdef CONFIG_PREEMPTION
20312194
20322195 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
20332196
....@@ -2079,7 +2242,7 @@
20792242 return ret;
20802243 }
20812244
2082
-#endif /* CONFIG_PREEMPT */
2245
+#endif /* CONFIG_PREEMPTION */
20832246
20842247 /*
20852248 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
....@@ -2254,6 +2417,16 @@
22542417 static inline void nohz_balance_exit_idle(struct rq *rq) { }
22552418 #endif
22562419
2420
+#define MDF_PUSH 0x01
2421
+
2422
+static inline bool is_migration_disabled(struct task_struct *p)
2423
+{
2424
+#ifdef CONFIG_SMP
2425
+ return p->migration_disabled;
2426
+#else
2427
+ return false;
2428
+#endif
2429
+}
22572430
22582431 #ifdef CONFIG_SMP
22592432 static inline
....@@ -2312,7 +2485,7 @@
23122485 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
23132486
23142487 #ifdef CONFIG_CPU_FREQ
2315
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2488
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
23162489
23172490 /**
23182491 * cpufreq_update_util - Take a note about CPU utilization changes.
....@@ -2352,18 +2525,48 @@
23522525 #ifdef CONFIG_UCLAMP_TASK
23532526 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
23542527
2528
+/**
2529
+ * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
2530
+ * @rq: The rq to clamp against. Must not be NULL.
2531
+ * @util: The util value to clamp.
2532
+ * @p: The task to clamp against. Can be NULL if you want to clamp
2533
+ * against @rq only.
2534
+ *
2535
+ * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
2536
+ *
2537
+ * If sched_uclamp_used static key is disabled, then just return the util
2538
+ * without any clamping since uclamp aggregation at the rq level in the fast
2539
+ * path is disabled, rendering this operation a NOP.
2540
+ *
2541
+ * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
2542
+ * will return the correct effective uclamp value of the task even if the
2543
+ * static key is disabled.
2544
+ */
23552545 static __always_inline
23562546 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
23572547 struct task_struct *p)
23582548 {
2359
- unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2360
- unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2549
+ unsigned long min_util = 0;
2550
+ unsigned long max_util = 0;
2551
+
2552
+ if (!static_branch_likely(&sched_uclamp_used))
2553
+ return util;
23612554
23622555 if (p) {
2363
- min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2364
- max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2556
+ min_util = uclamp_eff_value(p, UCLAMP_MIN);
2557
+ max_util = uclamp_eff_value(p, UCLAMP_MAX);
2558
+
2559
+ /*
2560
+ * Ignore last runnable task's max clamp, as this task will
2561
+ * reset it. Similarly, no need to read the rq's min clamp.
2562
+ */
2563
+ if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
2564
+ goto out;
23652565 }
23662566
2567
+ min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
2568
+ max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
2569
+out:
23672570 /*
23682571 * Since CPU's {min,max}_util clamps are MAX aggregated considering
23692572 * RUNNABLE tasks with _different_ clamps, we can end up with an
....@@ -2374,6 +2577,24 @@
23742577
23752578 return clamp(util, min_util, max_util);
23762579 }
2580
+
2581
+static inline bool uclamp_boosted(struct task_struct *p)
2582
+{
2583
+ return uclamp_eff_value(p, UCLAMP_MIN) > 0;
2584
+}
2585
+
2586
+/*
2587
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
2588
+ * by default in the fast path and only gets turned on once userspace performs
2589
+ * an operation that requires it.
2590
+ *
2591
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
2592
+ * hence is active.
2593
+ */
2594
+static inline bool uclamp_is_used(void)
2595
+{
2596
+ return static_branch_likely(&sched_uclamp_used);
2597
+}
23772598 #else /* CONFIG_UCLAMP_TASK */
23782599 static inline
23792600 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
....@@ -2381,12 +2602,36 @@
23812602 {
23822603 return util;
23832604 }
2605
+
2606
+static inline bool uclamp_boosted(struct task_struct *p)
2607
+{
2608
+ return false;
2609
+}
2610
+
2611
+static inline bool uclamp_is_used(void)
2612
+{
2613
+ return false;
2614
+}
23842615 #endif /* CONFIG_UCLAMP_TASK */
23852616
2386
-unsigned long task_util_est(struct task_struct *p);
2387
-unsigned int uclamp_task(struct task_struct *p);
2388
-bool uclamp_latency_sensitive(struct task_struct *p);
2389
-bool uclamp_boosted(struct task_struct *p);
2617
+#ifdef CONFIG_UCLAMP_TASK_GROUP
2618
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
2619
+{
2620
+ struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
2621
+ struct task_group *tg;
2622
+
2623
+ if (!css)
2624
+ return false;
2625
+ tg = container_of(css, struct task_group, css);
2626
+
2627
+ return tg->latency_sensitive;
2628
+}
2629
+#else
2630
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
2631
+{
2632
+ return false;
2633
+}
2634
+#endif /* CONFIG_UCLAMP_TASK_GROUP */
23902635
23912636 #ifdef arch_scale_freq_capacity
23922637 # ifndef arch_scale_freq_invariant
....@@ -2418,20 +2663,6 @@
24182663 ENERGY_UTIL,
24192664 };
24202665
2421
-#ifdef CONFIG_SMP
2422
-static inline unsigned long cpu_util_cfs(struct rq *rq)
2423
-{
2424
- unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2425
-
2426
- if (sched_feat(UTIL_EST)) {
2427
- util = max_t(unsigned long, util,
2428
- READ_ONCE(rq->cfs.avg.util_est.enqueued));
2429
- }
2430
-
2431
- return util;
2432
-}
2433
-#endif
2434
-
24352666 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
24362667
24372668 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
....@@ -2448,11 +2679,22 @@
24482679 return READ_ONCE(rq->avg_dl.util_avg);
24492680 }
24502681
2682
+static inline unsigned long cpu_util_cfs(struct rq *rq)
2683
+{
2684
+ unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2685
+
2686
+ if (sched_feat(UTIL_EST)) {
2687
+ util = max_t(unsigned long, util,
2688
+ READ_ONCE(rq->cfs.avg.util_est.enqueued));
2689
+ }
2690
+
2691
+ return util;
2692
+}
2693
+
24512694 static inline unsigned long cpu_util_rt(struct rq *rq)
24522695 {
24532696 return READ_ONCE(rq->avg_rt.util_avg);
24542697 }
2455
-
24562698 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
24572699 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
24582700 unsigned long max, enum schedutil_type type,
....@@ -2490,14 +2732,78 @@
24902732 }
24912733 #endif
24922734
2493
-#ifdef CONFIG_SMP
2494
-#ifdef CONFIG_ENERGY_MODEL
2735
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2736
+
24952737 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2496
-#else
2738
+
2739
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2740
+
2741
+static inline bool sched_energy_enabled(void)
2742
+{
2743
+ return static_branch_unlikely(&sched_energy_present);
2744
+}
2745
+
2746
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
2747
+
24972748 #define perf_domain_span(pd) NULL
2498
-#endif
2749
+static inline bool sched_energy_enabled(void) { return false; }
2750
+
2751
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2752
+
2753
+#ifdef CONFIG_MEMBARRIER
2754
+/*
2755
+ * The scheduler provides memory barriers required by membarrier between:
2756
+ * - prior user-space memory accesses and store to rq->membarrier_state,
2757
+ * - store to rq->membarrier_state and following user-space memory accesses.
2758
+ * In the same way it provides those guarantees around store to rq->curr.
2759
+ */
2760
+static inline void membarrier_switch_mm(struct rq *rq,
2761
+ struct mm_struct *prev_mm,
2762
+ struct mm_struct *next_mm)
2763
+{
2764
+ int membarrier_state;
2765
+
2766
+ if (prev_mm == next_mm)
2767
+ return;
2768
+
2769
+ membarrier_state = atomic_read(&next_mm->membarrier_state);
2770
+ if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2771
+ return;
2772
+
2773
+ WRITE_ONCE(rq->membarrier_state, membarrier_state);
2774
+}
2775
+#else
2776
+static inline void membarrier_switch_mm(struct rq *rq,
2777
+ struct mm_struct *prev_mm,
2778
+ struct mm_struct *next_mm)
2779
+{
2780
+}
24992781 #endif
25002782
25012783 #ifdef CONFIG_SMP
2502
-extern struct static_key_false sched_energy_present;
2784
+static inline bool is_per_cpu_kthread(struct task_struct *p)
2785
+{
2786
+ if (!(p->flags & PF_KTHREAD))
2787
+ return false;
2788
+
2789
+ if (p->nr_cpus_allowed != 1)
2790
+ return false;
2791
+
2792
+ return true;
2793
+}
25032794 #endif
2795
+
2796
+void swake_up_all_locked(struct swait_queue_head *q);
2797
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
2798
+
2799
+/*
2800
+ * task_may_not_preempt - check whether a task may not be preemptible soon
2801
+ */
2802
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
2803
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
2804
+#else
2805
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
2806
+{
2807
+ return false;
2808
+}
2809
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
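
sched_energy_enabled() wraps the sched_energy_present static key, so the energy-aware path costs a single patched branch when EAS is off. A hedged sketch of the expected calling shape (the function below is illustrative; the real user is the fair-class wake-up path):

static int select_cpu_sketch(struct task_struct *p, int prev_cpu)
{
        if (sched_energy_enabled()) {
                /*
                 * Walk the RCU-protected rd->pd perf_domain list (see
                 * struct root_domain above) and pick the most efficient
                 * CPU with spare capacity for @p.
                 */
        }

        /* Fall back to the regular capacity/idle based selection. */
        return prev_cpu;
}
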