forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/sched/sched.h
....@@ -59,15 +59,18 @@
5959 #include <linux/psi.h>
6060 #include <linux/rcupdate_wait.h>
6161 #include <linux/security.h>
62
-#include <linux/stackprotector.h>
6362 #include <linux/stop_machine.h>
6463 #include <linux/suspend.h>
6564 #include <linux/swait.h>
6665 #include <linux/syscalls.h>
6766 #include <linux/task_work.h>
6867 #include <linux/tsacct_kern.h>
68
+#include <linux/android_vendor.h>
69
+#include <linux/android_kabi.h>
6970
7071 #include <asm/tlb.h>
72
+#include <asm-generic/vmlinux.lds.h>
73
+#include <soc/rockchip/rockchip_performance.h>
7174
7275 #ifdef CONFIG_PARAVIRT
7376 # include <asm/paravirt.h>
....@@ -76,13 +79,13 @@
7679 #include "cpupri.h"
7780 #include "cpudeadline.h"
7881
82
+#include <trace/events/sched.h>
83
+
7984 #ifdef CONFIG_SCHED_DEBUG
8085 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
8186 #else
8287 # define SCHED_WARN_ON(x) ({ (void)(x), 0; })
8388 #endif
84
-
85
-#include "tune.h"
8689
8790 struct rq;
8891 struct cpuidle_state;
....@@ -99,12 +102,7 @@
99102 extern void calc_global_load_tick(struct rq *this_rq);
100103 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
101104
102
-#ifdef CONFIG_SMP
103
-extern void cpu_load_update_active(struct rq *this_rq);
104
-#else
105
-static inline void cpu_load_update_active(struct rq *this_rq) { }
106
-#endif
107
-
105
+extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
108106 /*
109107 * Helpers for converting nanosecond timing to jiffy resolution
110108 */
....@@ -187,6 +185,11 @@
187185 rt_policy(policy) || dl_policy(policy);
188186 }
189187
188
+static inline int task_has_idle_policy(struct task_struct *p)
189
+{
190
+ return idle_policy(p->policy);
191
+}
192
+
190193 static inline int task_has_rt_policy(struct task_struct *p)
191194 {
192195 return rt_policy(p->policy);
....@@ -198,6 +201,19 @@
198201 }
199202
200203 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
204
+
205
+static inline void update_avg(u64 *avg, u64 sample)
206
+{
207
+ s64 diff = sample - *avg;
208
+ *avg += diff / 8;
209
+}
210
+
211
+/*
212
+ * Shifting a value by an exponent greater *or equal* to the size of said value
213
+ * is UB; cap at size-1.
214
+ */
215
+#define shr_bound(val, shift) \
216
+ (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
201217
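The two helpers added above are tiny but easy to misread: update_avg() keeps a running average that moves 1/8 of the way towards each new sample, and shr_bound() caps the shift count so a shift by the full width of the type (undefined behaviour in C) can never happen. A minimal userspace sketch of the same arithmetic, with BITS_PER_TYPE and min_t spelled out in plain C (the names below are stand-ins, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* 1/8-weighted running average, mirroring update_avg() above */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;
	*avg += diff / 8;
}

/* shift right, but never by >= the bit width of the value (UB in C) */
#define BITS_PER_TYPE(v)	(sizeof(v) * 8)
#define SHR_BOUND(val, shift) \
	((val) >> ((shift) < BITS_PER_TYPE(val) - 1 ? (shift) : BITS_PER_TYPE(val) - 1))

int main(void)
{
	uint64_t avg = 0;
	int i;

	for (i = 0; i < 5; i++) {
		update_avg(&avg, 800);	/* slowly converges towards 800 */
		printf("avg = %llu\n", (unsigned long long)avg);
	}

	uint64_t v = 1ULL << 40;
	/* requested shift of 100 is capped at 63, so this prints 0, not UB */
	printf("%llu\n", (unsigned long long)SHR_BOUND(v, 100));
	return 0;
}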
202218 /*
203219 * !! For sched_setattr_nocheck() (kernel) only !!
....@@ -304,14 +320,28 @@
304320 __dl_update(dl_b, -((s32)tsk_bw / cpus));
305321 }
306322
307
-static inline
308
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
323
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
324
+ u64 old_bw, u64 new_bw)
309325 {
310326 return dl_b->bw != -1 &&
311
- dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
327
+ cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
312328 }
313329
314
-extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
330
+/*
331
+ * Verify the fitness of task @p to run on @cpu taking into account the
332
+ * CPU original capacity and the runtime/deadline ratio of the task.
333
+ *
334
+ * The function will return true if the CPU original capacity of the
335
+ * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
336
+ * task and false otherwise.
337
+ */
338
+static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
339
+{
340
+ unsigned long cap = arch_scale_cpu_capacity(cpu);
341
+
342
+ return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
343
+}
344
+
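Both the reworked __dl_overflow() and the new dl_task_fits_capacity() lean on cap_scale(), a multiply followed by a right shift by SCHED_CAPACITY_SHIFT, so a CPU capacity is expressed as a fraction of SCHED_CAPACITY_SCALE (1024). A standalone rendering of the fitness check with invented runtime/deadline/capacity values (no struct task_struct involved, just the arithmetic):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* cap_scale(v, s) == v * s / 1024 */
#define cap_scale(v, s)		((v) * (s) >> SCHED_CAPACITY_SHIFT)

/*
 * Same test as dl_task_fits_capacity(): the task fits if
 * deadline * (cap / 1024) >= runtime.
 */
static bool dl_fits(uint64_t dl_runtime, uint64_t dl_deadline, unsigned long cap)
{
	return cap_scale(dl_deadline, cap) >= dl_runtime;
}

int main(void)
{
	/* 4 ms of runtime inside a 10 ms deadline -> needs ~40% of a full CPU */
	uint64_t runtime  = 4000000;	/* ns */
	uint64_t deadline = 10000000;	/* ns */

	printf("fits on cap 1024: %d\n", dl_fits(runtime, deadline, 1024)); /* 1 */
	printf("fits on cap  512: %d\n", dl_fits(runtime, deadline, 512));  /* 1 */
	printf("fits on cap  256: %d\n", dl_fits(runtime, deadline, 256));  /* 0 */
	return 0;
}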
315345 extern void init_dl_bw(struct dl_bw *dl_b);
316346 extern int sched_dl_global_validate(void);
317347 extern void sched_dl_do_global(void);
....@@ -320,9 +350,8 @@
320350 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
321351 extern bool __checkparam_dl(const struct sched_attr *attr);
322352 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
323
-extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
324353 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
325
-extern bool dl_cpu_busy(unsigned int cpu);
354
+extern int dl_cpu_busy(int cpu, struct task_struct *p);
326355
327356 #ifdef CONFIG_CGROUP_SCHED
328357
....@@ -342,8 +371,9 @@
342371 u64 runtime;
343372 s64 hierarchical_quota;
344373
345
- short idle;
346
- short period_active;
374
+ u8 idle;
375
+ u8 period_active;
376
+ u8 slack_started;
347377 struct hrtimer period_timer;
348378 struct hrtimer slack_timer;
349379 struct list_head throttled_cfs_rq;
....@@ -352,8 +382,6 @@
352382 int nr_periods;
353383 int nr_throttled;
354384 u64 throttled_time;
355
-
356
- bool distribute_running;
357385 #endif
358386 };
359387
....@@ -407,8 +435,14 @@
407435 struct uclamp_se uclamp[UCLAMP_CNT];
408436 /* Latency-sensitive flag used for a task group */
409437 unsigned int latency_sensitive;
438
+
439
+ ANDROID_VENDOR_DATA_ARRAY(1, 4);
410440 #endif
411441
442
+ ANDROID_KABI_RESERVE(1);
443
+ ANDROID_KABI_RESERVE(2);
444
+ ANDROID_KABI_RESERVE(3);
445
+ ANDROID_KABI_RESERVE(4);
412446 };
413447
414448 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -497,9 +531,9 @@
497531 /* CFS-related fields in a runqueue */
498532 struct cfs_rq {
499533 struct load_weight load;
500
- unsigned long runnable_weight;
501534 unsigned int nr_running;
502
- unsigned int h_nr_running;
535
+ unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */
536
+ unsigned int idle_h_nr_running; /* SCHED_IDLE */
503537
504538 u64 exec_clock;
505539 u64 min_vruntime;
....@@ -535,7 +569,7 @@
535569 int nr;
536570 unsigned long load_avg;
537571 unsigned long util_avg;
538
- unsigned long runnable_sum;
572
+ unsigned long runnable_avg;
539573 } removed;
540574
541575 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -575,12 +609,14 @@
575609 s64 runtime_remaining;
576610
577611 u64 throttled_clock;
578
- u64 throttled_clock_task;
579
- u64 throttled_clock_task_time;
612
+ u64 throttled_clock_pelt;
613
+ u64 throttled_clock_pelt_time;
580614 int throttled;
581615 int throttle_count;
582616 struct list_head throttled_list;
583617 #endif /* CONFIG_CFS_BANDWIDTH */
618
+
619
+ ANDROID_VENDOR_DATA_ARRAY(1, 16);
584620 #endif /* CONFIG_FAIR_GROUP_SCHED */
585621 };
586622
....@@ -646,7 +682,7 @@
646682 /*
647683 * Deadline values of the currently executing and the
648684 * earliest ready task on this rq. Caching these facilitates
649
- * the decision wether or not a ready but not running task
685
+ * the decision whether or not a ready but not running task
650686 * should migrate somewhere else.
651687 */
652688 struct {
....@@ -695,8 +731,30 @@
695731 #ifdef CONFIG_FAIR_GROUP_SCHED
696732 /* An entity is a task if it doesn't "own" a runqueue */
697733 #define entity_is_task(se) (!se->my_q)
734
+
735
+static inline void se_update_runnable(struct sched_entity *se)
736
+{
737
+ if (!entity_is_task(se))
738
+ se->runnable_weight = se->my_q->h_nr_running;
739
+}
740
+
741
+static inline long se_runnable(struct sched_entity *se)
742
+{
743
+ if (entity_is_task(se))
744
+ return !!se->on_rq;
745
+ else
746
+ return se->runnable_weight;
747
+}
748
+
698749 #else
699750 #define entity_is_task(se) 1
751
+
752
+static inline void se_update_runnable(struct sched_entity *se) {}
753
+
754
+static inline long se_runnable(struct sched_entity *se)
755
+{
756
+ return !!se->on_rq;
757
+}
700758 #endif
701759
702760 #ifdef CONFIG_SMP
....@@ -708,10 +766,6 @@
708766 return scale_load_down(se->load.weight);
709767 }
710768
711
-static inline long se_runnable(struct sched_entity *se)
712
-{
713
- return scale_load_down(se->runnable_weight);
714
-}
715769
716770 static inline bool sched_asym_prefer(int a, int b)
717771 {
....@@ -722,12 +776,6 @@
722776 struct em_perf_domain *em_pd;
723777 struct perf_domain *next;
724778 struct rcu_head rcu;
725
-};
726
-
727
-struct max_cpu_capacity {
728
- raw_spinlock_t lock;
729
- unsigned long val;
730
- int cpu;
731779 };
732780
733781 /* Scheduling group status flags */
....@@ -788,27 +836,23 @@
788836 cpumask_var_t rto_mask;
789837 struct cpupri cpupri;
790838
791
- /* Maximum cpu capacity in the system. */
792
- struct max_cpu_capacity max_cpu_capacity;
839
+ unsigned long max_cpu_capacity;
793840
794841 /*
795842 * NULL-terminated list of performance domains intersecting with the
796843 * CPUs of the rd. Protected by RCU.
797844 */
798
- struct perf_domain *pd;
845
+ struct perf_domain __rcu *pd;
799846
800
- /* Vendor fields. */
801
- /* First cpu with maximum and minimum original capacity */
802
- int max_cap_orig_cpu, min_cap_orig_cpu;
803
- /* First cpu with mid capacity */
804
- int mid_cap_orig_cpu;
847
+ ANDROID_VENDOR_DATA_ARRAY(1, 4);
848
+
849
+ ANDROID_KABI_RESERVE(1);
850
+ ANDROID_KABI_RESERVE(2);
851
+ ANDROID_KABI_RESERVE(3);
852
+ ANDROID_KABI_RESERVE(4);
805853 };
806854
807
-extern struct root_domain def_root_domain;
808
-extern struct mutex sched_domains_mutex;
809
-
810855 extern void init_defrootdomain(void);
811
-extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
812856 extern int sched_init_domains(const struct cpumask *cpu_map);
813857 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
814858 extern void sched_get_rd(struct root_domain *rd);
....@@ -817,6 +861,7 @@
817861 #ifdef HAVE_RT_PUSH_IPI
818862 extern void rto_push_irq_work_func(struct irq_work *work);
819863 #endif
864
+extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
820865 #endif /* CONFIG_SMP */
821866
822867 #ifdef CONFIG_UCLAMP_TASK
....@@ -859,6 +904,8 @@
859904 unsigned int value;
860905 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
861906 };
907
+
908
+DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
862909 #endif /* CONFIG_UCLAMP_TASK */
863910
864911 /*
....@@ -882,21 +929,19 @@
882929 unsigned int nr_preferred_running;
883930 unsigned int numa_migrate_on;
884931 #endif
885
- #define CPU_LOAD_IDX_MAX 5
886
- unsigned long cpu_load[CPU_LOAD_IDX_MAX];
887932 #ifdef CONFIG_NO_HZ_COMMON
888933 #ifdef CONFIG_SMP
889
- unsigned long last_load_update_tick;
890934 unsigned long last_blocked_load_update_tick;
891935 unsigned int has_blocked_load;
936
+ call_single_data_t nohz_csd;
892937 #endif /* CONFIG_SMP */
893938 unsigned int nohz_tick_stopped;
894
- atomic_t nohz_flags;
939
+ atomic_t nohz_flags;
895940 #endif /* CONFIG_NO_HZ_COMMON */
896941
897
- /* capture load from *all* tasks on this CPU: */
898
- struct load_weight load;
899
- unsigned long nr_load_updates;
942
+#ifdef CONFIG_SMP
943
+ unsigned int ttwu_pending;
944
+#endif
900945 u64 nr_switches;
901946
902947 #ifdef CONFIG_UCLAMP_TASK
....@@ -924,7 +969,7 @@
924969 */
925970 unsigned long nr_uninterruptible;
926971
927
- struct task_struct *curr;
972
+ struct task_struct __rcu *curr;
928973 struct task_struct *idle;
929974 struct task_struct *stop;
930975 unsigned long next_balance;
....@@ -939,15 +984,21 @@
939984
940985 atomic_t nr_iowait;
941986
987
+#ifdef CONFIG_MEMBARRIER
988
+ int membarrier_state;
989
+#endif
990
+
942991 #ifdef CONFIG_SMP
943
- struct root_domain *rd;
944
- struct sched_domain *sd;
992
+ struct root_domain *rd;
993
+ struct sched_domain __rcu *sd;
945994
946995 unsigned long cpu_capacity;
947996 unsigned long cpu_capacity_orig;
948997
949998 struct callback_head *balance_callback;
999
+ unsigned char balance_flags;
9501000
1001
+ unsigned char nohz_idle_balance;
9511002 unsigned char idle_balance;
9521003
9531004 unsigned long misfit_task_load;
....@@ -968,12 +1019,19 @@
9681019 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
9691020 struct sched_avg avg_irq;
9701021 #endif
1022
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
1023
+ struct sched_avg avg_thermal;
1024
+#endif
9711025 u64 idle_stamp;
9721026 u64 avg_idle;
9731027
9741028 /* This is used to determine avg_idle's max value */
9751029 u64 max_idle_balance_cost;
1030
+
1031
+#ifdef CONFIG_HOTPLUG_CPU
1032
+ struct rcuwait hotplug_wait;
9761033 #endif
1034
+#endif /* CONFIG_SMP */
9771035
9781036 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
9791037 u64 prev_irq_time;
....@@ -991,10 +1049,10 @@
9911049
9921050 #ifdef CONFIG_SCHED_HRTICK
9931051 #ifdef CONFIG_SMP
994
- int hrtick_csd_pending;
9951052 call_single_data_t hrtick_csd;
9961053 #endif
9971054 struct hrtimer hrtick_timer;
1055
+ ktime_t hrtick_time;
9981056 #endif
9991057
10001058 #ifdef CONFIG_SCHEDSTATS
....@@ -1015,15 +1073,29 @@
10151073 unsigned int ttwu_local;
10161074 #endif
10171075
1018
-#ifdef CONFIG_SMP
1019
- struct llist_head wake_list;
1076
+#ifdef CONFIG_HOTPLUG_CPU
1077
+ struct cpu_stop_work drain;
1078
+ struct cpu_stop_done drain_done;
10201079 #endif
10211080
10221081 #ifdef CONFIG_CPU_IDLE
10231082 /* Must be inspected within a rcu lock section */
10241083 struct cpuidle_state *idle_state;
1025
- int idle_state_idx;
10261084 #endif
1085
+
1086
+#ifdef CONFIG_SMP
1087
+ unsigned int nr_pinned;
1088
+#endif
1089
+ unsigned int push_busy;
1090
+ struct cpu_stop_work push_work;
1091
+
1092
+ ANDROID_VENDOR_DATA_ARRAY(1, 96);
1093
+ ANDROID_OEM_DATA_ARRAY(1, 16);
1094
+
1095
+ ANDROID_KABI_RESERVE(1);
1096
+ ANDROID_KABI_RESERVE(2);
1097
+ ANDROID_KABI_RESERVE(3);
1098
+ ANDROID_KABI_RESERVE(4);
10271099 };
10281100
10291101 #ifdef CONFIG_FAIR_GROUP_SCHED
....@@ -1132,6 +1204,41 @@
11321204 return rq->clock_task;
11331205 }
11341206
1207
+#ifdef CONFIG_SMP
1208
+DECLARE_PER_CPU(u64, clock_task_mult);
1209
+
1210
+static inline u64 rq_clock_task_mult(struct rq *rq)
1211
+{
1212
+ lockdep_assert_held(&rq->lock);
1213
+ assert_clock_updated(rq);
1214
+
1215
+ return per_cpu(clock_task_mult, cpu_of(rq));
1216
+}
1217
+#else
1218
+static inline u64 rq_clock_task_mult(struct rq *rq)
1219
+{
1220
+ return rq_clock_task(rq);
1221
+}
1222
+#endif
1223
+
1224
+/**
1225
+ * By default the decay is the default pelt decay period.
1226
+ * The decay shift can change the decay period in
1227
+ * multiples of 32.
1228
+ * Decay shift Decay period(ms)
1229
+ * 0 32
1230
+ * 1 64
1231
+ * 2 128
1232
+ * 3 256
1233
+ * 4 512
1234
+ */
1235
+extern int sched_thermal_decay_shift;
1236
+
1237
+static inline u64 rq_clock_thermal(struct rq *rq)
1238
+{
1239
+ return rq_clock_task(rq) >> sched_thermal_decay_shift;
1240
+}
1241
+
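The table in the comment above is just the default 32 ms PELT decay period scaled by 2^shift, and rq_clock_thermal() shifts the task clock right by the same amount so the thermal-pressure signal decays that much more slowly. A small sketch of the relationship (the clock value is invented, not read from a real rq):

#include <stdint.h>
#include <stdio.h>

static int sched_thermal_decay_shift;	/* 0..4 in the table above */

/* mirrors rq_clock_thermal(): the task clock shifted right by the decay shift */
static uint64_t clock_thermal(uint64_t clock_task)
{
	return clock_task >> sched_thermal_decay_shift;
}

int main(void)
{
	uint64_t clock_task = 1280000000ULL;	/* ns, a pretend rq_clock_task() value */
	int shift;

	for (shift = 0; shift <= 4; shift++) {
		sched_thermal_decay_shift = shift;
		printf("shift %d: decay period %3d ms, thermal clock %llu ns\n",
		       shift, 32 << shift,
		       (unsigned long long)clock_thermal(clock_task));
	}
	return 0;
}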
11351242 static inline void rq_clock_skip_update(struct rq *rq)
11361243 {
11371244 lockdep_assert_held(&rq->lock);
....@@ -1161,6 +1268,16 @@
11611268 #endif
11621269 };
11631270
1271
+/*
1272
+ * Lockdep annotation that avoids accidental unlocks; it's like a
1273
+ * sticky/continuous lockdep_assert_held().
1274
+ *
1275
+ * This avoids code that has access to 'struct rq *rq' (basically everything in
1276
+ * the scheduler) from accidentally unlocking the rq if they do not also have a
1277
+ * copy of the (on-stack) 'struct rq_flags rf'.
1278
+ *
1279
+ * Also see Documentation/locking/lockdep-design.rst.
1280
+ */
11641281 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
11651282 {
11661283 rf->cookie = lockdep_pin_lock(&rq->lock);
....@@ -1168,6 +1285,9 @@
11681285 #ifdef CONFIG_SCHED_DEBUG
11691286 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
11701287 rf->clock_update_flags = 0;
1288
+#endif
1289
+#ifdef CONFIG_SMP
1290
+ SCHED_WARN_ON(rq->balance_callback);
11711291 #endif
11721292 }
11731293
....@@ -1294,16 +1414,18 @@
12941414 extern enum numa_topology_type sched_numa_topology_type;
12951415 extern int sched_max_numa_distance;
12961416 extern bool find_numa_distance(int distance);
1297
-#endif
1298
-
1299
-#ifdef CONFIG_NUMA
13001417 extern void sched_init_numa(void);
13011418 extern void sched_domains_numa_masks_set(unsigned int cpu);
13021419 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1420
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
13031421 #else
13041422 static inline void sched_init_numa(void) { }
13051423 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
13061424 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1425
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1426
+{
1427
+ return nr_cpu_ids;
1428
+}
13071429 #endif
13081430
13091431 #ifdef CONFIG_NUMA_BALANCING
....@@ -1316,8 +1438,6 @@
13161438 };
13171439 extern void sched_setnuma(struct task_struct *p, int node);
13181440 extern int migrate_task_to(struct task_struct *p, int cpu);
1319
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1320
- int cpu, int scpu);
13211441 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
13221442 #else
13231443 static inline void
....@@ -1328,6 +1448,11 @@
13281448
13291449 #ifdef CONFIG_SMP
13301450
1451
+#define BALANCE_WORK 0x01
1452
+#define BALANCE_PUSH 0x02
1453
+
1454
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1455
+ int cpu, int scpu);
13311456 static inline void
13321457 queue_balance_callback(struct rq *rq,
13331458 struct callback_head *head,
....@@ -1335,15 +1460,14 @@
13351460 {
13361461 lockdep_assert_held(&rq->lock);
13371462
1338
- if (unlikely(head->next))
1463
+ if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
13391464 return;
13401465
13411466 head->func = (void (*)(struct callback_head *))func;
13421467 head->next = rq->balance_callback;
13431468 rq->balance_callback = head;
1469
+ rq->balance_flags |= BALANCE_WORK;
13441470 }
1345
-
1346
-extern void sched_ttwu_pending(void);
13471471
13481472 #define rcu_dereference_check_sched_domain(p) \
13491473 rcu_dereference_check((p), \
....@@ -1351,7 +1475,7 @@
13511475
13521476 /*
13531477 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1354
- * See detach_destroy_domains: synchronize_sched for details.
1478
+ * See destroy_sched_domains: call_rcu for details.
13551479 *
13561480 * The domain tree of any CPU may only be accessed from within
13571481 * preempt-disabled sections.
....@@ -1359,8 +1483,6 @@
13591483 #define for_each_domain(cpu, __sd) \
13601484 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
13611485 __sd; __sd = __sd->parent)
1362
-
1363
-#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
13641486
13651487 /**
13661488 * highest_flag_domain - Return highest sched_domain containing flag.
....@@ -1396,13 +1518,13 @@
13961518 return sd;
13971519 }
13981520
1399
-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
1521
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
14001522 DECLARE_PER_CPU(int, sd_llc_size);
14011523 DECLARE_PER_CPU(int, sd_llc_id);
1402
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1403
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
1404
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
1405
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
1524
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1525
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1526
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1527
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
14061528 extern struct static_key_false sched_asym_cpucapacity;
14071529
14081530 struct sched_group_capacity {
....@@ -1421,7 +1543,7 @@
14211543 int id;
14221544 #endif
14231545
1424
- unsigned long cpumask[0]; /* Balance mask */
1546
+ unsigned long cpumask[]; /* Balance mask */
14251547 };
14261548
14271549 struct sched_group {
....@@ -1439,7 +1561,7 @@
14391561 * by attaching extra space to the end of the structure,
14401562 * depending on how many CPUs the kernel has booted up with)
14411563 */
1442
- unsigned long cpumask[0];
1564
+ unsigned long cpumask[];
14431565 };
14441566
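Dropping the `[0]` here turns these trailing cpumask arrays into C99 flexible array members; the allocation idiom stays the same: reserve sizeof(struct) plus however many mask words the booted CPU count needs, in one allocation. A generic userspace illustration of that idiom (the struct below is made up for the example, not sched_group_capacity):

#include <stdio.h>
#include <stdlib.h>

struct group {
	unsigned int	ref;
	unsigned long	cpumask[];	/* flexible array member, sized at alloc time */
};

int main(void)
{
	size_t nr_words = 4;		/* e.g. enough mask words for the booted CPUs */
	struct group *g;

	/* one allocation covers the header and the trailing mask words */
	g = calloc(1, sizeof(*g) + nr_words * sizeof(unsigned long));
	if (!g)
		return 1;

	g->cpumask[0] = 0xffUL;		/* CPUs 0-7 */
	printf("sizeof(struct group) = %zu, mask[0] = %#lx\n",
	       sizeof(struct group), g->cpumask[0]);

	free(g);
	return 0;
}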
14451567 static inline struct cpumask *sched_group_span(struct sched_group *sg)
....@@ -1482,11 +1604,11 @@
14821604 }
14831605 #endif
14841606
1485
-#else
1607
+extern void flush_smp_call_function_from_idle(void);
14861608
1487
-static inline void sched_ttwu_pending(void) { }
1488
-
1489
-#endif /* CONFIG_SMP */
1609
+#else /* !CONFIG_SMP: */
1610
+static inline void flush_smp_call_function_from_idle(void) { }
1611
+#endif
14901612
14911613 #include "stats.h"
14921614 #include "autogroup.h"
....@@ -1546,7 +1668,7 @@
15461668 #ifdef CONFIG_SMP
15471669 /*
15481670 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1549
- * successfuly executed on another CPU. We must ensure that updates of
1671
+ * successfully executed on another CPU. We must ensure that updates of
15501672 * per-task data have been completed by this moment.
15511673 */
15521674 smp_wmb();
....@@ -1598,6 +1720,8 @@
15981720 #undef SCHED_FEAT
15991721
16001722 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1723
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
1724
+
16011725 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
16021726
16031727 #else /* !CONFIG_JUMP_LABEL */
....@@ -1669,7 +1793,10 @@
16691793 */
16701794 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
16711795 #define WF_FORK 0x02 /* Child wakeup after fork */
1672
-#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
1796
+#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
1797
+#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
1798
+#define WF_LOCK_SLEEPER 0x10 /* Wakeup spinlock "sleeper" */
1799
+#define WF_ANDROID_VENDOR 0x1000 /* Vendor specific for Android */
16731800
16741801 /*
16751802 * To aid in avoiding the subversion of "niceness" due to uneven distribution
....@@ -1723,10 +1850,11 @@
17231850 #define ENQUEUE_MIGRATED 0x00
17241851 #endif
17251852
1853
+#define ENQUEUE_WAKEUP_SYNC 0x80
1854
+
17261855 #define RETRY_TASK ((void *)-1UL)
17271856
17281857 struct sched_class {
1729
- const struct sched_class *next;
17301858
17311859 #ifdef CONFIG_UCLAMP_TASK
17321860 int uclamp_enabled;
....@@ -1735,38 +1863,32 @@
17351863 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
17361864 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
17371865 void (*yield_task) (struct rq *rq);
1738
- bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1866
+ bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
17391867
17401868 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
17411869
1742
- /*
1743
- * It is the responsibility of the pick_next_task() method that will
1744
- * return the next task to call put_prev_task() on the @prev task or
1745
- * something equivalent.
1746
- *
1747
- * May return RETRY_TASK when it finds a higher prio class has runnable
1748
- * tasks.
1749
- */
1750
- struct task_struct * (*pick_next_task)(struct rq *rq,
1751
- struct task_struct *prev,
1752
- struct rq_flags *rf);
1870
+ struct task_struct *(*pick_next_task)(struct rq *rq);
1871
+
17531872 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1873
+ void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
17541874
17551875 #ifdef CONFIG_SMP
1756
- int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
1757
- int subling_count_hint);
1876
+ int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1877
+ int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
17581878 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
17591879
17601880 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
17611881
17621882 void (*set_cpus_allowed)(struct task_struct *p,
1763
- const struct cpumask *newmask);
1883
+ const struct cpumask *newmask,
1884
+ u32 flags);
17641885
17651886 void (*rq_online)(struct rq *rq);
17661887 void (*rq_offline)(struct rq *rq);
1888
+
1889
+ struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
17671890 #endif
17681891
1769
- void (*set_curr_task)(struct rq *rq);
17701892 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
17711893 void (*task_fork)(struct task_struct *p);
17721894 void (*task_dead)(struct task_struct *p);
....@@ -1792,25 +1914,32 @@
17921914 #ifdef CONFIG_FAIR_GROUP_SCHED
17931915 void (*task_change_group)(struct task_struct *p, int type);
17941916 #endif
1795
-};
1917
+} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
17961918
17971919 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
17981920 {
1921
+ WARN_ON_ONCE(rq->curr != prev);
17991922 prev->sched_class->put_prev_task(rq, prev);
18001923 }
18011924
1802
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1925
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
18031926 {
1804
- curr->sched_class->set_curr_task(rq);
1927
+ WARN_ON_ONCE(rq->curr != next);
1928
+ next->sched_class->set_next_task(rq, next, false);
18051929 }
18061930
1807
-#ifdef CONFIG_SMP
1808
-#define sched_class_highest (&stop_sched_class)
1809
-#else
1810
-#define sched_class_highest (&dl_sched_class)
1811
-#endif
1931
+/* Defined in include/asm-generic/vmlinux.lds.h */
1932
+extern struct sched_class __begin_sched_classes[];
1933
+extern struct sched_class __end_sched_classes[];
1934
+
1935
+#define sched_class_highest (__end_sched_classes - 1)
1936
+#define sched_class_lowest (__begin_sched_classes - 1)
1937
+
1938
+#define for_class_range(class, _from, _to) \
1939
+ for (class = (_from); class != (_to); class--)
1940
+
18121941 #define for_each_class(class) \
1813
- for (class = sched_class_highest; class; class = class->next)
1942
+ for_class_range(class, sched_class_highest, sched_class_lowest)
18141943
18151944 extern const struct sched_class stop_sched_class;
18161945 extern const struct sched_class dl_sched_class;
....@@ -1818,6 +1947,32 @@
18181947 extern const struct sched_class fair_sched_class;
18191948 extern const struct sched_class idle_sched_class;
18201949
1950
+static inline bool sched_stop_runnable(struct rq *rq)
1951
+{
1952
+ return rq->stop && task_on_rq_queued(rq->stop);
1953
+}
1954
+
1955
+static inline bool sched_dl_runnable(struct rq *rq)
1956
+{
1957
+ return rq->dl.dl_nr_running > 0;
1958
+}
1959
+
1960
+static inline bool sched_rt_runnable(struct rq *rq)
1961
+{
1962
+ return rq->rt.rt_queued > 0;
1963
+}
1964
+
1965
+static inline bool sched_fair_runnable(struct rq *rq)
1966
+{
1967
+ return rq->cfs.nr_running > 0;
1968
+}
1969
+
1970
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1971
+extern struct task_struct *pick_next_task_idle(struct rq *rq);
1972
+
1973
+#define SCA_CHECK 0x01
1974
+#define SCA_MIGRATE_DISABLE 0x02
1975
+#define SCA_MIGRATE_ENABLE 0x04
18211976
18221977 #ifdef CONFIG_SMP
18231978
....@@ -1825,8 +1980,30 @@
18251980
18261981 extern void trigger_load_balance(struct rq *rq);
18271982
1828
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1983
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
18291984
1985
+static inline struct task_struct *get_push_task(struct rq *rq)
1986
+{
1987
+ struct task_struct *p = rq->curr;
1988
+
1989
+ lockdep_assert_held(&rq->lock);
1990
+
1991
+ if (rq->push_busy)
1992
+ return NULL;
1993
+
1994
+ if (p->nr_cpus_allowed == 1)
1995
+ return NULL;
1996
+
1997
+ if (p->migration_disabled)
1998
+ return NULL;
1999
+
2000
+ rq->push_busy = true;
2001
+ return get_task_struct(p);
2002
+}
2003
+
2004
+extern int push_cpu_stop(void *arg);
2005
+
2006
+extern unsigned long __read_mostly max_load_balance_interval;
18302007 #endif
18312008
18322009 #ifdef CONFIG_CPU_IDLE
....@@ -1842,17 +2019,6 @@
18422019
18432020 return rq->idle_state;
18442021 }
1845
-
1846
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1847
-{
1848
- rq->idle_state_idx = idle_state_idx;
1849
-}
1850
-
1851
-static inline int idle_get_state_idx(struct rq *rq)
1852
-{
1853
- WARN_ON(!rcu_read_lock_held());
1854
- return rq->idle_state_idx;
1855
-}
18562022 #else
18572023 static inline void idle_set_state(struct rq *rq,
18582024 struct cpuidle_state *idle_state)
....@@ -1862,15 +2028,6 @@
18622028 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
18632029 {
18642030 return NULL;
1865
-}
1866
-
1867
-static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
1868
-{
1869
-}
1870
-
1871
-static inline int idle_get_state_idx(struct rq *rq)
1872
-{
1873
- return -1;
18742031 }
18752032 #endif
18762033
....@@ -1889,6 +2046,15 @@
18892046 extern void resched_curr(struct rq *rq);
18902047 extern void resched_cpu(int cpu);
18912048
2049
+#ifdef CONFIG_PREEMPT_LAZY
2050
+extern void resched_curr_lazy(struct rq *rq);
2051
+#else
2052
+static inline void resched_curr_lazy(struct rq *rq)
2053
+{
2054
+ resched_curr(rq);
2055
+}
2056
+#endif
2057
+
18922058 extern struct rt_bandwidth def_rt_bandwidth;
18932059 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
18942060
....@@ -1896,15 +2062,16 @@
18962062 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
18972063 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
18982064 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1899
-extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
19002065
19012066 #define BW_SHIFT 20
19022067 #define BW_UNIT (1 << BW_SHIFT)
19032068 #define RATIO_SHIFT 8
2069
+#define MAX_BW_BITS (64 - BW_SHIFT)
2070
+#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
19042071 unsigned long to_ratio(u64 period, u64 runtime);
19052072
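BW_SHIFT/BW_UNIT define the 20-bit fixed-point format used for deadline bandwidth, and the new MAX_BW caps a bandwidth value at what still fits in 64 bits after the shift. A rough standalone rendering of how to_ratio() maps a runtime/period pair into that format (to_ratio() is only declared here, so the body below is an assumption about its behaviour, not a copy of it):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)
#define MAX_BW_BITS	(64 - BW_SHIFT)
#define MAX_BW		((1ULL << MAX_BW_BITS) - 1)

/* runtime/period in 20-bit fixed point: BW_UNIT means 100% of a CPU (assumed) */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (!period)
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* 25 ms of runtime every 100 ms of period -> a quarter of BW_UNIT */
	uint64_t bw = to_ratio(100000000ULL, 25000000ULL);

	printf("bw = %llu (%.2f%% of BW_UNIT), MAX_BW = %llu\n",
	       (unsigned long long)bw, 100.0 * bw / BW_UNIT,
	       (unsigned long long)MAX_BW);
	return 0;
}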
19062073 extern void init_entity_runnable_average(struct sched_entity *se);
1907
-extern void post_init_entity_util_avg(struct sched_entity *se);
2074
+extern void post_init_entity_util_avg(struct task_struct *p);
19082075
19092076 #ifdef CONFIG_NO_HZ_FULL
19102077 extern bool sched_can_stop_tick(struct rq *rq);
....@@ -1917,12 +2084,7 @@
19172084 */
19182085 static inline void sched_update_tick_dependency(struct rq *rq)
19192086 {
1920
- int cpu;
1921
-
1922
- if (!tick_nohz_full_enabled())
1923
- return;
1924
-
1925
- cpu = cpu_of(rq);
2087
+ int cpu = cpu_of(rq);
19262088
19272089 if (!tick_nohz_full_cpu(cpu))
19282090 return;
....@@ -1942,13 +2104,16 @@
19422104 unsigned prev_nr = rq->nr_running;
19432105
19442106 rq->nr_running = prev_nr + count;
2107
+ if (trace_sched_update_nr_running_tp_enabled()) {
2108
+ call_trace_sched_update_nr_running(rq, count);
2109
+ }
19452110
1946
- if (prev_nr < 2 && rq->nr_running >= 2) {
19472111 #ifdef CONFIG_SMP
2112
+ if (prev_nr < 2 && rq->nr_running >= 2) {
19482113 if (!READ_ONCE(rq->rd->overload))
19492114 WRITE_ONCE(rq->rd->overload, 1);
1950
-#endif
19512115 }
2116
+#endif
19522117
19532118 sched_update_tick_dependency(rq);
19542119 }
....@@ -1956,6 +2121,10 @@
19562121 static inline void sub_nr_running(struct rq *rq, unsigned count)
19572122 {
19582123 rq->nr_running -= count;
2124
+ if (trace_sched_update_nr_running_tp_enabled()) {
2125
+ call_trace_sched_update_nr_running(rq, -count);
2126
+ }
2127
+
19592128 /* Check if we still need preemption */
19602129 sched_update_tick_dependency(rq);
19612130 }
....@@ -1995,7 +2164,24 @@
19952164
19962165 #endif /* CONFIG_SCHED_HRTICK */
19972166
2167
+#ifndef arch_scale_freq_tick
2168
+static __always_inline
2169
+void arch_scale_freq_tick(void)
2170
+{
2171
+}
2172
+#endif
2173
+
19982174 #ifndef arch_scale_freq_capacity
2175
+/**
2176
+ * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2177
+ * @cpu: the CPU in question.
2178
+ *
2179
+ * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
2180
+ *
2181
+ * f_curr
2182
+ * ------ * SCHED_CAPACITY_SCALE
2183
+ * f_max
2184
+ */
19992185 static __always_inline
20002186 unsigned long arch_scale_freq_capacity(int cpu)
20012187 {
....@@ -2003,17 +2189,8 @@
20032189 }
20042190 #endif
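The kernel-doc added above pins down what this hook returns: the frequency scale factor f_curr/f_max normalised to SCHED_CAPACITY_SCALE, with the default stub simply claiming full speed. A quick numeric rendering of the formula with invented frequencies:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

/* f_curr / f_max scaled to SCHED_CAPACITY_SCALE, as in the kernel-doc above */
static unsigned long freq_scale(unsigned long f_curr_khz, unsigned long f_max_khz)
{
	return f_curr_khz * SCHED_CAPACITY_SCALE / f_max_khz;
}

int main(void)
{
	/* e.g. a CPU currently at 1.2 GHz out of a 1.8 GHz maximum */
	printf("scale = %lu of %ld\n",
	       freq_scale(1200000, 1800000), SCHED_CAPACITY_SCALE);	/* 682 */
	return 0;
}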
20052191
2006
-#ifndef arch_scale_max_freq_capacity
2007
-struct sched_domain;
2008
-static __always_inline
2009
-unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
2010
-{
2011
- return SCHED_CAPACITY_SCALE;
2012
-}
2013
-#endif
2014
-
20152192 #ifdef CONFIG_SMP
2016
-#ifdef CONFIG_PREEMPT
2193
+#ifdef CONFIG_PREEMPTION
20172194
20182195 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
20192196
....@@ -2065,7 +2242,7 @@
20652242 return ret;
20662243 }
20672244
2068
-#endif /* CONFIG_PREEMPT */
2245
+#endif /* CONFIG_PREEMPTION */
20692246
20702247 /*
20712248 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
....@@ -2240,6 +2417,16 @@
22402417 static inline void nohz_balance_exit_idle(struct rq *rq) { }
22412418 #endif
22422419
2420
+#define MDF_PUSH 0x01
2421
+
2422
+static inline bool is_migration_disabled(struct task_struct *p)
2423
+{
2424
+#ifdef CONFIG_SMP
2425
+ return p->migration_disabled;
2426
+#else
2427
+ return false;
2428
+#endif
2429
+}
22432430
22442431 #ifdef CONFIG_SMP
22452432 static inline
....@@ -2298,7 +2485,7 @@
22982485 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
22992486
23002487 #ifdef CONFIG_CPU_FREQ
2301
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2488
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
23022489
23032490 /**
23042491 * cpufreq_update_util - Take a note about CPU utilization changes.
....@@ -2338,18 +2525,48 @@
23382525 #ifdef CONFIG_UCLAMP_TASK
23392526 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
23402527
2528
+/**
2529
+ * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
2530
+ * @rq: The rq to clamp against. Must not be NULL.
2531
+ * @util: The util value to clamp.
2532
+ * @p: The task to clamp against. Can be NULL if you want to clamp
2533
+ * against @rq only.
2534
+ *
2535
+ * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
2536
+ *
2537
+ * If sched_uclamp_used static key is disabled, then just return the util
2538
+ * without any clamping since uclamp aggregation at the rq level in the fast
2539
+ * path is disabled, rendering this operation a NOP.
2540
+ *
2541
+ * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
2542
+ * will return the correct effective uclamp value of the task even if the
2543
+ * static key is disabled.
2544
+ */
23412545 static __always_inline
23422546 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
23432547 struct task_struct *p)
23442548 {
2345
- unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2346
- unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2549
+ unsigned long min_util = 0;
2550
+ unsigned long max_util = 0;
2551
+
2552
+ if (!static_branch_likely(&sched_uclamp_used))
2553
+ return util;
23472554
23482555 if (p) {
2349
- min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2350
- max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2556
+ min_util = uclamp_eff_value(p, UCLAMP_MIN);
2557
+ max_util = uclamp_eff_value(p, UCLAMP_MAX);
2558
+
2559
+ /*
2560
+ * Ignore last runnable task's max clamp, as this task will
2561
+ * reset it. Similarly, no need to read the rq's min clamp.
2562
+ */
2563
+ if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
2564
+ goto out;
23512565 }
23522566
2567
+ min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
2568
+ max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
2569
+out:
23532570 /*
23542571 * Since CPU's {min,max}_util clamps are MAX aggregated considering
23552572 * RUNNABLE tasks with _different_ clamps, we can end up with an
....@@ -2360,6 +2577,24 @@
23602577
23612578 return clamp(util, min_util, max_util);
23622579 }
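uclamp_rq_util_with() now bails out early when the sched_uclamp_used static key is off, and when a task is supplied it MAX-aggregates the task's effective clamps with the rq clamps (skipping the stale rq values when UCLAMP_FLAG_IDLE is set) before the final clamp(). A small standalone model of just that aggregation with hard-coded numbers, no rq or task_struct involved:

#include <stdio.h>

#define max(a, b)		((a) > (b) ? (a) : (b))
#define min(a, b)		((a) < (b) ? (a) : (b))
#define clamp(val, lo, hi)	min(max(val, lo), hi)

/*
 * Model of the aggregation in uclamp_rq_util_with(): rq and task clamps
 * are MAX-aggregated, then the utilization is clamped into the result.
 */
static unsigned long clamped_util(unsigned long util,
				  unsigned long rq_min, unsigned long rq_max,
				  unsigned long p_min, unsigned long p_max)
{
	unsigned long min_util = max(rq_min, p_min);
	unsigned long max_util = max(rq_max, p_max);

	return clamp(util, min_util, max_util);
}

int main(void)
{
	/* a small task boosted to at least 512 on an otherwise unclamped rq */
	printf("%lu\n", clamped_util(100, 0, 1024, 512, 1024));	/* 512 */
	/* a big task while both rq and task are capped at 256 */
	printf("%lu\n", clamped_util(900, 0, 256, 0, 256));	/* 256 */
	return 0;
}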
2580
+
2581
+static inline bool uclamp_boosted(struct task_struct *p)
2582
+{
2583
+ return uclamp_eff_value(p, UCLAMP_MIN) > 0;
2584
+}
2585
+
2586
+/*
2587
+ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
2588
+ * by default in the fast path and only gets turned on once userspace performs
2589
+ * an operation that requires it.
2590
+ *
2591
+ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
2592
+ * hence is active.
2593
+ */
2594
+static inline bool uclamp_is_used(void)
2595
+{
2596
+ return static_branch_likely(&sched_uclamp_used);
2597
+}
23632598 #else /* CONFIG_UCLAMP_TASK */
23642599 static inline
23652600 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
....@@ -2367,12 +2602,36 @@
23672602 {
23682603 return util;
23692604 }
2605
+
2606
+static inline bool uclamp_boosted(struct task_struct *p)
2607
+{
2608
+ return false;
2609
+}
2610
+
2611
+static inline bool uclamp_is_used(void)
2612
+{
2613
+ return false;
2614
+}
23702615 #endif /* CONFIG_UCLAMP_TASK */
23712616
2372
-unsigned long task_util_est(struct task_struct *p);
2373
-unsigned int uclamp_task(struct task_struct *p);
2374
-bool uclamp_latency_sensitive(struct task_struct *p);
2375
-bool uclamp_boosted(struct task_struct *p);
2617
+#ifdef CONFIG_UCLAMP_TASK_GROUP
2618
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
2619
+{
2620
+ struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
2621
+ struct task_group *tg;
2622
+
2623
+ if (!css)
2624
+ return false;
2625
+ tg = container_of(css, struct task_group, css);
2626
+
2627
+ return tg->latency_sensitive;
2628
+}
2629
+#else
2630
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
2631
+{
2632
+ return false;
2633
+}
2634
+#endif /* CONFIG_UCLAMP_TASK_GROUP */
23762635
23772636 #ifdef arch_scale_freq_capacity
23782637 # ifndef arch_scale_freq_invariant
....@@ -2404,20 +2663,6 @@
24042663 ENERGY_UTIL,
24052664 };
24062665
2407
-#ifdef CONFIG_SMP
2408
-static inline unsigned long cpu_util_cfs(struct rq *rq)
2409
-{
2410
- unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2411
-
2412
- if (sched_feat(UTIL_EST)) {
2413
- util = max_t(unsigned long, util,
2414
- READ_ONCE(rq->cfs.avg.util_est.enqueued));
2415
- }
2416
-
2417
- return util;
2418
-}
2419
-#endif
2420
-
24212666 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
24222667
24232668 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
....@@ -2434,11 +2679,22 @@
24342679 return READ_ONCE(rq->avg_dl.util_avg);
24352680 }
24362681
2682
+static inline unsigned long cpu_util_cfs(struct rq *rq)
2683
+{
2684
+ unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2685
+
2686
+ if (sched_feat(UTIL_EST)) {
2687
+ util = max_t(unsigned long, util,
2688
+ READ_ONCE(rq->cfs.avg.util_est.enqueued));
2689
+ }
2690
+
2691
+ return util;
2692
+}
2693
+
24372694 static inline unsigned long cpu_util_rt(struct rq *rq)
24382695 {
24392696 return READ_ONCE(rq->avg_rt.util_avg);
24402697 }
2441
-
24422698 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
24432699 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
24442700 unsigned long max, enum schedutil_type type,
....@@ -2476,14 +2732,78 @@
24762732 }
24772733 #endif
24782734
2479
-#ifdef CONFIG_SMP
2480
-#ifdef CONFIG_ENERGY_MODEL
2735
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2736
+
24812737 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2482
-#else
2738
+
2739
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2740
+
2741
+static inline bool sched_energy_enabled(void)
2742
+{
2743
+ return static_branch_unlikely(&sched_energy_present);
2744
+}
2745
+
2746
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
2747
+
24832748 #define perf_domain_span(pd) NULL
2484
-#endif
2749
+static inline bool sched_energy_enabled(void) { return false; }
2750
+
2751
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2752
+
2753
+#ifdef CONFIG_MEMBARRIER
2754
+/*
2755
+ * The scheduler provides memory barriers required by membarrier between:
2756
+ * - prior user-space memory accesses and store to rq->membarrier_state,
2757
+ * - store to rq->membarrier_state and following user-space memory accesses.
2758
+ * In the same way it provides those guarantees around store to rq->curr.
2759
+ */
2760
+static inline void membarrier_switch_mm(struct rq *rq,
2761
+ struct mm_struct *prev_mm,
2762
+ struct mm_struct *next_mm)
2763
+{
2764
+ int membarrier_state;
2765
+
2766
+ if (prev_mm == next_mm)
2767
+ return;
2768
+
2769
+ membarrier_state = atomic_read(&next_mm->membarrier_state);
2770
+ if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2771
+ return;
2772
+
2773
+ WRITE_ONCE(rq->membarrier_state, membarrier_state);
2774
+}
2775
+#else
2776
+static inline void membarrier_switch_mm(struct rq *rq,
2777
+ struct mm_struct *prev_mm,
2778
+ struct mm_struct *next_mm)
2779
+{
2780
+}
24852781 #endif
24862782
24872783 #ifdef CONFIG_SMP
2488
-extern struct static_key_false sched_energy_present;
2784
+static inline bool is_per_cpu_kthread(struct task_struct *p)
2785
+{
2786
+ if (!(p->flags & PF_KTHREAD))
2787
+ return false;
2788
+
2789
+ if (p->nr_cpus_allowed != 1)
2790
+ return false;
2791
+
2792
+ return true;
2793
+}
24892794 #endif
2795
+
2796
+void swake_up_all_locked(struct swait_queue_head *q);
2797
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
2798
+
2799
+/*
2800
+ * task_may_not_preempt - check whether a task may not be preemptible soon
2801
+ */
2802
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
2803
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
2804
+#else
2805
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
2806
+{
2807
+ return false;
2808
+}
2809
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */