hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/sched/sched.h
@@ -996,7 +996,6 @@
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
-	unsigned char		balance_flags;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;
@@ -1027,10 +1026,6 @@
 
 	/* This is used to determine avg_idle's max value */
 	u64			max_idle_balance_cost;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	struct rcuwait		hotplug_wait;
-#endif
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1082,12 +1077,6 @@
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state	*idle_state;
 #endif
-
-#ifdef CONFIG_SMP
-	unsigned int		nr_pinned;
-#endif
-	unsigned int		push_busy;
-	struct cpu_stop_work	push_work;
 
 	ANDROID_VENDOR_DATA_ARRAY(1, 96);
 	ANDROID_OEM_DATA_ARRAY(1, 16);
@@ -1286,9 +1275,6 @@
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
 	rf->clock_update_flags = 0;
 #endif
-#ifdef CONFIG_SMP
-	SCHED_WARN_ON(rq->balance_callback);
-#endif
 }
 
 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
@@ -1448,9 +1434,6 @@
 
 #ifdef CONFIG_SMP
 
-#define BALANCE_WORK	0x01
-#define BALANCE_PUSH	0x02
-
 extern int migrate_swap(struct task_struct *p, struct task_struct *t,
 			int cpu, int scpu);
 static inline void
@@ -1460,13 +1443,12 @@
 {
 	lockdep_assert_held(&rq->lock);
 
-	if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
+	if (unlikely(head->next))
 		return;
 
 	head->func = (void (*)(struct callback_head *))func;
 	head->next = rq->balance_callback;
 	rq->balance_callback = head;
-	rq->balance_flags |= BALANCE_WORK;
 }
 
 #define rcu_dereference_check_sched_domain(p) \
@@ -1795,7 +1777,6 @@
 #define WF_FORK			0x02 /* Child wakeup after fork */
 #define WF_MIGRATED		0x04 /* Internal use, task got migrated */
 #define WF_ON_CPU		0x08 /* Wakee is on_cpu */
-#define WF_LOCK_SLEEPER		0x10 /* Wakeup spinlock "sleeper" */
 #define WF_ANDROID_VENDOR	0x1000 /* Vendor specific for Android */
 
 /*
@@ -1880,13 +1861,10 @@
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask,
-				 u32 flags);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
-
-	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
 #endif
 
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
@@ -1970,38 +1948,13 @@
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 extern struct task_struct *pick_next_task_idle(struct rq *rq);
 
-#define SCA_CHECK		0x01
-#define SCA_MIGRATE_DISABLE	0x02
-#define SCA_MIGRATE_ENABLE	0x04
-
 #ifdef CONFIG_SMP
 
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
-
-static inline struct task_struct *get_push_task(struct rq *rq)
-{
-	struct task_struct *p = rq->curr;
-
-	lockdep_assert_held(&rq->lock);
-
-	if (rq->push_busy)
-		return NULL;
-
-	if (p->nr_cpus_allowed == 1)
-		return NULL;
-
-	if (p->migration_disabled)
-		return NULL;
-
-	rq->push_busy = true;
-	return get_task_struct(p);
-}
-
-extern int push_cpu_stop(void *arg);
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
 extern unsigned long __read_mostly max_load_balance_interval;
 #endif
@@ -2045,15 +1998,6 @@
 
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
-
-#ifdef CONFIG_PREEMPT_LAZY
-extern void resched_curr_lazy(struct rq *rq);
-#else
-static inline void resched_curr_lazy(struct rq *rq)
-{
-	resched_curr(rq);
-}
-#endif
 
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
@@ -2417,16 +2361,6 @@
 static inline void nohz_balance_exit_idle(struct rq *rq) { }
 #endif
 
-#define MDF_PUSH	0x01
-
-static inline bool is_migration_disabled(struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->migration_disabled;
-#else
-	return false;
-#endif
-}
 
 #ifdef CONFIG_SMP
 static inline