2023-11-22 f743a7adbd6e230d66a6206fa115b59fec2d88eb
--- a/kernel/include/linux/sched.h
+++ b/kernel/include/linux/sched.h
@@ -30,6 +30,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/rseq.h>
 #include <linux/android_kabi.h>
+#include <asm/kmap_types.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -103,11 +104,7 @@
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)
 
-#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
-
 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
-
-#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
 #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 					 (task->flags & PF_FROZEN) == 0 && \
@@ -136,6 +133,9 @@
 		smp_store_mb(current->state, (state_value));	\
 	} while (0)
 
+#define __set_current_state_no_track(state_value)		\
+	current->state = (state_value);
+
 #define set_special_state(state_value)					\
 	do {								\
 		unsigned long flags; /* may shadow */			\
@@ -145,6 +145,7 @@
 		current->state = (state_value);				\
 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 	} while (0)
+
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -189,6 +190,9 @@
 #define set_current_state(state_value)					\
 	smp_store_mb(current->state, (state_value))
 
+#define __set_current_state_no_track(state_value)		\
+	__set_current_state(state_value)
+
 /*
  * set_special_state() should be used for those states when the blocking task
  * can not use the regular condition based wait-loop. In that case we must
@@ -224,6 +228,8 @@
 extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
 extern void io_schedule(void);
+
+int cpu_nr_pinned(int cpu);
 
 /**
  * struct prev_cputime - snapshot of system and user cputime
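
cpu_nr_pinned() belongs to the migrate_disable() machinery added further down: it is expected to report how many tasks are currently pinned (migrate-disabled) on a given CPU, so that the CPU-hotplug path can wait for them to drain before the CPU is actually taken down (compare takedown_cpu_task at the end of this patch). A hedged, illustrative wait loop, not taken from the real hotplug code:

	/* Illustrative only: sleep until no task is still pinned on @cpu. */
	static void wait_for_pinned_tasks(int cpu)
	{
		while (cpu_nr_pinned(cpu))
			schedule_timeout_uninterruptible(1);	/* one tick at a time */
	}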
@@ -654,6 +660,8 @@
 #endif
 	/* -1 unrunnable, 0 runnable, >0 stopped: */
 	volatile long			state;
+	/* saved state for "spinlock sleepers" */
+	volatile long			saved_state;
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
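
saved_state exists because spinlocks become sleeping (rtmutex-based) locks on PREEMPT_RT: a task that has already set itself TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE for its own wait loop may still have to block on such a lock. The caller's state is parked in saved_state while the task sleeps for the lock and is restored on wakeup, so the outer wait loop behaves as written; the wake_q_sleeper and wake_up_lock_sleeper() additions below are the wakeup side of the same mechanism. A minimal self-contained model of the handshake, with illustrative types and names rather than the kernel implementation:

	#include <stdio.h>

	enum { TASK_RUNNING = 0, TASK_INTERRUPTIBLE = 1, TASK_UNINTERRUPTIBLE = 2 };

	struct task {
		long state;		/* what the scheduler inspects                */
		long saved_state;	/* caller's state, parked while lock-sleeping */
	};

	/* Task blocks on an RT "spinlock": park the current state first. */
	static void rt_lock_sleep(struct task *t)
	{
		t->saved_state = t->state;
		t->state = TASK_UNINTERRUPTIBLE;
	}

	/* Lock released: restore whatever the caller had set before. */
	static void rt_lock_wakeup(struct task *t)
	{
		t->state = t->saved_state;
		t->saved_state = TASK_RUNNING;
	}

	int main(void)
	{
		struct task t = { .state = TASK_INTERRUPTIBLE };

		rt_lock_sleep(&t);			/* outer wait state is parked     */
		rt_lock_wakeup(&t);			/* ...and restored after the lock */
		printf("state=%ld\n", t.state);		/* prints 1 (TASK_INTERRUPTIBLE)  */
		return 0;
	}

In the real kernel the save/restore happens under pi_lock, which is why the task_is_traced() helpers added later in this patch take pi_lock before consulting saved_state.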
@@ -728,8 +736,24 @@
 
 	unsigned int			policy;
 	int				nr_cpus_allowed;
-	cpumask_t			cpus_allowed;
+//	cpumask_t			cpus_allowed;
 	cpumask_t			cpus_requested;
+	const cpumask_t			*cpus_ptr;
+	cpumask_t			cpus_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+	int				migrate_disable;
+	bool				migrate_disable_scheduled;
+# ifdef CONFIG_SCHED_DEBUG
+	int				pinned_on_cpu;
+# endif
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+# ifdef CONFIG_SCHED_DEBUG
+	int				migrate_disable;
+# endif
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int				sleeping_lock;
+#endif
 
 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
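
The affinity handling is split in two here: cpus_mask is the affinity the task actually owns, while cpus_ptr is the mask the scheduler dereferences for placement decisions. That indirection is what makes migrate_disable() cheap: it can temporarily point cpus_ptr at the current CPU's mask and bump the migrate_disable nesting counter, then undo both in migrate_enable(), without rewriting the task's real affinity. A rough self-contained sketch of the idea (single-word cpumask and illustrative names; the in-kernel version presumably uses cpumask_of() and per-CPU data):

	#include <assert.h>

	typedef unsigned long cpumask_t;	/* one bit per CPU, simplified */

	struct task {
		const cpumask_t	*cpus_ptr;	/* what placement decisions consult */
		cpumask_t	cpus_mask;	/* affinity the task owns           */
		cpumask_t	pinned_mask;	/* scratch mask while pinned        */
		int		migrate_disable;/* nesting counter                  */
	};

	static void migrate_disable_sketch(struct task *t, int this_cpu)
	{
		if (t->migrate_disable++ == 0) {
			t->pinned_mask = 1UL << this_cpu;
			t->cpus_ptr = &t->pinned_mask;	/* only the current CPU */
		}
	}

	static void migrate_enable_sketch(struct task *t)
	{
		if (--t->migrate_disable == 0)
			t->cpus_ptr = &t->cpus_mask;	/* back to the full affinity */
	}

	int main(void)
	{
		struct task t = { .cpus_mask = 0xf };	/* CPUs 0-3 allowed */

		t.cpus_ptr = &t.cpus_mask;
		migrate_disable_sketch(&t, 2);
		assert(*t.cpus_ptr == (1UL << 2));	/* pinned to CPU 2 */
		migrate_enable_sketch(&t);
		assert(*t.cpus_ptr == 0xf);
		return 0;
	}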
@@ -901,6 +925,9 @@
 #ifdef CONFIG_POSIX_TIMERS
 	struct task_cputime		cputime_expires;
 	struct list_head		cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct		*posix_timer_list;
+#endif
 #endif
 
 	/* Process credentials: */
@@ -945,11 +972,17 @@
 	/* Signal handlers: */
 	struct signal_struct		*signal;
 	struct sighand_struct		*sighand;
+	struct sigqueue			*sigqueue_cache;
+
 	sigset_t			blocked;
 	sigset_t			real_blocked;
 	/* Restored if set_restore_sigmask() was used: */
 	sigset_t			saved_sigmask;
 	struct sigpending		pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* TODO: move me into ->restart_block ? */
+	struct siginfo			forced_info;
+#endif
 	unsigned long			sas_ss_sp;
 	size_t				sas_ss_size;
 	unsigned int			sas_ss_flags;
@@ -974,6 +1007,7 @@
 	raw_spinlock_t			pi_lock;
 
 	struct wake_q_node		wake_q;
+	struct wake_q_node		wake_q_sleeper;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1271,8 +1305,22 @@
 	unsigned int			sequential_io;
 	unsigned int			sequential_io_avg;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head			put_rcu;
+	int				softirq_nestcnt;
+	unsigned int			softirqs_raised;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+	int				kmap_idx;
+	pte_t				kmap_pte[KM_TYPE_NR];
+# endif
+#endif
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long			task_state_change;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int				xmit_recursion;
 #endif
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
@@ -1490,6 +1538,7 @@
 /*
  * Per process flags
  */
+#define PF_IN_SOFTIRQ		0x00000001	/* Task is serving softirq */
 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
 #define PF_EXITING		0x00000004	/* Getting shut down */
 #define PF_VCPU			0x00000010	/* I'm a virtual CPU */
@@ -1717,6 +1766,7 @@
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
@@ -1799,6 +1849,89 @@
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
+
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#endif
+	return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+	bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	traced_stopped = __task_is_stopped_or_traced(task);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+	traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+	return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+	bool traced = false;
+
+	if (task->state & __TASK_TRACED)
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* in case the task is sleeping on tasklist_lock */
+	raw_spin_lock_irq(&task->pi_lock);
+	if (task->state & __TASK_TRACED)
+		traced = true;
+	else if (task->saved_state & __TASK_TRACED)
+		traced = true;
+	raw_spin_unlock_irq(&task->pi_lock);
+#endif
+	return traced;
+}
+
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
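
Two separate things arrive in the hunk above. The *_lazy helpers back CONFIG_PREEMPT_LAZY: roughly, preemption requests coming from the fair (SCHED_OTHER) class only set TIF_NEED_RESCHED_LAZY, which is honoured at the next preemption point or return to user space, while real-time preemption requests keep using TIF_NEED_RESCHED and preempt immediately. The task_is_traced()/task_is_stopped_or_traced() functions replace the macros removed near the top of this patch: on PREEMPT_RT a stopped or traced task may currently be sleeping on a spinlock with its real state parked in saved_state, so the RT variants also check saved_state, and do so under pi_lock so the test cannot race with the lock wakeup path. A hedged sketch of the wakeup-side flag choice under lazy preemption (illustrative only, not the scheduler's code):

	#include <stdbool.h>

	/* Which reschedule flag should a wakeup raise on the preempted CPU? */
	enum resched_flag { RESCHED_NONE, RESCHED_LAZY, RESCHED_NOW };

	static enum resched_flag resched_on_wakeup(bool woken_is_rt_class,
						    bool preempt_lazy_enabled)
	{
		/* Real-time wakeups must preempt immediately. */
		if (woken_is_rt_class || !preempt_lazy_enabled)
			return RESCHED_NOW;
		/*
		 * Fair-class wakeups only request a lazy reschedule; it is
		 * acted upon at the next preemption point instead of in the
		 * middle of a lazily preempt-disabled region.
		 */
		return RESCHED_LAZY;
	}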
@@ -1850,6 +1983,23 @@
 {
 	return unlikely(tif_need_resched());
 }
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void sleeping_lock_inc(void)
+{
+	current->sleeping_lock++;
+}
+
+static inline void sleeping_lock_dec(void)
+{
+	current->sleeping_lock--;
+}
+
+#else
+
+static inline void sleeping_lock_inc(void) { }
+static inline void sleeping_lock_dec(void) { }
+#endif
 
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
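
sleeping_lock counts how many rtmutex-based "sleeping spinlocks" the current task is blocked on or acquiring; sleeping_lock_inc()/sleeping_lock_dec() bracket the lock slow path so that other parts of the RT patch set (the RCU schedule checks, for instance) can tell that a context switch was caused by such a lock rather than by a genuine voluntary sleep. A small sketch of the bracketing pattern, with the blocking step left abstract:

	/* Illustrative bracketing of a sleeping-spinlock slow path. */
	struct task { int sleeping_lock; };

	static void block_until_lock_is_free(struct task *t)
	{
		(void)t;	/* placeholder for "schedule() until the lock is granted" */
	}

	static void sleeping_spinlock_slowpath(struct task *curr)
	{
		curr->sleeping_lock++;		/* sleeping_lock_inc()               */
		block_until_lock_is_free(curr);	/* may schedule while counter is up  */
		curr->sleeping_lock--;		/* sleeping_lock_dec()               */
	}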
@@ -2022,4 +2172,6 @@
 
 #endif
 
+extern struct task_struct *takedown_cpu_task;
+
 #endif