  ..    ..
  30    30  #include <linux/task_io_accounting.h>
  31    31  #include <linux/rseq.h>
  32    32  #include <linux/android_kabi.h>
        33  +#include <asm/kmap_types.h>
  33    34  
  34    35  /* task_struct member predeclarations (sorted alphabetically): */
  35    36  struct audit_context;
  ..    ..
 103   104  					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 104   105  					 TASK_PARKED)
 105   106  
 106        -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
 107        -
 108   107  #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
 109        -
 110        -#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 111   108  
 112   109  #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 113   110  					(task->flags & PF_FROZEN) == 0 && \
  ..    ..
 136   133  		smp_store_mb(current->state, (state_value)); \
 137   134  	} while (0)
 138   135  
       136  +#define __set_current_state_no_track(state_value) \
       137  +	current->state = (state_value);
       138  +
 139   139  #define set_special_state(state_value) \
 140   140  	do { \
 141   141  		unsigned long flags; /* may shadow */ \
  ..    ..
 145   145  		current->state = (state_value); \
 146   146  		raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
 147   147  	} while (0)
       148  +
 148   149  #else
 149   150  /*
 150   151   * set_current_state() includes a barrier so that the write of current->state
  ..    ..
 189   190  #define set_current_state(state_value) \
 190   191  	smp_store_mb(current->state, (state_value))
 191   192  
       193  +#define __set_current_state_no_track(state_value) \
       194  +	__set_current_state(state_value)
       195  +
 192   196  /*
 193   197   * set_special_state() should be used for those states when the blocking task
 194   198   * can not use the regular condition based wait-loop. In that case we must
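A rough sketch (not part of this patch) of the pattern __set_current_state_no_track() is meant for: an RT "sleeping spinlock" slow path parks the caller's real wait state in the saved_state field added further down in this diff, switches to the lock-sleep state without going through the state-change tracking, and restores the original state afterwards. The function name below is illustrative only.

static void rt_lock_sleep_sketch(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	/* remember what the caller was really waiting as, e.g. TASK_INTERRUPTIBLE */
	current->saved_state = current->state;
	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	schedule();	/* block until the lock owner wakes us */

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	/* put the caller's original wait state back */
	__set_current_state_no_track(current->saved_state);
	current->saved_state = TASK_RUNNING;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
}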
  ..    ..
 224   228  extern void io_schedule_finish(int token);
 225   229  extern long io_schedule_timeout(long timeout);
 226   230  extern void io_schedule(void);
       231  +
       232  +int cpu_nr_pinned(int cpu);
 227   233  
 228   234  /**
 229   235   * struct prev_cputime - snapshot of system and user cputime
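Assuming cpu_nr_pinned() reports how many migrate-disabled tasks are currently pinned to the given CPU (its role elsewhere in the RT series), a hotplug-style caller could poll it before tearing a CPU down. The function below is purely illustrative:

static void wait_for_unpinned_sketch(int cpu)
{
	/* illustrative: poll until no migrate-disabled task remains pinned here */
	while (cpu_nr_pinned(cpu))
		schedule_timeout_uninterruptible(1);
}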
  ..    ..
 654   660  #endif
 655   661  	/* -1 unrunnable, 0 runnable, >0 stopped: */
 656   662  	volatile long state;
       663  +	/* saved state for "spinlock sleepers" */
       664  +	volatile long saved_state;
 657   665  
 658   666  	/*
 659   667  	 * This begins the randomizable portion of task_struct. Only
  ..    ..
 728   736  
 729   737  	unsigned int policy;
 730   738  	int nr_cpus_allowed;
 731        -	cpumask_t cpus_allowed;
       739  +//	cpumask_t cpus_allowed;
 732   740  	cpumask_t cpus_requested;
       741  +	const cpumask_t *cpus_ptr;
       742  +	cpumask_t cpus_mask;
       743  +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
       744  +	int migrate_disable;
       745  +	bool migrate_disable_scheduled;
       746  +# ifdef CONFIG_SCHED_DEBUG
       747  +	int pinned_on_cpu;
       748  +# endif
       749  +#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
       750  +# ifdef CONFIG_SCHED_DEBUG
       751  +	int migrate_disable;
       752  +# endif
       753  +#endif
       754  +#ifdef CONFIG_PREEMPT_RT_FULL
       755  +	int sleeping_lock;
       756  +#endif
 733   757  
 734   758  #ifdef CONFIG_PREEMPT_RCU
 735   759  	int rcu_read_lock_nesting;
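The hunk above replaces direct use of cpus_allowed with a pointer/storage pair: cpus_mask holds the affinity bits and cpus_ptr normally points at it, but can be redirected (for example while migration is disabled) without rewriting the mask. A minimal sketch of the intended read-side convention; the wrapper function is illustrative, not from this patch:

static bool task_may_run_on_sketch(struct task_struct *p, int cpu)
{
	/* readers go through the pointer, never through cpus_mask directly */
	return cpumask_test_cpu(cpu, p->cpus_ptr);
}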
  ..    ..
 901   925  #ifdef CONFIG_POSIX_TIMERS
 902   926  	struct task_cputime cputime_expires;
 903   927  	struct list_head cpu_timers[3];
       928  +#ifdef CONFIG_PREEMPT_RT_BASE
       929  +	struct task_struct *posix_timer_list;
       930  +#endif
 904   931  #endif
 905   932  
 906   933  	/* Process credentials: */
  ..    ..
 945   972  	/* Signal handlers: */
 946   973  	struct signal_struct *signal;
 947   974  	struct sighand_struct *sighand;
       975  +	struct sigqueue *sigqueue_cache;
       976  +
 948   977  	sigset_t blocked;
 949   978  	sigset_t real_blocked;
 950   979  	/* Restored if set_restore_sigmask() was used: */
 951   980  	sigset_t saved_sigmask;
 952   981  	struct sigpending pending;
       982  +#ifdef CONFIG_PREEMPT_RT_FULL
       983  +	/* TODO: move me into ->restart_block ? */
       984  +	struct siginfo forced_info;
       985  +#endif
 953   986  	unsigned long sas_ss_sp;
 954   987  	size_t sas_ss_size;
 955   988  	unsigned int sas_ss_flags;
  ..    ..
 974  1007  	raw_spinlock_t pi_lock;
 975  1008  
 976  1009  	struct wake_q_node wake_q;
      1010  +	struct wake_q_node wake_q_sleeper;
 977  1011  
 978  1012  #ifdef CONFIG_RT_MUTEXES
 979  1013  	/* PI waiters blocked on a rt_mutex held by this task: */
  ..    ..
1271  1305  	unsigned int sequential_io;
1272  1306  	unsigned int sequential_io_avg;
1273  1307  #endif
      1308  +#ifdef CONFIG_PREEMPT_RT_BASE
      1309  +	struct rcu_head put_rcu;
      1310  +	int softirq_nestcnt;
      1311  +	unsigned int softirqs_raised;
      1312  +#endif
      1313  +#ifdef CONFIG_PREEMPT_RT_FULL
      1314  +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
      1315  +	int kmap_idx;
      1316  +	pte_t kmap_pte[KM_TYPE_NR];
      1317  +# endif
      1318  +#endif
1274  1319  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1275  1320  	unsigned long task_state_change;
      1321  +#endif
      1322  +#ifdef CONFIG_PREEMPT_RT_FULL
      1323  +	int xmit_recursion;
1276  1324  #endif
1277  1325  	int pagefault_disabled;
1278  1326  #ifdef CONFIG_MMU
  ..    ..
1490  1538  /*
1491  1539   * Per process flags
1492  1540   */
      1541  +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
1493  1542  #define PF_IDLE 0x00000002 /* I am an IDLE thread */
1494  1543  #define PF_EXITING 0x00000004 /* Getting shut down */
1495  1544  #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
  ..    ..
1717  1766  
1718  1767  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1719  1768  extern int wake_up_process(struct task_struct *tsk);
      1769  +extern int wake_up_lock_sleeper(struct task_struct *tsk);
1720  1770  extern void wake_up_new_task(struct task_struct *tsk);
1721  1771  
1722  1772  #ifdef CONFIG_SMP
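wake_up_lock_sleeper() appears to be the wakeup counterpart of the saved_state machinery added earlier in this diff: a lock-release path would presumably use it instead of wake_up_process() so the wakeup is attributed to the sleeping lock and the state parked in saved_state is left intact for the eventual "real" wakeup. A trivial illustrative fragment; waiter_task is hypothetical:

static void release_wakeup_sketch(struct task_struct *waiter_task)
{
	/* illustrative: wake a task that blocked on an RT sleeping lock */
	wake_up_lock_sleeper(waiter_task);
}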
  ..    ..
1799  1849  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1800  1850  }
1801  1851  
      1852  +#ifdef CONFIG_PREEMPT_LAZY
      1853  +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
      1854  +{
      1855  +	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
      1856  +}
      1857  +
      1858  +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
      1859  +{
      1860  +	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
      1861  +}
      1862  +
      1863  +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
      1864  +{
      1865  +	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
      1866  +}
      1867  +
      1868  +static inline int need_resched_lazy(void)
      1869  +{
      1870  +	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
      1871  +}
      1872  +
      1873  +static inline int need_resched_now(void)
      1874  +{
      1875  +	return test_thread_flag(TIF_NEED_RESCHED);
      1876  +}
      1877  +
      1878  +#else
      1879  +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
      1880  +static inline int need_resched_lazy(void) { return 0; }
      1881  +
      1882  +static inline int need_resched_now(void)
      1883  +{
      1884  +	return test_thread_flag(TIF_NEED_RESCHED);
      1885  +}
      1886  +
      1887  +#endif
      1888  +
      1889  +
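Illustrative sketch of the intended PREEMPT_LAZY policy (the function below is not from this patch and only builds on a CONFIG_PREEMPT_LAZY kernel): wakeups of ordinary tasks request only a lazy reschedule, which can be deferred to a cheaper preemption point, while real-time wakeups still demand an immediate one via the existing TIF_NEED_RESCHED path.

static void request_resched_sketch(struct task_struct *curr, bool rt_wakeup)
{
	if (rt_wakeup)
		set_tsk_need_resched(curr);		/* hard request: preempt as soon as possible */
	else
		set_tsk_need_resched_lazy(curr);	/* lazy request: defer to a safe, cheap point */
}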
      1890  +static inline bool __task_is_stopped_or_traced(struct task_struct *task)
      1891  +{
      1892  +	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
      1893  +		return true;
      1894  +#ifdef CONFIG_PREEMPT_RT_FULL
      1895  +	if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
      1896  +		return true;
      1897  +#endif
      1898  +	return false;
      1899  +}
      1900  +
      1901  +static inline bool task_is_stopped_or_traced(struct task_struct *task)
      1902  +{
      1903  +	bool traced_stopped;
      1904  +
      1905  +#ifdef CONFIG_PREEMPT_RT_FULL
      1906  +	unsigned long flags;
      1907  +
      1908  +	raw_spin_lock_irqsave(&task->pi_lock, flags);
      1909  +	traced_stopped = __task_is_stopped_or_traced(task);
      1910  +	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
      1911  +#else
      1912  +	traced_stopped = __task_is_stopped_or_traced(task);
      1913  +#endif
      1914  +	return traced_stopped;
      1915  +}
      1916  +
      1917  +static inline bool task_is_traced(struct task_struct *task)
      1918  +{
      1919  +	bool traced = false;
      1920  +
      1921  +	if (task->state & __TASK_TRACED)
      1922  +		return true;
      1923  +#ifdef CONFIG_PREEMPT_RT_FULL
      1924  +	/* in case the task is sleeping on tasklist_lock */
      1925  +	raw_spin_lock_irq(&task->pi_lock);
      1926  +	if (task->state & __TASK_TRACED)
      1927  +		traced = true;
      1928  +	else if (task->saved_state & __TASK_TRACED)
      1929  +		traced = true;
      1930  +	raw_spin_unlock_irq(&task->pi_lock);
      1931  +#endif
      1932  +	return traced;
      1933  +}
      1934  +
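The single-line macros removed earlier in this diff could miss a traced or stopped task whose real state is temporarily parked in saved_state while it sleeps on an RT sleeping lock; taking pi_lock closes the window against that save/restore. An illustrative ptrace-style caller (the function and error handling are hypothetical):

static int check_tracee_sketch(struct task_struct *child)
{
	/* the helper now also sees a tracee blocked on a sleeping lock */
	if (!task_is_stopped_or_traced(child))
		return -ESRCH;
	return 0;
}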
1802  1935  /*
1803  1936   * cond_resched() and cond_resched_lock(): latency reduction via
1804  1937   * explicit rescheduling in places that are safe. The return
  ..    ..
1850  1983  {
1851  1984  	return unlikely(tif_need_resched());
1852  1985  }
      1986  +
      1987  +#ifdef CONFIG_PREEMPT_RT_FULL
      1988  +static inline void sleeping_lock_inc(void)
      1989  +{
      1990  +	current->sleeping_lock++;
      1991  +}
      1992  +
      1993  +static inline void sleeping_lock_dec(void)
      1994  +{
      1995  +	current->sleeping_lock--;
      1996  +}
      1997  +
      1998  +#else
      1999  +
      2000  +static inline void sleeping_lock_inc(void) { }
      2001  +static inline void sleeping_lock_dec(void) { }
      2002  +#endif
1853  2003  
1854  2004  /*
1855  2005   * Wrappers for p->thread_info->cpu access. No-op on UP.
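A sketch of how a sleeping-lock entry point could bracket a possible sleep with these counters, so other code (for instance RCU or the scheduler) can tell that the block was caused by a spinlock substitute rather than an ordinary voluntary sleep. The lock type and slow-path helper are hypothetical stand-ins:

struct hypothetical_rt_lock;					/* illustrative stand-in type */
extern void hypothetical_lock_slowpath(struct hypothetical_rt_lock *lock);

static void rt_sleeping_lock_sketch(struct hypothetical_rt_lock *lock)
{
	sleeping_lock_inc();
	/* the slow path may schedule(); the counter records why we blocked */
	hypothetical_lock_slowpath(lock);
	sleeping_lock_dec();
}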
  ..    ..
2022  2172  
2023  2173  #endif
2024  2174  
      2175  +extern struct task_struct *takedown_cpu_task;
      2176  +
2025  2177  #endif