@@ -113,7 +113,11 @@
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)
 
+#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
+
 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
+
+#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
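The two added macros mirror the existing task_is_stopped() and take the place of the out-of-line RT helpers removed near the end of this patch; both are plain tests on task->state. A hedged sketch of how a call site reads (illustrative only, not part of the patch):

static void report_task_state(struct task_struct *p)
{
	if (task_is_traced(p))			/* ptrace stop */
		pr_info("%d: __TASK_TRACED\n", p->pid);
	else if (task_is_stopped_or_traced(p))	/* job-control stop */
		pr_info("%d: __TASK_STOPPED\n", p->pid);
}
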
@@ -137,9 +141,6 @@
 		current->task_state_change = _THIS_IP_;			\
 		smp_store_mb(current->state, (state_value));		\
 	} while (0)
-
-#define __set_current_state_no_track(state_value)	\
-	current->state = (state_value);
 
 #define set_special_state(state_value)					\
 	do {								\
@@ -193,9 +194,6 @@
 
 #define set_current_state(state_value)				\
 	smp_store_mb(current->state, (state_value))
-
-#define __set_current_state_no_track(state_value)	\
-	__set_current_state(state_value)
 
 /*
  * set_special_state() should be used for those states when the blocking task
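Both hunks above drop __set_current_state_no_track(), the RT-only helper that stored current->state directly, without debug tracking or a barrier; callers are left with the standard pair. As a hedged reminder of why set_current_state() carries a barrier, a sketch of the canonical wait loop this header documents (wake_cond is a made-up condition):

for (;;) {
	set_current_state(TASK_UNINTERRUPTIBLE);	/* store state + smp_mb() */
	if (wake_cond)					/* ordered against the waker's wake_up() */
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);			/* plain store, no barrier needed */
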
@@ -655,13 +653,6 @@
 	struct wake_q_node *next;
 };
 
-struct kmap_ctrl {
-#ifdef CONFIG_KMAP_LOCAL
-	int				idx;
-	pte_t				pteval[KM_MAX_IDX];
-#endif
-};
-
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -672,8 +663,6 @@
 #endif
 	/* -1 unrunnable, 0 runnable, >0 stopped: */
 	volatile long			state;
-	/* saved state for "spinlock sleepers" */
-	volatile long			saved_state;
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
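The dropped saved_state field is what the RT tree's "spinlock sleeper" handling used to park a task's real sleep state while it blocked on a sleeping spinlock. A conceptual sketch only, not code from this patch (the real logic lives in the rtmutex slow path and runs under ->pi_lock with interrupts off):

/* conceptual only: save the caller's sleep state, sleep on the lock, restore it */
current->saved_state = current->state;		/* e.g. TASK_INTERRUPTIBLE */
current->state = TASK_UNINTERRUPTIBLE;		/* state used while blocked on the lock */
/* ... block until the sleeping spinlock is released ... */
current->state = current->saved_state;		/* hand the original state back */
current->saved_state = TASK_RUNNING;
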
@@ -753,11 +742,6 @@
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
 	cpumask_t			cpus_mask;
-	void				*migration_pending;
-#ifdef CONFIG_SMP
-	unsigned short			migration_disabled;
-#endif
-	unsigned short			migration_flags;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
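The removed migration_* fields appear to be the per-task bookkeeping behind migrate_disable()/migrate_enable(), which pin a task to its current CPU without disabling preemption. Hedged usage sketch (illustrative caller; some_counter is a made-up per-CPU variable):

migrate_disable();		/* stay on this CPU ...            */
this_cpu_inc(some_counter);	/* ... while touching per-CPU data */
migrate_enable();		/* migration allowed again         */
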
@@ -862,10 +846,6 @@
 #ifdef CONFIG_PSI
 	/* Stalled due to lack of memory */
 	unsigned			in_memstall:1;
-#endif
-#ifdef CONFIG_EVENTFD
-	/* Recursion prevention for eventfd_signal() */
-	unsigned			in_eventfd_signal:1;
 #endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
@@ -1012,16 +992,11 @@
 	/* Signal handlers: */
 	struct signal_struct		*signal;
 	struct sighand_struct __rcu	*sighand;
-	struct sigqueue			*sigqueue_cache;
 	sigset_t			blocked;
 	sigset_t			real_blocked;
 	/* Restored if set_restore_sigmask() was used: */
 	sigset_t			saved_sigmask;
 	struct sigpending		pending;
-#ifdef CONFIG_PREEMPT_RT
-	/* TODO: move me into ->restart_block ? */
-	struct kernel_siginfo		forced_info;
-#endif
 	unsigned long			sas_ss_sp;
 	size_t				sas_ss_size;
 	unsigned int			sas_ss_flags;
@@ -1048,7 +1023,6 @@
 	raw_spinlock_t			pi_lock;
 
 	struct wake_q_node		wake_q;
-	struct wake_q_node		wake_q_sleeper;
 	int				wake_q_count;
 
 #ifdef CONFIG_RT_MUTEXES
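wake_q_sleeper was the RT-only second wake queue used for rtmutex "sleeper" wakeups; the regular queue stays. Hedged sketch of the remaining wake_q pattern, i.e. collect wakeups under a lock and issue them after dropping it (some_lock and waiter are illustrative):

DEFINE_WAKE_Q(wq);

spin_lock(&some_lock);
wake_q_add(&wq, waiter);	/* queue the task, do not wake it yet */
spin_unlock(&some_lock);

wake_up_q(&wq);			/* wakeups happen here, lock-free */
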
@@ -1076,9 +1050,6 @@
 	int				softirqs_enabled;
 	int				softirq_context;
 	int				irq_config;
-#endif
-#ifdef CONFIG_PREEMPT_RT
-	int				softirq_disable_cnt;
 #endif
 
 #ifdef CONFIG_LOCKDEP
@@ -1365,7 +1336,6 @@
 	unsigned int			sequential_io;
 	unsigned int			sequential_io_avg;
 #endif
-	struct kmap_ctrl		kmap_ctrl;
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long			task_state_change;
 #endif
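kmap_ctrl is the per-task state that lets kmap_local_page() mappings survive a context switch; with its struct definition dropped in an earlier hunk, the field goes too. Hedged usage sketch of the API it backs (page and buf are illustrative):

void *addr = kmap_local_page(page);	/* CPU-local, nestable mapping */
memcpy(buf, addr, PAGE_SIZE);
kunmap_local(addr);
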
@@ -1834,7 +1804,6 @@
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_lock_sleeper(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
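wake_up_lock_sleeper() was the RT-only wakeup path for tasks blocked on sleeping spinlocks. The two remaining entry points, as a hedged sketch (worker is illustrative):

wake_up_process(worker);			/* wake regardless of the sleep state  */
wake_up_state(worker, TASK_INTERRUPTIBLE);	/* wake only if sleeping interruptibly */
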
@@ -1923,89 +1892,6 @@
 static inline int test_tsk_need_resched(struct task_struct *tsk)
 {
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
-}
-
-#ifdef CONFIG_PREEMPT_LAZY
-static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-{
-	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-{
-	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-{
-	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-}
-
-static inline int need_resched_lazy(void)
-{
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int need_resched_now(void)
-{
-	return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-#else
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-static inline int need_resched_lazy(void) { return 0; }
-
-static inline int need_resched_now(void)
-{
-	return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-#endif
-
-
-static inline bool __task_is_stopped_or_traced(struct task_struct *task)
-{
-	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
-		return true;
-#ifdef CONFIG_PREEMPT_RT
-	if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
-		return true;
-#endif
-	return false;
-}
-
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
-	bool traced_stopped;
-
-#ifdef CONFIG_PREEMPT_RT
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	traced_stopped = __task_is_stopped_or_traced(task);
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-#else
-	traced_stopped = __task_is_stopped_or_traced(task);
-#endif
-	return traced_stopped;
-}
-
-static inline bool task_is_traced(struct task_struct *task)
-{
-	bool traced = false;
-
-	if (task->state & __TASK_TRACED)
-		return true;
-#ifdef CONFIG_PREEMPT_RT
-	/* in case the task is sleeping on tasklist_lock */
-	raw_spin_lock_irq(&task->pi_lock);
-	if (task->state & __TASK_TRACED)
-		traced = true;
-	else if (task->saved_state & __TASK_TRACED)
-		traced = true;
-	raw_spin_unlock_irq(&task->pi_lock);
-#endif
-	return traced;
 }
 
 /*
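With the PREEMPT_LAZY helpers and the RT-aware stopped/traced predicates gone, the checks reduce to the plain macros restored at the top of this patch: a test on task->state only, with no pi_lock taken and no ->saved_state fallback. Hedged sketch of what a caller sees after this change (may_inspect is a made-up illustration):

static bool may_inspect(struct task_struct *child)
{
	/* expands to ((child->state & __TASK_TRACED) != 0) */
	return task_is_traced(child);
}
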