.. | .. |
---|
18 | 18 | #include <linux/mutex.h> |
---|
19 | 19 | #include <linux/plist.h> |
---|
20 | 20 | #include <linux/hrtimer.h> |
---|
| 21 | +#include <linux/irqflags.h> |
---|
21 | 22 | #include <linux/seccomp.h> |
---|
22 | 23 | #include <linux/nodemask.h> |
---|
23 | 24 | #include <linux/rcupdate.h> |
---|
| 25 | +#include <linux/refcount.h> |
---|
24 | 26 | #include <linux/resource.h> |
---|
25 | 27 | #include <linux/latencytop.h> |
---|
26 | 28 | #include <linux/sched/prio.h> |
---|
| 29 | +#include <linux/sched/types.h> |
---|
27 | 30 | #include <linux/signal_types.h> |
---|
28 | 31 | #include <linux/mm_types_task.h> |
---|
29 | | -#include <linux/mm_event.h> |
---|
30 | 32 | #include <linux/task_io_accounting.h> |
---|
| 33 | +#include <linux/posix-timers.h> |
---|
31 | 34 | #include <linux/rseq.h> |
---|
| 35 | +#include <linux/seqlock.h> |
---|
| 36 | +#include <linux/kcsan.h> |
---|
| 37 | +#include <linux/android_vendor.h> |
---|
32 | 38 | #include <linux/android_kabi.h> |
---|
33 | | -#include <asm/kmap_types.h> |
---|
34 | 39 | |
---|
35 | 40 | /* task_struct member predeclarations (sorted alphabetically): */ |
---|
36 | 41 | struct audit_context; |
---|
37 | 42 | struct backing_dev_info; |
---|
38 | 43 | struct bio_list; |
---|
39 | 44 | struct blk_plug; |
---|
| 45 | +struct capture_control; |
---|
40 | 46 | struct cfs_rq; |
---|
41 | 47 | struct fs_struct; |
---|
42 | 48 | struct futex_pi_state; |
---|
.. | .. |
---|
50 | 56 | struct rcu_node; |
---|
51 | 57 | struct reclaim_state; |
---|
52 | 58 | struct robust_list_head; |
---|
| 59 | +struct root_domain; |
---|
| 60 | +struct rq; |
---|
53 | 61 | struct sched_attr; |
---|
54 | 62 | struct sched_param; |
---|
55 | 63 | struct seq_file; |
---|
.. | .. |
---|
57 | 65 | struct signal_struct; |
---|
58 | 66 | struct task_delay_info; |
---|
59 | 67 | struct task_group; |
---|
| 68 | +struct io_uring_task; |
---|
60 | 69 | |
---|
61 | 70 | /* |
---|
62 | 71 | * Task state bitmask. NOTE! These bits are also |
---|
.. | .. |
---|
104 | 113 | __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ |
---|
105 | 114 | TASK_PARKED) |
---|
106 | 115 | |
---|
| 116 | +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
---|
| 117 | + |
---|
107 | 118 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) |
---|
108 | 119 | |
---|
109 | | -#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
---|
110 | | - (task->flags & PF_FROZEN) == 0 && \ |
---|
111 | | - (task->state & TASK_NOLOAD) == 0) |
---|
| 120 | +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
---|
112 | 121 | |
---|
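The helpers added above are plain state-mask tests. A minimal sketch of how a caller might use them; the function below is illustrative and not part of this patch:

```c
#include <linux/sched.h>

/* Illustrative only: a predicate built on the task_is_*() helpers defined
 * above. Not code from this patch. */
static inline bool can_sample_task(struct task_struct *p)
{
	/* Skip tasks halted by a signal stop or by a ptracer. */
	return !task_is_stopped_or_traced(p);
}
```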
113 | 122 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
---|
114 | 123 | |
---|
.. | .. |
---|
133 | 142 | smp_store_mb(current->state, (state_value)); \ |
---|
134 | 143 | } while (0) |
---|
135 | 144 | |
---|
136 | | -#define __set_current_state_no_track(state_value) \ |
---|
137 | | - current->state = (state_value); |
---|
138 | | - |
---|
139 | 145 | #define set_special_state(state_value) \ |
---|
140 | 146 | do { \ |
---|
141 | 147 | unsigned long flags; /* may shadow */ \ |
---|
.. | .. |
---|
145 | 151 | current->state = (state_value); \ |
---|
146 | 152 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ |
---|
147 | 153 | } while (0) |
---|
148 | | - |
---|
149 | 154 | #else |
---|
150 | 155 | /* |
---|
151 | 156 | * set_current_state() includes a barrier so that the write of current->state |
---|
.. | .. |
---|
154 | 159 | * |
---|
155 | 160 | * for (;;) { |
---|
156 | 161 | * set_current_state(TASK_UNINTERRUPTIBLE); |
---|
157 | | - * if (!need_sleep) |
---|
158 | | - * break; |
---|
| 162 | + * if (CONDITION) |
---|
| 163 | + * break; |
---|
159 | 164 | * |
---|
160 | 165 | * schedule(); |
---|
161 | 166 | * } |
---|
162 | 167 | * __set_current_state(TASK_RUNNING); |
---|
163 | 168 | * |
---|
164 | 169 | * If the caller does not need such serialisation (because, for instance, the |
---|
165 | | - * condition test and condition change and wakeup are under the same lock) then |
---|
| 170 | + * CONDITION test and condition change and wakeup are under the same lock) then |
---|
166 | 171 | * use __set_current_state(). |
---|
167 | 172 | * |
---|
168 | 173 | * The above is typically ordered against the wakeup, which does: |
---|
169 | 174 | * |
---|
170 | | - * need_sleep = false; |
---|
| 175 | + * CONDITION = 1; |
---|
171 | 176 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); |
---|
172 | 177 | * |
---|
173 | | - * where wake_up_state() executes a full memory barrier before accessing the |
---|
174 | | - * task state. |
---|
| 178 | + * where wake_up_state()/try_to_wake_up() executes a full memory barrier before |
---|
| 179 | + * accessing p->state. |
---|
175 | 180 | * |
---|
176 | 181 | * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, |
---|
177 | 182 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a |
---|
178 | 183 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). |
---|
179 | 184 | * |
---|
180 | 185 | * However, with slightly different timing the wakeup TASK_RUNNING store can |
---|
181 | | - * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not |
---|
| 186 | + * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not |
---|
182 | 187 | * a problem either because that will result in one extra go around the loop |
---|
183 | 188 | * and our @cond test will save the day. |
---|
184 | 189 | * |
---|
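A minimal sketch of both sides of the handshake described in the comment above, using only set_current_state()/schedule() and wake_up_state(); the shared flag is a stand-in for whatever CONDITION the caller actually waits on:

```c
#include <linux/sched.h>

static int condition;	/* placeholder for the CONDITION in the comment */

/* Waiter side: publish the state first, then test the condition. */
static void wait_for_condition(void)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(condition))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/* Waker side: the CONDITION store must precede the wakeup;
 * wake_up_state() supplies the full barrier the comment relies on. */
static void signal_condition(struct task_struct *waiter)
{
	WRITE_ONCE(condition, 1);
	wake_up_state(waiter, TASK_UNINTERRUPTIBLE);
}
```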
.. | .. |
---|
189 | 194 | |
---|
190 | 195 | #define set_current_state(state_value) \ |
---|
191 | 196 | smp_store_mb(current->state, (state_value)) |
---|
192 | | - |
---|
193 | | -#define __set_current_state_no_track(state_value) \ |
---|
194 | | - __set_current_state(state_value) |
---|
195 | 197 | |
---|
196 | 198 | /* |
---|
197 | 199 | * set_special_state() should be used for those states when the blocking task |
---|
.. | .. |
---|
223 | 225 | extern long schedule_timeout_idle(long timeout); |
---|
224 | 226 | asmlinkage void schedule(void); |
---|
225 | 227 | extern void schedule_preempt_disabled(void); |
---|
| 228 | +asmlinkage void preempt_schedule_irq(void); |
---|
226 | 229 | |
---|
227 | 230 | extern int __must_check io_schedule_prepare(void); |
---|
228 | 231 | extern void io_schedule_finish(int token); |
---|
229 | 232 | extern long io_schedule_timeout(long timeout); |
---|
230 | 233 | extern void io_schedule(void); |
---|
231 | | - |
---|
232 | | -int cpu_nr_pinned(int cpu); |
---|
233 | 234 | |
---|
234 | 235 | /** |
---|
235 | 236 | * struct prev_cputime - snapshot of system and user cputime |
---|
.. | .. |
---|
248 | 249 | #endif |
---|
249 | 250 | }; |
---|
250 | 251 | |
---|
251 | | -/** |
---|
252 | | - * struct task_cputime - collected CPU time counts |
---|
253 | | - * @utime: time spent in user mode, in nanoseconds |
---|
254 | | - * @stime: time spent in kernel mode, in nanoseconds |
---|
255 | | - * @sum_exec_runtime: total time spent on the CPU, in nanoseconds |
---|
256 | | - * |
---|
257 | | - * This structure groups together three kinds of CPU time that are tracked for |
---|
258 | | - * threads and thread groups. Most things considering CPU time want to group |
---|
259 | | - * these counts together and treat all three of them in parallel. |
---|
260 | | - */ |
---|
261 | | -struct task_cputime { |
---|
262 | | - u64 utime; |
---|
263 | | - u64 stime; |
---|
264 | | - unsigned long long sum_exec_runtime; |
---|
265 | | -}; |
---|
266 | | - |
---|
267 | | -/* Alternate field names when used on cache expirations: */ |
---|
268 | | -#define virt_exp utime |
---|
269 | | -#define prof_exp stime |
---|
270 | | -#define sched_exp sum_exec_runtime |
---|
271 | | - |
---|
272 | 252 | enum vtime_state { |
---|
273 | 253 | /* Task is sleeping or running in a CPU with VTIME inactive: */ |
---|
274 | 254 | VTIME_INACTIVE = 0, |
---|
275 | | - /* Task runs in userspace in a CPU with VTIME active: */ |
---|
276 | | - VTIME_USER, |
---|
| 255 | + /* Task is idle */ |
---|
| 256 | + VTIME_IDLE, |
---|
277 | 257 | /* Task runs in kernelspace in a CPU with VTIME active: */ |
---|
278 | 258 | VTIME_SYS, |
---|
| 259 | + /* Task runs in userspace in a CPU with VTIME active: */ |
---|
| 260 | + VTIME_USER, |
---|
| 261 | + /* Task runs as guests in a CPU with VTIME active: */ |
---|
| 262 | + VTIME_GUEST, |
---|
279 | 263 | }; |
---|
280 | 264 | |
---|
281 | 265 | struct vtime { |
---|
282 | 266 | seqcount_t seqcount; |
---|
283 | 267 | unsigned long long starttime; |
---|
284 | 268 | enum vtime_state state; |
---|
| 269 | + unsigned int cpu; |
---|
285 | 270 | u64 utime; |
---|
286 | 271 | u64 stime; |
---|
287 | 272 | u64 gtime; |
---|
.. | .. |
---|
298 | 283 | UCLAMP_MAX, |
---|
299 | 284 | UCLAMP_CNT |
---|
300 | 285 | }; |
---|
| 286 | + |
---|
| 287 | +#ifdef CONFIG_SMP |
---|
| 288 | +extern struct root_domain def_root_domain; |
---|
| 289 | +extern struct mutex sched_domains_mutex; |
---|
| 290 | +#endif |
---|
301 | 291 | |
---|
302 | 292 | struct sched_info { |
---|
303 | 293 | #ifdef CONFIG_SCHED_INFO |
---|
.. | .. |
---|
360 | 350 | * Only for tasks we track a moving average of the past instantaneous |
---|
361 | 351 | * estimated utilization. This allows to absorb sporadic drops in utilization |
---|
362 | 352 | * of an otherwise almost periodic task. |
---|
| 353 | + * |
---|
| 354 | + * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg |
---|
| 355 | + * updates. When a task is dequeued, its util_est should not be updated if its |
---|
| 356 | + * util_avg has not been updated in the meantime. |
---|
| 357 | + * This information is mapped into the MSB bit of util_est.enqueued at dequeue |
---|
| 358 | + * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg |
---|
| 359 | + * for a task) it is safe to use MSB. |
---|
363 | 360 | */ |
---|
364 | 361 | struct util_est { |
---|
365 | 362 | unsigned int enqueued; |
---|
366 | 363 | unsigned int ewma; |
---|
367 | 364 | #define UTIL_EST_WEIGHT_SHIFT 2 |
---|
| 365 | +#define UTIL_AVG_UNCHANGED 0x80000000 |
---|
368 | 366 | } __attribute__((__aligned__(sizeof(u64)))); |
---|
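The new UTIL_AVG_UNCHANGED flag lives in the MSB of util_est.enqueued, which is safe because the estimate itself never exceeds 1024 (the PELT util_avg ceiling for a task). A hedged illustration of setting and masking such a flag; the helper names below are invented for the example:

```c
/* Illustrative helpers (not from this patch): the synchronization flag and
 * the utilization estimate share one word, with the flag in the MSB. */
static inline unsigned int ue_mark_unchanged(unsigned int enqueued)
{
	return enqueued | UTIL_AVG_UNCHANGED;	/* set at dequeue time */
}

static inline unsigned int ue_estimate(unsigned int enqueued)
{
	return enqueued & ~UTIL_AVG_UNCHANGED;	/* strip the flag when reading */
}
```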
369 | 367 | |
---|
370 | 368 | /* |
---|
371 | | - * The load_avg/util_avg accumulates an infinite geometric series |
---|
372 | | - * (see __update_load_avg() in kernel/sched/fair.c). |
---|
| 369 | + * The load/runnable/util_avg accumulates an infinite geometric series |
---|
| 370 | + * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). |
---|
373 | 371 | * |
---|
374 | 372 | * [load_avg definition] |
---|
375 | 373 | * |
---|
376 | 374 | * load_avg = runnable% * scale_load_down(load) |
---|
377 | 375 | * |
---|
378 | | - * where runnable% is the time ratio that a sched_entity is runnable. |
---|
379 | | - * For cfs_rq, it is the aggregated load_avg of all runnable and |
---|
380 | | - * blocked sched_entities. |
---|
| 376 | + * [runnable_avg definition] |
---|
| 377 | + * |
---|
| 378 | + * runnable_avg = runnable% * SCHED_CAPACITY_SCALE |
---|
381 | 379 | * |
---|
382 | 380 | * [util_avg definition] |
---|
383 | 381 | * |
---|
384 | 382 | * util_avg = running% * SCHED_CAPACITY_SCALE |
---|
385 | 383 | * |
---|
386 | | - * where running% is the time ratio that a sched_entity is running on |
---|
387 | | - * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable |
---|
388 | | - * and blocked sched_entities. |
---|
| 384 | + * where runnable% is the time ratio that a sched_entity is runnable and |
---|
| 385 | + * running% the time ratio that a sched_entity is running. |
---|
389 | 386 | * |
---|
390 | | - * load_avg and util_avg don't direcly factor frequency scaling and CPU |
---|
391 | | - * capacity scaling. The scaling is done through the rq_clock_pelt that |
---|
392 | | - * is used for computing those signals (see update_rq_clock_pelt()) |
---|
| 387 | + * For cfs_rq, they are the aggregated values of all runnable and blocked |
---|
| 388 | + * sched_entities. |
---|
| 389 | + * |
---|
| 390 | + * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU |
---|
| 391 | + * capacity scaling. The scaling is done through the rq_clock_pelt that is used |
---|
| 392 | + * for computing those signals (see update_rq_clock_pelt()) |
---|
393 | 393 | * |
---|
394 | 394 | * N.B., the above ratios (runnable% and running%) themselves are in the |
---|
395 | 395 | * range of [0, 1]. To do fixed point arithmetics, we therefore scale them |
---|
.. | .. |
---|
413 | 413 | struct sched_avg { |
---|
414 | 414 | u64 last_update_time; |
---|
415 | 415 | u64 load_sum; |
---|
416 | | - u64 runnable_load_sum; |
---|
| 416 | + u64 runnable_sum; |
---|
417 | 417 | u32 util_sum; |
---|
418 | 418 | u32 period_contrib; |
---|
419 | 419 | unsigned long load_avg; |
---|
420 | | - unsigned long runnable_load_avg; |
---|
| 420 | + unsigned long runnable_avg; |
---|
421 | 421 | unsigned long util_avg; |
---|
422 | 422 | struct util_est util_est; |
---|
423 | 423 | } ____cacheline_aligned; |
---|
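To make the scales concrete: with SCHED_CAPACITY_SCALE = 1024, a task that is runnable half the time and actually running a quarter of the time settles around runnable_avg ≈ 512 and util_avg ≈ 256. The helper below is hypothetical, shown only to illustrate how the struct sched_avg fields relate to that scale:

```c
/* Hypothetical helper, not kernel API: express a PELT average as a rough
 * percentage of one CPU's capacity (SCHED_CAPACITY_SCALE == 1024). */
static inline unsigned long pelt_avg_to_pct(unsigned long avg)
{
	return (avg * 100) / SCHED_CAPACITY_SCALE;
}

/* Examples:
 *   pelt_avg_to_pct(p->se.avg.util_avg)        - a task's CPU utilization
 *   pelt_avg_to_pct(cfs_rq->avg.runnable_avg)  - a cfs_rq's runnable pressure
 */
```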
.. | .. |
---|
461 | 461 | struct sched_entity { |
---|
462 | 462 | /* For load-balancing: */ |
---|
463 | 463 | struct load_weight load; |
---|
464 | | - unsigned long runnable_weight; |
---|
465 | 464 | struct rb_node run_node; |
---|
466 | 465 | struct list_head group_node; |
---|
467 | 466 | unsigned int on_rq; |
---|
.. | .. |
---|
482 | 481 | struct cfs_rq *cfs_rq; |
---|
483 | 482 | /* rq "owned" by this entity/group: */ |
---|
484 | 483 | struct cfs_rq *my_q; |
---|
| 484 | + /* cached value of my_q->h_nr_running */ |
---|
| 485 | + unsigned long runnable_weight; |
---|
485 | 486 | #endif |
---|
486 | 487 | |
---|
487 | 488 | #ifdef CONFIG_SMP |
---|
.. | .. |
---|
539 | 540 | |
---|
540 | 541 | /* |
---|
541 | 542 | * Actual scheduling parameters. Initialized with the values above, |
---|
542 | | - * they are continously updated during task execution. Note that |
---|
| 543 | + * they are continuously updated during task execution. Note that |
---|
543 | 544 | * the remaining runtime could be < 0 in case we are in overrun. |
---|
544 | 545 | */ |
---|
545 | 546 | s64 runtime; /* Remaining runtime for this instance */ |
---|
.. | .. |
---|
552 | 553 | * @dl_throttled tells if we exhausted the runtime. If so, the |
---|
553 | 554 | * task has to wait for a replenishment to be performed at the |
---|
554 | 555 | * next firing of dl_timer. |
---|
555 | | - * |
---|
556 | | - * @dl_boosted tells if we are boosted due to DI. If so we are |
---|
557 | | - * outside bandwidth enforcement mechanism (but only until we |
---|
558 | | - * exit the critical section); |
---|
559 | 556 | * |
---|
560 | 557 | * @dl_yielded tells if task gave up the CPU before consuming |
---|
561 | 558 | * all its available runtime during the last job. |
---|
.. | .. |
---|
571 | 568 | * overruns. |
---|
572 | 569 | */ |
---|
573 | 570 | unsigned int dl_throttled : 1; |
---|
574 | | - unsigned int dl_boosted : 1; |
---|
575 | 571 | unsigned int dl_yielded : 1; |
---|
576 | 572 | unsigned int dl_non_contending : 1; |
---|
577 | 573 | unsigned int dl_overrun : 1; |
---|
.. | .. |
---|
590 | 586 | * time. |
---|
591 | 587 | */ |
---|
592 | 588 | struct hrtimer inactive_timer; |
---|
| 589 | + |
---|
| 590 | +#ifdef CONFIG_RT_MUTEXES |
---|
| 591 | + /* |
---|
| 592 | + * Priority Inheritance. When a DEADLINE scheduling entity is boosted |
---|
| 593 | + * pi_se points to the donor, otherwise points to the dl_se it belongs |
---|
| 594 | + * to (the original one/itself). |
---|
| 595 | + */ |
---|
| 596 | + struct sched_dl_entity *pi_se; |
---|
| 597 | +#endif |
---|
593 | 598 | }; |
---|
594 | 599 | |
---|
595 | 600 | #ifdef CONFIG_UCLAMP_TASK |
---|
.. | .. |
---|
631 | 636 | struct { |
---|
632 | 637 | u8 blocked; |
---|
633 | 638 | u8 need_qs; |
---|
634 | | - u8 exp_need_qs; |
---|
635 | | - |
---|
636 | | - /* Otherwise the compiler can store garbage here: */ |
---|
637 | | - u8 pad; |
---|
| 639 | + u8 exp_hint; /* Hint for performance. */ |
---|
| 640 | + u8 need_mb; /* Readers need smp_mb(). */ |
---|
638 | 641 | } b; /* Bits. */ |
---|
639 | 642 | u32 s; /* Set of bits. */ |
---|
640 | 643 | }; |
---|
.. | .. |
---|
660 | 663 | #endif |
---|
661 | 664 | /* -1 unrunnable, 0 runnable, >0 stopped: */ |
---|
662 | 665 | volatile long state; |
---|
663 | | - /* saved state for "spinlock sleepers" */ |
---|
664 | | - volatile long saved_state; |
---|
665 | 666 | |
---|
666 | 667 | /* |
---|
667 | 668 | * This begins the randomizable portion of task_struct. Only |
---|
.. | .. |
---|
670 | 671 | randomized_struct_fields_start |
---|
671 | 672 | |
---|
672 | 673 | void *stack; |
---|
673 | | - atomic_t usage; |
---|
| 674 | + refcount_t usage; |
---|
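Switching usage from atomic_t to refcount_t gives task lifetime counting saturate-and-warn semantics on overflow/underflow. For context, this is roughly what the existing get/put helpers look like once usage is a refcount_t (simplified sketch of the mainline helpers, not code added by this hunk):

```c
/* Simplified from the mainline get/put helpers; shown for context only. */
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}

static inline void put_task_struct(struct task_struct *t)
{
	if (refcount_dec_and_test(&t->usage))
		__put_task_struct(t);	/* frees the task once the count hits zero */
}
```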
674 | 675 | /* Per task flags (PF_*), defined further below: */ |
---|
675 | 676 | unsigned int flags; |
---|
676 | 677 | unsigned int ptrace; |
---|
677 | 678 | |
---|
678 | 679 | #ifdef CONFIG_SMP |
---|
679 | | - struct llist_node wake_entry; |
---|
680 | 680 | int on_cpu; |
---|
| 681 | + struct __call_single_node wake_entry; |
---|
681 | 682 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
---|
682 | 683 | /* Current CPU: */ |
---|
683 | 684 | unsigned int cpu; |
---|
.. | .. |
---|
706 | 707 | const struct sched_class *sched_class; |
---|
707 | 708 | struct sched_entity se; |
---|
708 | 709 | struct sched_rt_entity rt; |
---|
709 | | - |
---|
710 | | - /* task boost vendor fields */ |
---|
711 | | - u64 last_sleep_ts; |
---|
712 | | - int boost; |
---|
713 | | - u64 boost_period; |
---|
714 | | - u64 boost_expires; |
---|
715 | | - |
---|
716 | 710 | #ifdef CONFIG_CGROUP_SCHED |
---|
717 | 711 | struct task_group *sched_task_group; |
---|
718 | 712 | #endif |
---|
719 | 713 | struct sched_dl_entity dl; |
---|
720 | 714 | |
---|
721 | 715 | #ifdef CONFIG_UCLAMP_TASK |
---|
722 | | - /* Clamp values requested for a scheduling entity */ |
---|
| 716 | + /* |
---|
| 717 | + * Clamp values requested for a scheduling entity. |
---|
| 718 | + * Must be updated with task_rq_lock() held. |
---|
| 719 | + */ |
---|
723 | 720 | struct uclamp_se uclamp_req[UCLAMP_CNT]; |
---|
724 | | - /* Effective clamp values used for a scheduling entity */ |
---|
| 721 | + /* |
---|
| 722 | + * Effective clamp values used for a scheduling entity. |
---|
| 723 | + * Must be updated with task_rq_lock() held. |
---|
| 724 | + */ |
---|
725 | 725 | struct uclamp_se uclamp[UCLAMP_CNT]; |
---|
| 726 | +#endif |
---|
| 727 | + |
---|
| 728 | +#ifdef CONFIG_HOTPLUG_CPU |
---|
| 729 | + struct list_head percpu_kthread_node; |
---|
726 | 730 | #endif |
---|
727 | 731 | |
---|
728 | 732 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
---|
.. | .. |
---|
736 | 740 | |
---|
737 | 741 | unsigned int policy; |
---|
738 | 742 | int nr_cpus_allowed; |
---|
739 | | -// cpumask_t cpus_allowed; |
---|
740 | | - cpumask_t cpus_requested; |
---|
741 | 743 | const cpumask_t *cpus_ptr; |
---|
742 | 744 | cpumask_t cpus_mask; |
---|
743 | | -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) |
---|
744 | | - int migrate_disable; |
---|
745 | | - bool migrate_disable_scheduled; |
---|
746 | | -# ifdef CONFIG_SCHED_DEBUG |
---|
747 | | - int pinned_on_cpu; |
---|
748 | | -# endif |
---|
749 | | -#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) |
---|
750 | | -# ifdef CONFIG_SCHED_DEBUG |
---|
751 | | - int migrate_disable; |
---|
752 | | -# endif |
---|
753 | | -#endif |
---|
754 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
755 | | - int sleeping_lock; |
---|
756 | | -#endif |
---|
757 | 745 | |
---|
758 | 746 | #ifdef CONFIG_PREEMPT_RCU |
---|
759 | 747 | int rcu_read_lock_nesting; |
---|
.. | .. |
---|
769 | 757 | int rcu_tasks_idle_cpu; |
---|
770 | 758 | struct list_head rcu_tasks_holdout_list; |
---|
771 | 759 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
---|
| 760 | + |
---|
| 761 | +#ifdef CONFIG_TASKS_TRACE_RCU |
---|
| 762 | + int trc_reader_nesting; |
---|
| 763 | + int trc_ipi_to_cpu; |
---|
| 764 | + union rcu_special trc_reader_special; |
---|
| 765 | + bool trc_reader_checked; |
---|
| 766 | + struct list_head trc_holdout_list; |
---|
| 767 | +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
---|
772 | 768 | |
---|
773 | 769 | struct sched_info sched_info; |
---|
774 | 770 | |
---|
.. | .. |
---|
802 | 798 | unsigned sched_reset_on_fork:1; |
---|
803 | 799 | unsigned sched_contributes_to_load:1; |
---|
804 | 800 | unsigned sched_migrated:1; |
---|
805 | | - unsigned sched_remote_wakeup:1; |
---|
806 | 801 | #ifdef CONFIG_PSI |
---|
807 | 802 | unsigned sched_psi_wake_requeue:1; |
---|
808 | 803 | #endif |
---|
.. | .. |
---|
812 | 807 | |
---|
813 | 808 | /* Unserialized, strictly 'current' */ |
---|
814 | 809 | |
---|
| 810 | + /* |
---|
| 811 | + * This field must not be in the scheduler word above due to wakelist |
---|
| 812 | + * queueing no longer being serialized by p->on_cpu. However: |
---|
| 813 | + * |
---|
| 814 | + * p->XXX = X; ttwu() |
---|
| 815 | + * schedule() if (p->on_rq && ..) // false |
---|
| 816 | + * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true |
---|
| 817 | + * deactivate_task() ttwu_queue_wakelist()) |
---|
| 818 | + * p->on_rq = 0; p->sched_remote_wakeup = Y; |
---|
| 819 | + * |
---|
| 820 | + * guarantees all stores of 'current' are visible before |
---|
| 821 | + * ->sched_remote_wakeup gets used, so it can be in this word. |
---|
| 822 | + */ |
---|
| 823 | + unsigned sched_remote_wakeup:1; |
---|
| 824 | + |
---|
815 | 825 | /* Bit to tell LSMs we're in execve(): */ |
---|
816 | 826 | unsigned in_execve:1; |
---|
817 | 827 | unsigned in_iowait:1; |
---|
.. | .. |
---|
820 | 830 | #endif |
---|
821 | 831 | #ifdef CONFIG_MEMCG |
---|
822 | 832 | unsigned in_user_fault:1; |
---|
823 | | -#ifdef CONFIG_MEMCG_KMEM |
---|
824 | | - unsigned memcg_kmem_skip_account:1; |
---|
825 | | -#endif |
---|
826 | 833 | #endif |
---|
827 | 834 | #ifdef CONFIG_COMPAT_BRK |
---|
828 | 835 | unsigned brk_randomized:1; |
---|
.. | .. |
---|
830 | 837 | #ifdef CONFIG_CGROUPS |
---|
831 | 838 | /* disallow userland-initiated cgroup migration */ |
---|
832 | 839 | unsigned no_cgroup_migration:1; |
---|
| 840 | + /* task is frozen/stopped (used by the cgroup freezer) */ |
---|
| 841 | + unsigned frozen:1; |
---|
833 | 842 | #endif |
---|
834 | 843 | #ifdef CONFIG_BLK_CGROUP |
---|
835 | | - /* to be used once the psi infrastructure lands upstream. */ |
---|
836 | 844 | unsigned use_memdelay:1; |
---|
| 845 | +#endif |
---|
| 846 | +#ifdef CONFIG_PSI |
---|
| 847 | + /* Stalled due to lack of memory */ |
---|
| 848 | + unsigned in_memstall:1; |
---|
837 | 849 | #endif |
---|
838 | 850 | |
---|
839 | 851 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
---|
.. | .. |
---|
916 | 928 | u64 start_time; |
---|
917 | 929 | |
---|
918 | 930 | /* Boot based time in nsecs: */ |
---|
919 | | - u64 real_start_time; |
---|
| 931 | + u64 start_boottime; |
---|
920 | 932 | |
---|
921 | 933 | /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ |
---|
922 | 934 | unsigned long min_flt; |
---|
923 | 935 | unsigned long maj_flt; |
---|
924 | 936 | |
---|
925 | | -#ifdef CONFIG_POSIX_TIMERS |
---|
926 | | - struct task_cputime cputime_expires; |
---|
927 | | - struct list_head cpu_timers[3]; |
---|
928 | | -#ifdef CONFIG_PREEMPT_RT_BASE |
---|
929 | | - struct task_struct *posix_timer_list; |
---|
930 | | -#endif |
---|
| 937 | + /* Empty if CONFIG_POSIX_CPUTIMERS=n */ |
---|
| 938 | + struct posix_cputimers posix_cputimers; |
---|
| 939 | + |
---|
| 940 | +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK |
---|
| 941 | + struct posix_cputimers_work posix_cputimers_work; |
---|
931 | 942 | #endif |
---|
932 | 943 | |
---|
933 | 944 | /* Process credentials: */ |
---|
.. | .. |
---|
940 | 951 | |
---|
941 | 952 | /* Effective (overridable) subjective task credentials (COW): */ |
---|
942 | 953 | const struct cred __rcu *cred; |
---|
| 954 | + |
---|
| 955 | +#ifdef CONFIG_KEYS |
---|
| 956 | + /* Cached requested key. */ |
---|
| 957 | + struct key *cached_requested_key; |
---|
| 958 | +#endif |
---|
943 | 959 | |
---|
944 | 960 | /* |
---|
945 | 961 | * executable name, excluding path. |
---|
.. | .. |
---|
966 | 982 | /* Open file information: */ |
---|
967 | 983 | struct files_struct *files; |
---|
968 | 984 | |
---|
| 985 | +#ifdef CONFIG_IO_URING |
---|
| 986 | + struct io_uring_task *io_uring; |
---|
| 987 | +#endif |
---|
| 988 | + |
---|
969 | 989 | /* Namespaces: */ |
---|
970 | 990 | struct nsproxy *nsproxy; |
---|
971 | 991 | |
---|
972 | 992 | /* Signal handlers: */ |
---|
973 | 993 | struct signal_struct *signal; |
---|
974 | | - struct sighand_struct *sighand; |
---|
975 | | - struct sigqueue *sigqueue_cache; |
---|
976 | | - |
---|
| 994 | + struct sighand_struct __rcu *sighand; |
---|
977 | 995 | sigset_t blocked; |
---|
978 | 996 | sigset_t real_blocked; |
---|
979 | 997 | /* Restored if set_restore_sigmask() was used: */ |
---|
980 | 998 | sigset_t saved_sigmask; |
---|
981 | 999 | struct sigpending pending; |
---|
982 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
983 | | - /* TODO: move me into ->restart_block ? */ |
---|
984 | | - struct siginfo forced_info; |
---|
985 | | -#endif |
---|
986 | 1000 | unsigned long sas_ss_sp; |
---|
987 | 1001 | size_t sas_ss_size; |
---|
988 | 1002 | unsigned int sas_ss_flags; |
---|
989 | 1003 | |
---|
990 | 1004 | struct callback_head *task_works; |
---|
991 | 1005 | |
---|
992 | | - struct audit_context *audit_context; |
---|
| 1006 | +#ifdef CONFIG_AUDIT |
---|
993 | 1007 | #ifdef CONFIG_AUDITSYSCALL |
---|
| 1008 | + struct audit_context *audit_context; |
---|
| 1009 | +#endif |
---|
994 | 1010 | kuid_t loginuid; |
---|
995 | 1011 | unsigned int sessionid; |
---|
996 | 1012 | #endif |
---|
.. | .. |
---|
1007 | 1023 | raw_spinlock_t pi_lock; |
---|
1008 | 1024 | |
---|
1009 | 1025 | struct wake_q_node wake_q; |
---|
1010 | | - struct wake_q_node wake_q_sleeper; |
---|
| 1026 | + int wake_q_count; |
---|
1011 | 1027 | |
---|
1012 | 1028 | #ifdef CONFIG_RT_MUTEXES |
---|
1013 | 1029 | /* PI waiters blocked on a rt_mutex held by this task: */ |
---|
.. | .. |
---|
1017 | 1033 | /* Deadlock detection and priority inheritance handling: */ |
---|
1018 | 1034 | struct rt_mutex_waiter *pi_blocked_on; |
---|
1019 | 1035 | #endif |
---|
1020 | | -#ifdef CONFIG_MM_EVENT_STAT |
---|
1021 | | - struct mm_event_task mm_event[MM_TYPE_NUM]; |
---|
1022 | | - unsigned long next_period; |
---|
1023 | | -#endif |
---|
| 1036 | + |
---|
1024 | 1037 | #ifdef CONFIG_DEBUG_MUTEXES |
---|
1025 | 1038 | /* Mutex deadlock detection: */ |
---|
1026 | 1039 | struct mutex_waiter *blocked_on; |
---|
1027 | 1040 | #endif |
---|
1028 | 1041 | |
---|
| 1042 | +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
---|
| 1043 | + int non_block_count; |
---|
| 1044 | +#endif |
---|
| 1045 | + |
---|
1029 | 1046 | #ifdef CONFIG_TRACE_IRQFLAGS |
---|
1030 | | - unsigned int irq_events; |
---|
1031 | | - unsigned long hardirq_enable_ip; |
---|
1032 | | - unsigned long hardirq_disable_ip; |
---|
1033 | | - unsigned int hardirq_enable_event; |
---|
1034 | | - unsigned int hardirq_disable_event; |
---|
1035 | | - int hardirqs_enabled; |
---|
1036 | | - int hardirq_context; |
---|
1037 | | - unsigned long softirq_disable_ip; |
---|
1038 | | - unsigned long softirq_enable_ip; |
---|
1039 | | - unsigned int softirq_disable_event; |
---|
1040 | | - unsigned int softirq_enable_event; |
---|
| 1047 | + struct irqtrace_events irqtrace; |
---|
| 1048 | + unsigned int hardirq_threaded; |
---|
| 1049 | + u64 hardirq_chain_key; |
---|
1041 | 1050 | int softirqs_enabled; |
---|
1042 | 1051 | int softirq_context; |
---|
| 1052 | + int irq_config; |
---|
1043 | 1053 | #endif |
---|
1044 | 1054 | |
---|
1045 | 1055 | #ifdef CONFIG_LOCKDEP |
---|
.. | .. |
---|
1050 | 1060 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
---|
1051 | 1061 | #endif |
---|
1052 | 1062 | |
---|
1053 | | -#ifdef CONFIG_UBSAN |
---|
| 1063 | +#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) |
---|
1054 | 1064 | unsigned int in_ubsan; |
---|
1055 | 1065 | #endif |
---|
1056 | 1066 | |
---|
.. | .. |
---|
1072 | 1082 | |
---|
1073 | 1083 | struct io_context *io_context; |
---|
1074 | 1084 | |
---|
| 1085 | +#ifdef CONFIG_COMPACTION |
---|
| 1086 | + struct capture_control *capture_control; |
---|
| 1087 | +#endif |
---|
1075 | 1088 | /* Ptrace state: */ |
---|
1076 | 1089 | unsigned long ptrace_message; |
---|
1077 | | - siginfo_t *last_siginfo; |
---|
| 1090 | + kernel_siginfo_t *last_siginfo; |
---|
1078 | 1091 | |
---|
1079 | 1092 | struct task_io_accounting ioac; |
---|
1080 | 1093 | #ifdef CONFIG_PSI |
---|
.. | .. |
---|
1093 | 1106 | /* Protected by ->alloc_lock: */ |
---|
1094 | 1107 | nodemask_t mems_allowed; |
---|
1095 | 1108 | /* Seqence number to catch updates: */ |
---|
1096 | | - seqcount_t mems_allowed_seq; |
---|
| 1109 | + seqcount_spinlock_t mems_allowed_seq; |
---|
1097 | 1110 | int cpuset_mem_spread_rotor; |
---|
1098 | 1111 | int cpuset_slab_spread_rotor; |
---|
1099 | 1112 | #endif |
---|
.. | .. |
---|
1103 | 1116 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
---|
1104 | 1117 | struct list_head cg_list; |
---|
1105 | 1118 | #endif |
---|
1106 | | -#ifdef CONFIG_INTEL_RDT |
---|
| 1119 | +#ifdef CONFIG_X86_CPU_RESCTRL |
---|
1107 | 1120 | u32 closid; |
---|
1108 | 1121 | u32 rmid; |
---|
1109 | 1122 | #endif |
---|
.. | .. |
---|
1114 | 1127 | #endif |
---|
1115 | 1128 | struct list_head pi_state_list; |
---|
1116 | 1129 | struct futex_pi_state *pi_state_cache; |
---|
| 1130 | + struct mutex futex_exit_mutex; |
---|
| 1131 | + unsigned int futex_state; |
---|
1117 | 1132 | #endif |
---|
1118 | 1133 | #ifdef CONFIG_PERF_EVENTS |
---|
1119 | 1134 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
---|
.. | .. |
---|
1181 | 1196 | |
---|
1182 | 1197 | #ifdef CONFIG_RSEQ |
---|
1183 | 1198 | struct rseq __user *rseq; |
---|
1184 | | - u32 rseq_len; |
---|
1185 | 1199 | u32 rseq_sig; |
---|
1186 | 1200 | /* |
---|
1187 | 1201 | * RmW on rseq_event_mask must be performed atomically |
---|
.. | .. |
---|
1192 | 1206 | |
---|
1193 | 1207 | struct tlbflush_unmap_batch tlb_ubc; |
---|
1194 | 1208 | |
---|
1195 | | - struct rcu_head rcu; |
---|
| 1209 | + union { |
---|
| 1210 | + refcount_t rcu_users; |
---|
| 1211 | + struct rcu_head rcu; |
---|
| 1212 | + }; |
---|
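The union works because the two members are never live at the same time: once the last rcu_users reference is dropped, the same words are reused as the rcu_head for the grace-period callback. Roughly (a simplified sketch of the mainline kernel/exit.c helper, not part of this hunk):

```c
/* Simplified sketch of how the union above is consumed; mainline keeps this
 * in kernel/exit.c, where delayed_put_task_struct() drops the final ref. */
void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}
```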
1196 | 1213 | |
---|
1197 | 1214 | /* Cache last used pipe for splice(): */ |
---|
1198 | 1215 | struct pipe_inode_info *splice_pipe; |
---|
.. | .. |
---|
1227 | 1244 | u64 timer_slack_ns; |
---|
1228 | 1245 | u64 default_timer_slack_ns; |
---|
1229 | 1246 | |
---|
1230 | | -#ifdef CONFIG_KASAN |
---|
| 1247 | +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
---|
1231 | 1248 | unsigned int kasan_depth; |
---|
| 1249 | +#endif |
---|
| 1250 | + |
---|
| 1251 | +#ifdef CONFIG_KCSAN |
---|
| 1252 | + struct kcsan_ctx kcsan_ctx; |
---|
| 1253 | +#ifdef CONFIG_TRACE_IRQFLAGS |
---|
| 1254 | + struct irqtrace_events kcsan_save_irqtrace; |
---|
| 1255 | +#endif |
---|
| 1256 | +#endif |
---|
| 1257 | + |
---|
| 1258 | +#if IS_ENABLED(CONFIG_KUNIT) |
---|
| 1259 | + struct kunit *kunit_test; |
---|
1232 | 1260 | #endif |
---|
1233 | 1261 | |
---|
1234 | 1262 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
.. | .. |
---|
1280 | 1308 | |
---|
1281 | 1309 | /* KCOV sequence number: */ |
---|
1282 | 1310 | int kcov_sequence; |
---|
| 1311 | + |
---|
| 1312 | + /* Collect coverage from softirq context: */ |
---|
| 1313 | + unsigned int kcov_softirq; |
---|
1283 | 1314 | #endif |
---|
1284 | 1315 | |
---|
1285 | 1316 | #ifdef CONFIG_MEMCG |
---|
.. | .. |
---|
1305 | 1336 | unsigned int sequential_io; |
---|
1306 | 1337 | unsigned int sequential_io_avg; |
---|
1307 | 1338 | #endif |
---|
1308 | | -#ifdef CONFIG_PREEMPT_RT_BASE |
---|
1309 | | - struct rcu_head put_rcu; |
---|
1310 | | - int softirq_nestcnt; |
---|
1311 | | - unsigned int softirqs_raised; |
---|
1312 | | -#endif |
---|
1313 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1314 | | -# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 |
---|
1315 | | - int kmap_idx; |
---|
1316 | | - pte_t kmap_pte[KM_TYPE_NR]; |
---|
1317 | | -# endif |
---|
1318 | | -#endif |
---|
1319 | 1339 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
---|
1320 | 1340 | unsigned long task_state_change; |
---|
1321 | | -#endif |
---|
1322 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1323 | | - int xmit_recursion; |
---|
1324 | 1341 | #endif |
---|
1325 | 1342 | int pagefault_disabled; |
---|
1326 | 1343 | #ifdef CONFIG_MMU |
---|
.. | .. |
---|
1331 | 1348 | #endif |
---|
1332 | 1349 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
---|
1333 | 1350 | /* A live task holds one reference: */ |
---|
1334 | | - atomic_t stack_refcount; |
---|
| 1351 | + refcount_t stack_refcount; |
---|
1335 | 1352 | #endif |
---|
1336 | 1353 | #ifdef CONFIG_LIVEPATCH |
---|
1337 | 1354 | int patch_state; |
---|
.. | .. |
---|
1340 | 1357 | /* Used by LSM modules for access restriction: */ |
---|
1341 | 1358 | void *security; |
---|
1342 | 1359 | #endif |
---|
1343 | | - /* task is frozen/stopped (used by the cgroup freezer) */ |
---|
1344 | | - ANDROID_KABI_USE(1, unsigned frozen:1); |
---|
1345 | 1360 | |
---|
1346 | | - /* 095444fad7e3 ("futex: Replace PF_EXITPIDONE with a state") */ |
---|
1347 | | - ANDROID_KABI_USE(2, unsigned int futex_state); |
---|
| 1361 | +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK |
---|
| 1362 | + unsigned long lowest_stack; |
---|
| 1363 | + unsigned long prev_lowest_stack; |
---|
| 1364 | +#endif |
---|
1348 | 1365 | |
---|
1349 | | - /* |
---|
1350 | | - * f9b0c6c556db ("futex: Add mutex around futex exit") |
---|
1351 | | - * A struct mutex takes 32 bytes, or 4 64bit entries, so pick off |
---|
1352 | | - * 4 of the reserved members, and replace them with a struct mutex. |
---|
1353 | | - * Do the GENKSYMS hack to work around the CRC issues |
---|
1354 | | - */ |
---|
1355 | | -#ifdef __GENKSYMS__ |
---|
| 1366 | +#ifdef CONFIG_X86_MCE |
---|
| 1367 | + void __user *mce_vaddr; |
---|
| 1368 | + __u64 mce_kflags; |
---|
| 1369 | + u64 mce_addr; |
---|
| 1370 | + __u64 mce_ripv : 1, |
---|
| 1371 | + mce_whole_page : 1, |
---|
| 1372 | + __mce_reserved : 62; |
---|
| 1373 | + struct callback_head mce_kill_me; |
---|
| 1374 | + int mce_count; |
---|
| 1375 | +#endif |
---|
| 1376 | + ANDROID_VENDOR_DATA_ARRAY(1, 64); |
---|
| 1377 | + ANDROID_OEM_DATA_ARRAY(1, 32); |
---|
| 1378 | + |
---|
| 1379 | + /* PF_IO_WORKER */ |
---|
| 1380 | + ANDROID_KABI_USE(1, void *pf_io_worker); |
---|
| 1381 | + |
---|
| 1382 | + ANDROID_KABI_RESERVE(2); |
---|
1356 | 1383 | ANDROID_KABI_RESERVE(3); |
---|
1357 | 1384 | ANDROID_KABI_RESERVE(4); |
---|
1358 | 1385 | ANDROID_KABI_RESERVE(5); |
---|
1359 | 1386 | ANDROID_KABI_RESERVE(6); |
---|
1360 | | -#else |
---|
1361 | | - struct mutex futex_exit_mutex; |
---|
1362 | | -#endif |
---|
1363 | | - |
---|
1364 | 1387 | ANDROID_KABI_RESERVE(7); |
---|
1365 | 1388 | ANDROID_KABI_RESERVE(8); |
---|
1366 | 1389 | |
---|
.. | .. |
---|
1538 | 1561 | /* |
---|
1539 | 1562 | * Per process flags |
---|
1540 | 1563 | */ |
---|
1541 | | -#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ |
---|
| 1564 | +#define PF_VCPU 0x00000001 /* I'm a virtual CPU */ |
---|
1542 | 1565 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
---|
1543 | 1566 | #define PF_EXITING 0x00000004 /* Getting shut down */ |
---|
1544 | | -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
---|
| 1567 | +#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ |
---|
1545 | 1568 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
---|
1546 | 1569 | #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ |
---|
1547 | 1570 | #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ |
---|
.. | .. |
---|
1556 | 1579 | #define PF_KSWAPD 0x00020000 /* I am kswapd */ |
---|
1557 | 1580 | #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ |
---|
1558 | 1581 | #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ |
---|
1559 | | -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
---|
| 1582 | +#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, |
---|
| 1583 | + * I am cleaning dirty pages from some other bdi. */ |
---|
1560 | 1584 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
---|
1561 | 1585 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
---|
1562 | 1586 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
---|
1563 | | -#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
---|
1564 | | -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
---|
| 1587 | +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ |
---|
1565 | 1588 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
---|
1566 | | -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
---|
| 1589 | +#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ |
---|
1567 | 1590 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
---|
1568 | 1591 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
---|
1569 | 1592 | |
---|
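All PF_* values are single bits in task_struct::flags and are tested with plain mask operations, e.g. (illustrative checks, not from this patch):

```c
/* Illustrative only: simple mask tests against the PF_* bits above. */
static inline bool task_is_worker_thread(const struct task_struct *p)
{
	return p->flags & (PF_WQ_WORKER | PF_IO_WORKER);
}

static inline bool task_cleans_other_bdi(const struct task_struct *p)
{
	return p->flags & PF_LOCAL_THROTTLE;
}
```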
.. | .. |
---|
1613 | 1636 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ |
---|
1614 | 1637 | #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ |
---|
1615 | 1638 | #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ |
---|
| 1639 | +#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ |
---|
1616 | 1640 | |
---|
1617 | 1641 | #define TASK_PFA_TEST(name, func) \ |
---|
1618 | 1642 | static inline bool task_##func(struct task_struct *p) \ |
---|
.. | .. |
---|
1641 | 1665 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) |
---|
1642 | 1666 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) |
---|
1643 | 1667 | |
---|
| 1668 | +TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
---|
| 1669 | +TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
---|
| 1670 | +TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
---|
| 1671 | + |
---|
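Each TASK_PFA_TEST/SET/CLEAR line generates a small accessor over task_struct::atomic_flags; for the new PFA_SPEC_SSB_NOEXEC bit the generated functions expand to roughly the following (approximate expansion, shown for clarity):

```c
/* Approximate expansion of the three SPEC_SSB_NOEXEC lines above. */
static inline bool task_spec_ssb_noexec(struct task_struct *p)
{
	return test_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}

static inline void task_set_spec_ssb_noexec(struct task_struct *p)
{
	set_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}

static inline void task_clear_spec_ssb_noexec(struct task_struct *p)
{
	clear_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}
```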
1644 | 1672 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
---|
1645 | 1673 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
---|
1646 | 1674 | |
---|
.. | .. |
---|
1659 | 1687 | } |
---|
1660 | 1688 | |
---|
1661 | 1689 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
---|
1662 | | -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); |
---|
| 1690 | +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); |
---|
| 1691 | + |
---|
| 1692 | +#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION |
---|
| 1693 | +extern bool cpupri_check_rt(void); |
---|
| 1694 | +#else |
---|
| 1695 | +static inline bool cpupri_check_rt(void) |
---|
| 1696 | +{ |
---|
| 1697 | + return false; |
---|
| 1698 | +} |
---|
| 1699 | +#endif |
---|
| 1700 | + |
---|
1663 | 1701 | #ifdef CONFIG_SMP |
---|
1664 | 1702 | extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); |
---|
1665 | 1703 | extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); |
---|
| 1704 | +extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); |
---|
1666 | 1705 | #else |
---|
1667 | 1706 | static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
---|
1668 | 1707 | { |
---|
.. | .. |
---|
1673 | 1712 | return -EINVAL; |
---|
1674 | 1713 | return 0; |
---|
1675 | 1714 | } |
---|
1676 | | -#endif |
---|
1677 | | - |
---|
1678 | | -#ifndef cpu_relax_yield |
---|
1679 | | -#define cpu_relax_yield() cpu_relax() |
---|
1680 | 1715 | #endif |
---|
1681 | 1716 | |
---|
1682 | 1717 | extern int yield_to(struct task_struct *p, bool preempt); |
---|
.. | .. |
---|
1700 | 1735 | extern int available_idle_cpu(int cpu); |
---|
1701 | 1736 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); |
---|
1702 | 1737 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); |
---|
| 1738 | +extern void sched_set_fifo(struct task_struct *p); |
---|
| 1739 | +extern void sched_set_fifo_low(struct task_struct *p); |
---|
| 1740 | +extern void sched_set_normal(struct task_struct *p, int nice); |
---|
1703 | 1741 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); |
---|
1704 | 1742 | extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); |
---|
1705 | 1743 | extern struct task_struct *idle_task(int cpu); |
---|
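The new sched_set_fifo()/sched_set_fifo_low()/sched_set_normal() helpers let kernel threads request a scheduling class without building a struct sched_param by hand. A typical caller would look roughly like this; the kthread body and work function are placeholders, not code from this patch:

```c
#include <linux/sched.h>
#include <linux/kthread.h>

/* Illustrative kthread body using the helper declared above. */
static int my_rt_worker(void *arg)
{
	sched_set_fifo(current);	/* ask for a default SCHED_FIFO priority */

	while (!kthread_should_stop())
		do_pending_work(arg);	/* placeholder work function */

	return 0;
}
```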
.. | .. |
---|
1710 | 1748 | * |
---|
1711 | 1749 | * Return: 1 if @p is an idle task. 0 otherwise. |
---|
1712 | 1750 | */ |
---|
1713 | | -static inline bool is_idle_task(const struct task_struct *p) |
---|
| 1751 | +static __always_inline bool is_idle_task(const struct task_struct *p) |
---|
1714 | 1752 | { |
---|
1715 | 1753 | return !!(p->flags & PF_IDLE); |
---|
1716 | 1754 | } |
---|
.. | .. |
---|
1766 | 1804 | |
---|
1767 | 1805 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
---|
1768 | 1806 | extern int wake_up_process(struct task_struct *tsk); |
---|
1769 | | -extern int wake_up_lock_sleeper(struct task_struct *tsk); |
---|
1770 | 1807 | extern void wake_up_new_task(struct task_struct *tsk); |
---|
1771 | 1808 | |
---|
1772 | 1809 | #ifdef CONFIG_SMP |
---|
.. | .. |
---|
1789 | 1826 | }) |
---|
1790 | 1827 | |
---|
1791 | 1828 | #ifdef CONFIG_SMP |
---|
1792 | | -void scheduler_ipi(void); |
---|
| 1829 | +static __always_inline void scheduler_ipi(void) |
---|
| 1830 | +{ |
---|
| 1831 | + /* |
---|
| 1832 | + * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting |
---|
| 1833 | + * TIF_NEED_RESCHED remotely (for the first time) will also send |
---|
| 1834 | + * this IPI. |
---|
| 1835 | + */ |
---|
| 1836 | + preempt_fold_need_resched(); |
---|
| 1837 | +} |
---|
1793 | 1838 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
---|
1794 | 1839 | #else |
---|
1795 | 1840 | static inline void scheduler_ipi(void) { } |
---|
.. | .. |
---|
1849 | 1894 | return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); |
---|
1850 | 1895 | } |
---|
1851 | 1896 | |
---|
1852 | | -#ifdef CONFIG_PREEMPT_LAZY |
---|
1853 | | -static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) |
---|
1854 | | -{ |
---|
1855 | | - set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); |
---|
1856 | | -} |
---|
1857 | | - |
---|
1858 | | -static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) |
---|
1859 | | -{ |
---|
1860 | | - clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); |
---|
1861 | | -} |
---|
1862 | | - |
---|
1863 | | -static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) |
---|
1864 | | -{ |
---|
1865 | | - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); |
---|
1866 | | -} |
---|
1867 | | - |
---|
1868 | | -static inline int need_resched_lazy(void) |
---|
1869 | | -{ |
---|
1870 | | - return test_thread_flag(TIF_NEED_RESCHED_LAZY); |
---|
1871 | | -} |
---|
1872 | | - |
---|
1873 | | -static inline int need_resched_now(void) |
---|
1874 | | -{ |
---|
1875 | | - return test_thread_flag(TIF_NEED_RESCHED); |
---|
1876 | | -} |
---|
1877 | | - |
---|
1878 | | -#else |
---|
1879 | | -static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } |
---|
1880 | | -static inline int need_resched_lazy(void) { return 0; } |
---|
1881 | | - |
---|
1882 | | -static inline int need_resched_now(void) |
---|
1883 | | -{ |
---|
1884 | | - return test_thread_flag(TIF_NEED_RESCHED); |
---|
1885 | | -} |
---|
1886 | | - |
---|
1887 | | -#endif |
---|
1888 | | - |
---|
1889 | | - |
---|
1890 | | -static inline bool __task_is_stopped_or_traced(struct task_struct *task) |
---|
1891 | | -{ |
---|
1892 | | - if (task->state & (__TASK_STOPPED | __TASK_TRACED)) |
---|
1893 | | - return true; |
---|
1894 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1895 | | - if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) |
---|
1896 | | - return true; |
---|
1897 | | -#endif |
---|
1898 | | - return false; |
---|
1899 | | -} |
---|
1900 | | - |
---|
1901 | | -static inline bool task_is_stopped_or_traced(struct task_struct *task) |
---|
1902 | | -{ |
---|
1903 | | - bool traced_stopped; |
---|
1904 | | - |
---|
1905 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1906 | | - unsigned long flags; |
---|
1907 | | - |
---|
1908 | | - raw_spin_lock_irqsave(&task->pi_lock, flags); |
---|
1909 | | - traced_stopped = __task_is_stopped_or_traced(task); |
---|
1910 | | - raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
---|
1911 | | -#else |
---|
1912 | | - traced_stopped = __task_is_stopped_or_traced(task); |
---|
1913 | | -#endif |
---|
1914 | | - return traced_stopped; |
---|
1915 | | -} |
---|
1916 | | - |
---|
1917 | | -static inline bool task_is_traced(struct task_struct *task) |
---|
1918 | | -{ |
---|
1919 | | - bool traced = false; |
---|
1920 | | - |
---|
1921 | | - if (task->state & __TASK_TRACED) |
---|
1922 | | - return true; |
---|
1923 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1924 | | - /* in case the task is sleeping on tasklist_lock */ |
---|
1925 | | - raw_spin_lock_irq(&task->pi_lock); |
---|
1926 | | - if (task->state & __TASK_TRACED) |
---|
1927 | | - traced = true; |
---|
1928 | | - else if (task->saved_state & __TASK_TRACED) |
---|
1929 | | - traced = true; |
---|
1930 | | - raw_spin_unlock_irq(&task->pi_lock); |
---|
1931 | | -#endif |
---|
1932 | | - return traced; |
---|
1933 | | -} |
---|
1934 | | - |
---|
1935 | 1897 | /* |
---|
1936 | 1898 | * cond_resched() and cond_resched_lock(): latency reduction via |
---|
1937 | 1899 | * explicit rescheduling in places that are safe. The return |
---|
1938 | 1900 | * value indicates whether a reschedule was done in fact. |
---|
1939 | 1901 | * cond_resched_lock() will drop the spinlock before scheduling, |
---|
1940 | 1902 | */ |
---|
1941 | | -#ifndef CONFIG_PREEMPT |
---|
| 1903 | +#ifndef CONFIG_PREEMPTION |
---|
1942 | 1904 | extern int _cond_resched(void); |
---|
1943 | 1905 | #else |
---|
1944 | 1906 | static inline int _cond_resched(void) { return 0; } |
---|
.. | .. |
---|
1967 | 1929 | |
---|
1968 | 1930 | /* |
---|
1969 | 1931 | * Does a critical section need to be broken due to another |
---|
1970 | | - * task waiting?: (technically does not depend on CONFIG_PREEMPT, |
---|
| 1932 | + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, |
---|
1971 | 1933 | * but a general need for low latency) |
---|
1972 | 1934 | */ |
---|
1973 | 1935 | static inline int spin_needbreak(spinlock_t *lock) |
---|
1974 | 1936 | { |
---|
1975 | | -#ifdef CONFIG_PREEMPT |
---|
| 1937 | +#ifdef CONFIG_PREEMPTION |
---|
1976 | 1938 | return spin_is_contended(lock); |
---|
1977 | 1939 | #else |
---|
1978 | 1940 | return 0; |
---|
.. | .. |
---|
1983 | 1945 | { |
---|
1984 | 1946 | return unlikely(tif_need_resched()); |
---|
1985 | 1947 | } |
---|
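spin_needbreak() above is the hook that lock-break loops consult; together with need_resched() it drives the canonical pattern used by cond_resched_lock() callers. A sketch, with placeholder work helpers that are not kernel API:

```c
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Sketch of the usual lock-break loop built on spin_needbreak() and
 * need_resched(); more_work_pending()/process_one_item() are placeholders. */
static void drain_work(spinlock_t *lock)
{
	spin_lock(lock);
	while (more_work_pending()) {
		process_one_item();
		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			cond_resched();		/* let a waiter or higher-prio task in */
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}
```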
1986 | | - |
---|
1987 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1988 | | -static inline void sleeping_lock_inc(void) |
---|
1989 | | -{ |
---|
1990 | | - current->sleeping_lock++; |
---|
1991 | | -} |
---|
1992 | | - |
---|
1993 | | -static inline void sleeping_lock_dec(void) |
---|
1994 | | -{ |
---|
1995 | | - current->sleeping_lock--; |
---|
1996 | | -} |
---|
1997 | | - |
---|
1998 | | -#else |
---|
1999 | | - |
---|
2000 | | -static inline void sleeping_lock_inc(void) { } |
---|
2001 | | -static inline void sleeping_lock_dec(void) { } |
---|
2002 | | -#endif |
---|
2003 | 1948 | |
---|
2004 | 1949 | /* |
---|
2005 | 1950 | * Wrappers for p->thread_info->cpu access. No-op on UP. |
---|
.. | .. |
---|
2039 | 1984 | * running or not. |
---|
2040 | 1985 | */ |
---|
2041 | 1986 | #ifndef vcpu_is_preempted |
---|
2042 | | -# define vcpu_is_preempted(cpu) false |
---|
| 1987 | +static inline bool vcpu_is_preempted(int cpu) |
---|
| 1988 | +{ |
---|
| 1989 | + return false; |
---|
| 1990 | +} |
---|
2043 | 1991 | #endif |
---|
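vcpu_is_preempted() is queried by spin-wait style code to stop busy-waiting on a CPU whose vCPU the hypervisor has scheduled out. A rough illustration; the flag structure and loop are invented for the example:

```c
#include <linux/sched.h>

/* Illustrative only: give up a busy-wait early when the owner's vCPU is
 * preempted. The wait_flag structure is hypothetical. */
struct wait_flag {
	int done;
};

static bool spin_for_owner(struct wait_flag *f, int owner_cpu)
{
	while (!READ_ONCE(f->done)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* owner is not running; stop spinning */
		cpu_relax();
	}
	return true;
}
```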
2044 | 1992 | |
---|
2045 | 1993 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
---|
.. | .. |
---|
2113 | 2061 | { |
---|
2114 | 2062 | if (clone_flags & CLONE_VM) { |
---|
2115 | 2063 | t->rseq = NULL; |
---|
2116 | | - t->rseq_len = 0; |
---|
2117 | 2064 | t->rseq_sig = 0; |
---|
2118 | 2065 | t->rseq_event_mask = 0; |
---|
2119 | 2066 | } else { |
---|
2120 | 2067 | t->rseq = current->rseq; |
---|
2121 | | - t->rseq_len = current->rseq_len; |
---|
2122 | 2068 | t->rseq_sig = current->rseq_sig; |
---|
2123 | 2069 | t->rseq_event_mask = current->rseq_event_mask; |
---|
2124 | 2070 | } |
---|
.. | .. |
---|
2127 | 2073 | static inline void rseq_execve(struct task_struct *t) |
---|
2128 | 2074 | { |
---|
2129 | 2075 | t->rseq = NULL; |
---|
2130 | | - t->rseq_len = 0; |
---|
2131 | 2076 | t->rseq_sig = 0; |
---|
2132 | 2077 | t->rseq_event_mask = 0; |
---|
2133 | 2078 | } |
---|
.. | .. |
---|
2172 | 2117 | |
---|
2173 | 2118 | #endif |
---|
2174 | 2119 | |
---|
2175 | | -extern struct task_struct *takedown_cpu_task; |
---|
| 2120 | +const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); |
---|
| 2121 | +char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); |
---|
| 2122 | +int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); |
---|
| 2123 | + |
---|
| 2124 | +const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); |
---|
| 2125 | +const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); |
---|
| 2126 | +const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); |
---|
| 2127 | + |
---|
| 2128 | +int sched_trace_rq_cpu(struct rq *rq); |
---|
| 2129 | +int sched_trace_rq_cpu_capacity(struct rq *rq); |
---|
| 2130 | +int sched_trace_rq_nr_running(struct rq *rq); |
---|
| 2131 | + |
---|
| 2132 | +const struct cpumask *sched_trace_rd_span(struct root_domain *rd); |
---|
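These accessors exist so tracing probes can read scheduler internals without the private struct definitions. A probe attached to one of the bare PELT tracepoints might use them roughly as below; the probe signature and registration path are assumptions based on how mainline's pelt_cfs_tp is typically consumed, not something defined in this hunk:

```c
#include <linux/sched.h>
#include <linux/printk.h>

/* Sketch of a tracepoint probe consuming the accessors above; assumes a
 * (void *data, struct cfs_rq *cfs_rq) probe signature. */
static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
	char path[64];

	sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
	pr_debug("cpu%d %s util_avg=%lu\n",
		 sched_trace_cfs_rq_cpu(cfs_rq), path, avg ? avg->util_avg : 0);
}
```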
2176 | 2133 | |
---|
2177 | 2134 | #endif |
---|