| .. | .. |
|---|
| 18 | 18 | #include <linux/mutex.h> |
|---|
| 19 | 19 | #include <linux/plist.h> |
|---|
| 20 | 20 | #include <linux/hrtimer.h> |
|---|
| 21 | +#include <linux/irqflags.h> |
|---|
| 21 | 22 | #include <linux/seccomp.h> |
|---|
| 22 | 23 | #include <linux/nodemask.h> |
|---|
| 23 | 24 | #include <linux/rcupdate.h> |
|---|
| 25 | +#include <linux/refcount.h> |
|---|
| 24 | 26 | #include <linux/resource.h> |
|---|
| 25 | 27 | #include <linux/latencytop.h> |
|---|
| 26 | 28 | #include <linux/sched/prio.h> |
|---|
| 29 | +#include <linux/sched/types.h> |
|---|
| 27 | 30 | #include <linux/signal_types.h> |
|---|
| 28 | 31 | #include <linux/mm_types_task.h> |
|---|
| 29 | | -#include <linux/mm_event.h> |
|---|
| 30 | 32 | #include <linux/task_io_accounting.h> |
|---|
| 33 | +#include <linux/posix-timers.h> |
|---|
| 31 | 34 | #include <linux/rseq.h> |
|---|
| 35 | +#include <linux/seqlock.h> |
|---|
| 36 | +#include <linux/kcsan.h> |
|---|
| 37 | +#include <linux/android_vendor.h> |
|---|
| 32 | 38 | #include <linux/android_kabi.h> |
|---|
| 33 | 39 | |
|---|
| 34 | 40 | /* task_struct member predeclarations (sorted alphabetically): */ |
|---|
| .. | .. |
|---|
| 36 | 42 | struct backing_dev_info; |
|---|
| 37 | 43 | struct bio_list; |
|---|
| 38 | 44 | struct blk_plug; |
|---|
| 45 | +struct capture_control; |
|---|
| 39 | 46 | struct cfs_rq; |
|---|
| 40 | 47 | struct fs_struct; |
|---|
| 41 | 48 | struct futex_pi_state; |
|---|
| .. | .. |
|---|
| 49 | 56 | struct rcu_node; |
|---|
| 50 | 57 | struct reclaim_state; |
|---|
| 51 | 58 | struct robust_list_head; |
|---|
| 59 | +struct root_domain; |
|---|
| 60 | +struct rq; |
|---|
| 52 | 61 | struct sched_attr; |
|---|
| 53 | 62 | struct sched_param; |
|---|
| 54 | 63 | struct seq_file; |
|---|
| .. | .. |
|---|
| 56 | 65 | struct signal_struct; |
|---|
| 57 | 66 | struct task_delay_info; |
|---|
| 58 | 67 | struct task_group; |
|---|
| 68 | +struct io_uring_task; |
|---|
| 59 | 69 | |
|---|
| 60 | 70 | /* |
|---|
| 61 | 71 | * Task state bitmask. NOTE! These bits are also |
|---|
| .. | .. |
|---|
| 103 | 113 | __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ |
|---|
| 104 | 114 | TASK_PARKED) |
|---|
| 105 | 115 | |
|---|
| 106 | | -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
|---|
| 107 | | - |
|---|
| 108 | 116 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) |
|---|
| 109 | | - |
|---|
| 110 | | -#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
|---|
| 111 | | - |
|---|
| 112 | | -#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
|---|
| 113 | | - (task->flags & PF_FROZEN) == 0 && \ |
|---|
| 114 | | - (task->state & TASK_NOLOAD) == 0) |
|---|
| 115 | 117 | |
|---|
| 116 | 118 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
|---|
| 117 | 119 | |
|---|
| .. | .. |
|---|
| 136 | 138 | smp_store_mb(current->state, (state_value)); \ |
|---|
| 137 | 139 | } while (0) |
|---|
| 138 | 140 | |
|---|
| 141 | +#define __set_current_state_no_track(state_value) \ |
|---|
| 142 | + current->state = (state_value); |
|---|
| 143 | + |
|---|
| 139 | 144 | #define set_special_state(state_value) \ |
|---|
| 140 | 145 | do { \ |
|---|
| 141 | 146 | unsigned long flags; /* may shadow */ \ |
|---|
| .. | .. |
|---|
| 153 | 158 | * |
|---|
| 154 | 159 | * for (;;) { |
|---|
| 155 | 160 | * set_current_state(TASK_UNINTERRUPTIBLE); |
|---|
| 156 | | - * if (!need_sleep) |
|---|
| 157 | | - * break; |
|---|
| 161 | + * if (CONDITION) |
|---|
| 162 | + * break; |
|---|
| 158 | 163 | * |
|---|
| 159 | 164 | * schedule(); |
|---|
| 160 | 165 | * } |
|---|
| 161 | 166 | * __set_current_state(TASK_RUNNING); |
|---|
| 162 | 167 | * |
|---|
| 163 | 168 | * If the caller does not need such serialisation (because, for instance, the |
|---|
| 164 | | - * condition test and condition change and wakeup are under the same lock) then |
|---|
| 169 | + * CONDITION test and condition change and wakeup are under the same lock) then |
|---|
| 165 | 170 | * use __set_current_state(). |
|---|
| 166 | 171 | * |
|---|
| 167 | 172 | * The above is typically ordered against the wakeup, which does: |
|---|
| 168 | 173 | * |
|---|
| 169 | | - * need_sleep = false; |
|---|
| 174 | + * CONDITION = 1; |
|---|
| 170 | 175 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); |
|---|
| 171 | 176 | * |
|---|
| 172 | | - * where wake_up_state() executes a full memory barrier before accessing the |
|---|
| 173 | | - * task state. |
|---|
| 177 | + * where wake_up_state()/try_to_wake_up() executes a full memory barrier before |
|---|
| 178 | + * accessing p->state. |
|---|
| 174 | 179 | * |
|---|
| 175 | 180 | * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, |
|---|
| 176 | 181 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a |
|---|
| 177 | 182 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). |
|---|
| 178 | 183 | * |
|---|
| 179 | 184 | * However, with slightly different timing the wakeup TASK_RUNNING store can |
|---|
| 180 | | - * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not |
|---|
| 185 | + * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not |
|---|
| 181 | 186 | * a problem either because that will result in one extra go around the loop |
|---|
| 182 | 187 | * and our @cond test will save the day. |
|---|
| 183 | 188 | * |
|---|
| .. | .. |
|---|
| 188 | 193 | |
|---|
| 189 | 194 | #define set_current_state(state_value) \ |
|---|
| 190 | 195 | smp_store_mb(current->state, (state_value)) |
|---|
| 196 | + |
|---|
| 197 | +#define __set_current_state_no_track(state_value) \ |
|---|
| 198 | + __set_current_state(state_value) |
|---|
| 191 | 199 | |
|---|
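The comment block above documents the ordering contract between a sleeper's `set_current_state()` and the waker. As a minimal end-to-end sketch of that pattern (the `data_ready` flag, `waiter_task` pointer and function names are illustrative, not kernel symbols):

```c
#include <linux/sched.h>

static bool data_ready;			/* the CONDITION from the comment above */
static struct task_struct *waiter_task;	/* illustrative: recorded when the waiter starts */

static void wait_for_data(void)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(data_ready))	/* test CONDITION after publishing our state */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void publish_data(void)
{
	WRITE_ONCE(data_ready, true);				/* CONDITION = 1 */
	wake_up_state(waiter_task, TASK_UNINTERRUPTIBLE);	/* full barrier before it reads ->state */
}
```

Either ordering of the racing stores leaves the waiter runnable or re-checking `data_ready`, which is exactly the "one extra go around the loop" case described above.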
| 192 | 200 | /* |
|---|
| 193 | 201 | * set_special_state() should be used for those states when the blocking task |
|---|
| .. | .. |
|---|
| 219 | 227 | extern long schedule_timeout_idle(long timeout); |
|---|
| 220 | 228 | asmlinkage void schedule(void); |
|---|
| 221 | 229 | extern void schedule_preempt_disabled(void); |
|---|
| 230 | +asmlinkage void preempt_schedule_irq(void); |
|---|
| 222 | 231 | |
|---|
| 223 | 232 | extern int __must_check io_schedule_prepare(void); |
|---|
| 224 | 233 | extern void io_schedule_finish(int token); |
|---|
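`io_schedule_prepare()`/`io_schedule_finish()` bracket a blocking region so that any sleep inside it is accounted as iowait; this is essentially what `mutex_lock_io()` does internally. A sketch (the `wait_lock` mutex is illustrative):

```c
#include <linux/mutex.h>
#include <linux/sched.h>

/* Sleep time spent waiting for wait_lock is charged to iowait. */
static void lock_io_resource(struct mutex *wait_lock)
{
	int token = io_schedule_prepare();

	mutex_lock(wait_lock);		/* may sleep */
	io_schedule_finish(token);
}
```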
| .. | .. |
|---|
| 242 | 251 | #endif |
|---|
| 243 | 252 | }; |
|---|
| 244 | 253 | |
|---|
| 245 | | -/** |
|---|
| 246 | | - * struct task_cputime - collected CPU time counts |
|---|
| 247 | | - * @utime: time spent in user mode, in nanoseconds |
|---|
| 248 | | - * @stime: time spent in kernel mode, in nanoseconds |
|---|
| 249 | | - * @sum_exec_runtime: total time spent on the CPU, in nanoseconds |
|---|
| 250 | | - * |
|---|
| 251 | | - * This structure groups together three kinds of CPU time that are tracked for |
|---|
| 252 | | - * threads and thread groups. Most things considering CPU time want to group |
|---|
| 253 | | - * these counts together and treat all three of them in parallel. |
|---|
| 254 | | - */ |
|---|
| 255 | | -struct task_cputime { |
|---|
| 256 | | - u64 utime; |
|---|
| 257 | | - u64 stime; |
|---|
| 258 | | - unsigned long long sum_exec_runtime; |
|---|
| 259 | | -}; |
|---|
| 260 | | - |
|---|
| 261 | | -/* Alternate field names when used on cache expirations: */ |
|---|
| 262 | | -#define virt_exp utime |
|---|
| 263 | | -#define prof_exp stime |
|---|
| 264 | | -#define sched_exp sum_exec_runtime |
|---|
| 265 | | - |
|---|
| 266 | 254 | enum vtime_state { |
|---|
| 267 | 255 | /* Task is sleeping or running in a CPU with VTIME inactive: */ |
|---|
| 268 | 256 | VTIME_INACTIVE = 0, |
|---|
| 269 | | - /* Task runs in userspace in a CPU with VTIME active: */ |
|---|
| 270 | | - VTIME_USER, |
|---|
| 257 | + /* Task is idle */ |
|---|
| 258 | + VTIME_IDLE, |
|---|
| 271 | 259 | /* Task runs in kernelspace in a CPU with VTIME active: */ |
|---|
| 272 | 260 | VTIME_SYS, |
|---|
| 261 | + /* Task runs in userspace in a CPU with VTIME active: */ |
|---|
| 262 | + VTIME_USER, |
|---|
| 263 | + /* Task runs as guests in a CPU with VTIME active: */ |
|---|
| 264 | + VTIME_GUEST, |
|---|
| 273 | 265 | }; |
|---|
| 274 | 266 | |
|---|
| 275 | 267 | struct vtime { |
|---|
| 276 | 268 | seqcount_t seqcount; |
|---|
| 277 | 269 | unsigned long long starttime; |
|---|
| 278 | 270 | enum vtime_state state; |
|---|
| 271 | + unsigned int cpu; |
|---|
| 279 | 272 | u64 utime; |
|---|
| 280 | 273 | u64 stime; |
|---|
| 281 | 274 | u64 gtime; |
|---|
| .. | .. |
|---|
| 292 | 285 | UCLAMP_MAX, |
|---|
| 293 | 286 | UCLAMP_CNT |
|---|
| 294 | 287 | }; |
|---|
| 288 | + |
|---|
| 289 | +#ifdef CONFIG_SMP |
|---|
| 290 | +extern struct root_domain def_root_domain; |
|---|
| 291 | +extern struct mutex sched_domains_mutex; |
|---|
| 292 | +#endif |
|---|
| 295 | 293 | |
|---|
| 296 | 294 | struct sched_info { |
|---|
| 297 | 295 | #ifdef CONFIG_SCHED_INFO |
|---|
| .. | .. |
|---|
| 354 | 352 | * Only for tasks we track a moving average of the past instantaneous |
|---|
| 355 | 353 | * estimated utilization. This allows to absorb sporadic drops in utilization |
|---|
| 356 | 354 | * of an otherwise almost periodic task. |
|---|
| 355 | + * |
|---|
| 356 | + * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg |
|---|
| 357 | + * updates. When a task is dequeued, its util_est should not be updated if its |
|---|
| 358 | + * util_avg has not been updated in the meantime. |
|---|
| 359 | + * This information is mapped into the MSB bit of util_est.enqueued at dequeue |
|---|
| 360 | + * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg |
|---|
| 361 | + * for a task) it is safe to use MSB. |
|---|
| 357 | 362 | */ |
|---|
| 358 | 363 | struct util_est { |
|---|
| 359 | 364 | unsigned int enqueued; |
|---|
| 360 | 365 | unsigned int ewma; |
|---|
| 361 | 366 | #define UTIL_EST_WEIGHT_SHIFT 2 |
|---|
| 367 | +#define UTIL_AVG_UNCHANGED 0x80000000 |
|---|
| 362 | 368 | } __attribute__((__aligned__(sizeof(u64)))); |
|---|
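Because `UTIL_AVG_UNCHANGED` is folded into the MSB of `util_est.enqueued`, readers have to mask it off before treating the field as a utilization value. A small illustration of that convention (these helper names are ours, not the kernel's):

```c
/* The flag sits above the maximum PELT value (1024), so value and flag
 * never collide. */
static inline unsigned int ue_value(unsigned int enqueued)
{
	return enqueued & ~UTIL_AVG_UNCHANGED;	/* the actual estimate */
}

static inline bool ue_unchanged(unsigned int enqueued)
{
	return enqueued & UTIL_AVG_UNCHANGED;	/* util_avg not updated since dequeue */
}

static inline unsigned int ue_mark_unchanged(unsigned int enqueued)
{
	return enqueued | UTIL_AVG_UNCHANGED;	/* set at dequeue time */
}
```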
| 363 | 369 | |
|---|
| 364 | 370 | /* |
|---|
| 365 | | - * The load_avg/util_avg accumulates an infinite geometric series |
|---|
| 366 | | - * (see __update_load_avg() in kernel/sched/fair.c). |
|---|
| 371 | + * The load/runnable/util_avg accumulates an infinite geometric series |
|---|
| 372 | + * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). |
|---|
| 367 | 373 | * |
|---|
| 368 | 374 | * [load_avg definition] |
|---|
| 369 | 375 | * |
|---|
| 370 | 376 | * load_avg = runnable% * scale_load_down(load) |
|---|
| 371 | 377 | * |
|---|
| 372 | | - * where runnable% is the time ratio that a sched_entity is runnable. |
|---|
| 373 | | - * For cfs_rq, it is the aggregated load_avg of all runnable and |
|---|
| 374 | | - * blocked sched_entities. |
|---|
| 378 | + * [runnable_avg definition] |
|---|
| 379 | + * |
|---|
| 380 | + * runnable_avg = runnable% * SCHED_CAPACITY_SCALE |
|---|
| 375 | 381 | * |
|---|
| 376 | 382 | * [util_avg definition] |
|---|
| 377 | 383 | * |
|---|
| 378 | 384 | * util_avg = running% * SCHED_CAPACITY_SCALE |
|---|
| 379 | 385 | * |
|---|
| 380 | | - * where running% is the time ratio that a sched_entity is running on |
|---|
| 381 | | - * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable |
|---|
| 382 | | - * and blocked sched_entities. |
|---|
| 386 | + * where runnable% is the time ratio that a sched_entity is runnable and |
|---|
| 387 | + * running% the time ratio that a sched_entity is running. |
|---|
| 383 | 388 | * |
|---|
| 384 | | - * load_avg and util_avg don't direcly factor frequency scaling and CPU |
|---|
| 385 | | - * capacity scaling. The scaling is done through the rq_clock_pelt that |
|---|
| 386 | | - * is used for computing those signals (see update_rq_clock_pelt()) |
|---|
| 389 | + * For cfs_rq, they are the aggregated values of all runnable and blocked |
|---|
| 390 | + * sched_entities. |
|---|
| 391 | + * |
|---|
| 392 | + * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU |
|---|
| 393 | + * capacity scaling. The scaling is done through the rq_clock_pelt that is used |
|---|
| 394 | + * for computing those signals (see update_rq_clock_pelt()) |
|---|
| 387 | 395 | * |
|---|
| 388 | 396 | * N.B., the above ratios (runnable% and running%) themselves are in the |
|---|
| 389 | 397 | * range of [0, 1]. To do fixed point arithmetics, we therefore scale them |
|---|
| .. | .. |
|---|
| 407 | 415 | struct sched_avg { |
|---|
| 408 | 416 | u64 last_update_time; |
|---|
| 409 | 417 | u64 load_sum; |
|---|
| 410 | | - u64 runnable_load_sum; |
|---|
| 418 | + u64 runnable_sum; |
|---|
| 411 | 419 | u32 util_sum; |
|---|
| 412 | 420 | u32 period_contrib; |
|---|
| 413 | 421 | unsigned long load_avg; |
|---|
| 414 | | - unsigned long runnable_load_avg; |
|---|
| 422 | + unsigned long runnable_avg; |
|---|
| 415 | 423 | unsigned long util_avg; |
|---|
| 416 | 424 | struct util_est util_est; |
|---|
| 417 | 425 | } ____cacheline_aligned; |
|---|
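To make the geometric-series wording above concrete, here is a throwaway userspace toy (floating point, not the kernel's fixed-point PELT code in kernel/sched/pelt.c) that decays a running sum with y^32 = 0.5 and shows how a task runnable half the time converges to roughly half of SCHED_CAPACITY_SCALE:

```c
#include <stdio.h>

int main(void)
{
	const double y = 0.97857206;	/* 32nd root of 0.5: one ~1ms period of decay */
	double sum = 0.0, max_sum = 0.0;
	int i;

	for (i = 0; i < 345; i++) {		/* ~345 periods: the series has saturated */
		sum     = sum     * y + (i % 2 ? 1024 : 0);	/* ran every other period */
		max_sum = max_sum * y + 1024;			/* always-running reference */
	}
	/* runnable% is about 50%, so this prints a value close to 512 */
	printf("util_avg ~= %.0f\n", 1024.0 * sum / max_sum);
	return 0;
}
```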
| .. | .. |
|---|
| 455 | 463 | struct sched_entity { |
|---|
| 456 | 464 | /* For load-balancing: */ |
|---|
| 457 | 465 | struct load_weight load; |
|---|
| 458 | | - unsigned long runnable_weight; |
|---|
| 459 | 466 | struct rb_node run_node; |
|---|
| 460 | 467 | struct list_head group_node; |
|---|
| 461 | 468 | unsigned int on_rq; |
|---|
| .. | .. |
|---|
| 476 | 483 | struct cfs_rq *cfs_rq; |
|---|
| 477 | 484 | /* rq "owned" by this entity/group: */ |
|---|
| 478 | 485 | struct cfs_rq *my_q; |
|---|
| 486 | + /* cached value of my_q->h_nr_running */ |
|---|
| 487 | + unsigned long runnable_weight; |
|---|
| 479 | 488 | #endif |
|---|
| 480 | 489 | |
|---|
| 481 | 490 | #ifdef CONFIG_SMP |
|---|
| .. | .. |
|---|
| 533 | 542 | |
|---|
| 534 | 543 | /* |
|---|
| 535 | 544 | * Actual scheduling parameters. Initialized with the values above, |
|---|
| 536 | | - * they are continously updated during task execution. Note that |
|---|
| 545 | + * they are continuously updated during task execution. Note that |
|---|
| 537 | 546 | * the remaining runtime could be < 0 in case we are in overrun. |
|---|
| 538 | 547 | */ |
|---|
| 539 | 548 | s64 runtime; /* Remaining runtime for this instance */ |
|---|
| .. | .. |
|---|
| 546 | 555 | * @dl_throttled tells if we exhausted the runtime. If so, the |
|---|
| 547 | 556 | * task has to wait for a replenishment to be performed at the |
|---|
| 548 | 557 | * next firing of dl_timer. |
|---|
| 549 | | - * |
|---|
| 550 | | - * @dl_boosted tells if we are boosted due to DI. If so we are |
|---|
| 551 | | - * outside bandwidth enforcement mechanism (but only until we |
|---|
| 552 | | - * exit the critical section); |
|---|
| 553 | 558 | * |
|---|
| 554 | 559 | * @dl_yielded tells if task gave up the CPU before consuming |
|---|
| 555 | 560 | * all its available runtime during the last job. |
|---|
| .. | .. |
|---|
| 565 | 570 | * overruns. |
|---|
| 566 | 571 | */ |
|---|
| 567 | 572 | unsigned int dl_throttled : 1; |
|---|
| 568 | | - unsigned int dl_boosted : 1; |
|---|
| 569 | 573 | unsigned int dl_yielded : 1; |
|---|
| 570 | 574 | unsigned int dl_non_contending : 1; |
|---|
| 571 | 575 | unsigned int dl_overrun : 1; |
|---|
| .. | .. |
|---|
| 584 | 588 | * time. |
|---|
| 585 | 589 | */ |
|---|
| 586 | 590 | struct hrtimer inactive_timer; |
|---|
| 591 | + |
|---|
| 592 | +#ifdef CONFIG_RT_MUTEXES |
|---|
| 593 | + /* |
|---|
| 594 | + * Priority Inheritance. When a DEADLINE scheduling entity is boosted |
|---|
| 595 | + * pi_se points to the donor, otherwise points to the dl_se it belongs |
|---|
| 596 | + * to (the original one/itself). |
|---|
| 597 | + */ |
|---|
| 598 | + struct sched_dl_entity *pi_se; |
|---|
| 599 | +#endif |
|---|
| 587 | 600 | }; |
|---|
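The `pi_se` comment above implies that code wanting the *effective* deadline parameters should always go through the donor pointer. A hypothetical accessor capturing that rule (the kernel's real helper may differ):

```c
#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *dl_effective(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;	/* donor when boosted, dl_se itself otherwise */
}
#else
static inline struct sched_dl_entity *dl_effective(struct sched_dl_entity *dl_se)
{
	return dl_se;		/* no PI: the entity is always its own source */
}
#endif
```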
| 588 | 601 | |
|---|
| 589 | 602 | #ifdef CONFIG_UCLAMP_TASK |
|---|
| .. | .. |
|---|
| 625 | 638 | struct { |
|---|
| 626 | 639 | u8 blocked; |
|---|
| 627 | 640 | u8 need_qs; |
|---|
| 628 | | - u8 exp_need_qs; |
|---|
| 629 | | - |
|---|
| 630 | | - /* Otherwise the compiler can store garbage here: */ |
|---|
| 631 | | - u8 pad; |
|---|
| 641 | + u8 exp_hint; /* Hint for performance. */ |
|---|
| 642 | + u8 need_mb; /* Readers need smp_mb(). */ |
|---|
| 632 | 643 | } b; /* Bits. */ |
|---|
| 633 | 644 | u32 s; /* Set of bits. */ |
|---|
| 634 | 645 | }; |
|---|
| .. | .. |
|---|
| 644 | 655 | struct wake_q_node *next; |
|---|
| 645 | 656 | }; |
|---|
| 646 | 657 | |
|---|
| 658 | +struct kmap_ctrl { |
|---|
| 659 | +#ifdef CONFIG_KMAP_LOCAL |
|---|
| 660 | + int idx; |
|---|
| 661 | + pte_t pteval[KM_MAX_IDX]; |
|---|
| 662 | +#endif |
|---|
| 663 | +}; |
|---|
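`kmap_ctrl` carries the per-task kmap_local state (index plus saved PTEs) that lets the scheduler tear mappings down at switch-out and rebuild them at switch-in. The practical consequence is that a `kmap_local_page()` mapping stays valid across preemption, unlike the old `kmap_atomic()`. A minimal sketch:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* May be preempted between kmap_local_page() and kunmap_local(); the mapping
 * is restored when this task runs again. */
static void copy_out_of_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src, len);
	kunmap_local(src);
}
```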
| 664 | + |
|---|
| 647 | 665 | struct task_struct { |
|---|
| 648 | 666 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
|---|
| 649 | 667 | /* |
|---|
| .. | .. |
|---|
| 654 | 672 | #endif |
|---|
| 655 | 673 | /* -1 unrunnable, 0 runnable, >0 stopped: */ |
|---|
| 656 | 674 | volatile long state; |
|---|
| 675 | + /* saved state for "spinlock sleepers" */ |
|---|
| 676 | + volatile long saved_state; |
|---|
| 657 | 677 | |
|---|
| 658 | 678 | /* |
|---|
| 659 | 679 | * This begins the randomizable portion of task_struct. Only |
|---|
| .. | .. |
|---|
| 662 | 682 | randomized_struct_fields_start |
|---|
| 663 | 683 | |
|---|
| 664 | 684 | void *stack; |
|---|
| 665 | | - atomic_t usage; |
|---|
| 685 | + refcount_t usage; |
|---|
| 666 | 686 | /* Per task flags (PF_*), defined further below: */ |
|---|
| 667 | 687 | unsigned int flags; |
|---|
| 668 | 688 | unsigned int ptrace; |
|---|
| 669 | 689 | |
|---|
| 670 | 690 | #ifdef CONFIG_SMP |
|---|
| 671 | | - struct llist_node wake_entry; |
|---|
| 672 | 691 | int on_cpu; |
|---|
| 692 | + struct __call_single_node wake_entry; |
|---|
| 673 | 693 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
|---|
| 674 | 694 | /* Current CPU: */ |
|---|
| 675 | 695 | unsigned int cpu; |
|---|
| .. | .. |
|---|
| 698 | 718 | const struct sched_class *sched_class; |
|---|
| 699 | 719 | struct sched_entity se; |
|---|
| 700 | 720 | struct sched_rt_entity rt; |
|---|
| 701 | | - |
|---|
| 702 | | - /* task boost vendor fields */ |
|---|
| 703 | | - u64 last_sleep_ts; |
|---|
| 704 | | - int boost; |
|---|
| 705 | | - u64 boost_period; |
|---|
| 706 | | - u64 boost_expires; |
|---|
| 707 | | - |
|---|
| 708 | 721 | #ifdef CONFIG_CGROUP_SCHED |
|---|
| 709 | 722 | struct task_group *sched_task_group; |
|---|
| 710 | 723 | #endif |
|---|
| 711 | 724 | struct sched_dl_entity dl; |
|---|
| 712 | 725 | |
|---|
| 713 | 726 | #ifdef CONFIG_UCLAMP_TASK |
|---|
| 714 | | - /* Clamp values requested for a scheduling entity */ |
|---|
| 727 | + /* |
|---|
| 728 | + * Clamp values requested for a scheduling entity. |
|---|
| 729 | + * Must be updated with task_rq_lock() held. |
|---|
| 730 | + */ |
|---|
| 715 | 731 | struct uclamp_se uclamp_req[UCLAMP_CNT]; |
|---|
| 716 | | - /* Effective clamp values used for a scheduling entity */ |
|---|
| 732 | + /* |
|---|
| 733 | + * Effective clamp values used for a scheduling entity. |
|---|
| 734 | + * Must be updated with task_rq_lock() held. |
|---|
| 735 | + */ |
|---|
| 717 | 736 | struct uclamp_se uclamp[UCLAMP_CNT]; |
|---|
| 737 | +#endif |
|---|
| 738 | + |
|---|
| 739 | +#ifdef CONFIG_HOTPLUG_CPU |
|---|
| 740 | + struct list_head percpu_kthread_node; |
|---|
| 718 | 741 | #endif |
|---|
| 719 | 742 | |
|---|
| 720 | 743 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
|---|
| .. | .. |
|---|
| 728 | 751 | |
|---|
| 729 | 752 | unsigned int policy; |
|---|
| 730 | 753 | int nr_cpus_allowed; |
|---|
| 731 | | - cpumask_t cpus_allowed; |
|---|
| 732 | | - cpumask_t cpus_requested; |
|---|
| 754 | + const cpumask_t *cpus_ptr; |
|---|
| 755 | + cpumask_t cpus_mask; |
|---|
| 756 | + void *migration_pending; |
|---|
| 757 | +#ifdef CONFIG_SMP |
|---|
| 758 | + unsigned short migration_disabled; |
|---|
| 759 | +#endif |
|---|
| 760 | + unsigned short migration_flags; |
|---|
| 733 | 761 | |
|---|
| 734 | 762 | #ifdef CONFIG_PREEMPT_RCU |
|---|
| 735 | 763 | int rcu_read_lock_nesting; |
|---|
| .. | .. |
|---|
| 745 | 773 | int rcu_tasks_idle_cpu; |
|---|
| 746 | 774 | struct list_head rcu_tasks_holdout_list; |
|---|
| 747 | 775 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
|---|
| 776 | + |
|---|
| 777 | +#ifdef CONFIG_TASKS_TRACE_RCU |
|---|
| 778 | + int trc_reader_nesting; |
|---|
| 779 | + int trc_ipi_to_cpu; |
|---|
| 780 | + union rcu_special trc_reader_special; |
|---|
| 781 | + bool trc_reader_checked; |
|---|
| 782 | + struct list_head trc_holdout_list; |
|---|
| 783 | +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
|---|
| 748 | 784 | |
|---|
| 749 | 785 | struct sched_info sched_info; |
|---|
| 750 | 786 | |
|---|
| .. | .. |
|---|
| 778 | 814 | unsigned sched_reset_on_fork:1; |
|---|
| 779 | 815 | unsigned sched_contributes_to_load:1; |
|---|
| 780 | 816 | unsigned sched_migrated:1; |
|---|
| 781 | | - unsigned sched_remote_wakeup:1; |
|---|
| 782 | 817 | #ifdef CONFIG_PSI |
|---|
| 783 | 818 | unsigned sched_psi_wake_requeue:1; |
|---|
| 784 | 819 | #endif |
|---|
| .. | .. |
|---|
| 788 | 823 | |
|---|
| 789 | 824 | /* Unserialized, strictly 'current' */ |
|---|
| 790 | 825 | |
|---|
| 826 | + /* |
|---|
| 827 | + * This field must not be in the scheduler word above due to wakelist |
|---|
| 828 | + * queueing no longer being serialized by p->on_cpu. However: |
|---|
| 829 | + * |
|---|
| 830 | + * p->XXX = X; ttwu() |
|---|
| 831 | + * schedule() if (p->on_rq && ..) // false |
|---|
| 832 | + * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true |
|---|
| 833 | + * deactivate_task() ttwu_queue_wakelist()) |
|---|
| 834 | + * p->on_rq = 0; p->sched_remote_wakeup = Y; |
|---|
| 835 | + * |
|---|
| 836 | + * guarantees all stores of 'current' are visible before |
|---|
| 837 | + * ->sched_remote_wakeup gets used, so it can be in this word. |
|---|
| 838 | + */ |
|---|
| 839 | + unsigned sched_remote_wakeup:1; |
|---|
| 840 | + |
|---|
| 791 | 841 | /* Bit to tell LSMs we're in execve(): */ |
|---|
| 792 | 842 | unsigned in_execve:1; |
|---|
| 793 | 843 | unsigned in_iowait:1; |
|---|
| .. | .. |
|---|
| 796 | 846 | #endif |
|---|
| 797 | 847 | #ifdef CONFIG_MEMCG |
|---|
| 798 | 848 | unsigned in_user_fault:1; |
|---|
| 799 | | -#ifdef CONFIG_MEMCG_KMEM |
|---|
| 800 | | - unsigned memcg_kmem_skip_account:1; |
|---|
| 801 | | -#endif |
|---|
| 802 | 849 | #endif |
|---|
| 803 | 850 | #ifdef CONFIG_COMPAT_BRK |
|---|
| 804 | 851 | unsigned brk_randomized:1; |
|---|
| .. | .. |
|---|
| 806 | 853 | #ifdef CONFIG_CGROUPS |
|---|
| 807 | 854 | /* disallow userland-initiated cgroup migration */ |
|---|
| 808 | 855 | unsigned no_cgroup_migration:1; |
|---|
| 856 | + /* task is frozen/stopped (used by the cgroup freezer) */ |
|---|
| 857 | + unsigned frozen:1; |
|---|
| 809 | 858 | #endif |
|---|
| 810 | 859 | #ifdef CONFIG_BLK_CGROUP |
|---|
| 811 | | - /* to be used once the psi infrastructure lands upstream. */ |
|---|
| 812 | 860 | unsigned use_memdelay:1; |
|---|
| 861 | +#endif |
|---|
| 862 | +#ifdef CONFIG_PSI |
|---|
| 863 | + /* Stalled due to lack of memory */ |
|---|
| 864 | + unsigned in_memstall:1; |
|---|
| 865 | +#endif |
|---|
| 866 | +#ifdef CONFIG_EVENTFD |
|---|
| 867 | + /* Recursion prevention for eventfd_signal() */ |
|---|
| 868 | + unsigned in_eventfd_signal:1; |
|---|
| 813 | 869 | #endif |
|---|
| 814 | 870 | |
|---|
| 815 | 871 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
|---|
| .. | .. |
|---|
| 892 | 948 | u64 start_time; |
|---|
| 893 | 949 | |
|---|
| 894 | 950 | /* Boot based time in nsecs: */ |
|---|
| 895 | | - u64 real_start_time; |
|---|
| 951 | + u64 start_boottime; |
|---|
| 896 | 952 | |
|---|
| 897 | 953 | /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ |
|---|
| 898 | 954 | unsigned long min_flt; |
|---|
| 899 | 955 | unsigned long maj_flt; |
|---|
| 900 | 956 | |
|---|
| 901 | | -#ifdef CONFIG_POSIX_TIMERS |
|---|
| 902 | | - struct task_cputime cputime_expires; |
|---|
| 903 | | - struct list_head cpu_timers[3]; |
|---|
| 957 | + /* Empty if CONFIG_POSIX_CPUTIMERS=n */ |
|---|
| 958 | + struct posix_cputimers posix_cputimers; |
|---|
| 959 | + |
|---|
| 960 | +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK |
|---|
| 961 | + struct posix_cputimers_work posix_cputimers_work; |
|---|
| 904 | 962 | #endif |
|---|
| 905 | 963 | |
|---|
| 906 | 964 | /* Process credentials: */ |
|---|
| .. | .. |
|---|
| 913 | 971 | |
|---|
| 914 | 972 | /* Effective (overridable) subjective task credentials (COW): */ |
|---|
| 915 | 973 | const struct cred __rcu *cred; |
|---|
| 974 | + |
|---|
| 975 | +#ifdef CONFIG_KEYS |
|---|
| 976 | + /* Cached requested key. */ |
|---|
| 977 | + struct key *cached_requested_key; |
|---|
| 978 | +#endif |
|---|
| 916 | 979 | |
|---|
| 917 | 980 | /* |
|---|
| 918 | 981 | * executable name, excluding path. |
|---|
| .. | .. |
|---|
| 939 | 1002 | /* Open file information: */ |
|---|
| 940 | 1003 | struct files_struct *files; |
|---|
| 941 | 1004 | |
|---|
| 1005 | +#ifdef CONFIG_IO_URING |
|---|
| 1006 | + struct io_uring_task *io_uring; |
|---|
| 1007 | +#endif |
|---|
| 1008 | + |
|---|
| 942 | 1009 | /* Namespaces: */ |
|---|
| 943 | 1010 | struct nsproxy *nsproxy; |
|---|
| 944 | 1011 | |
|---|
| 945 | 1012 | /* Signal handlers: */ |
|---|
| 946 | 1013 | struct signal_struct *signal; |
|---|
| 947 | | - struct sighand_struct *sighand; |
|---|
| 1014 | + struct sighand_struct __rcu *sighand; |
|---|
| 1015 | + struct sigqueue *sigqueue_cache; |
|---|
| 948 | 1016 | sigset_t blocked; |
|---|
| 949 | 1017 | sigset_t real_blocked; |
|---|
| 950 | 1018 | /* Restored if set_restore_sigmask() was used: */ |
|---|
| 951 | 1019 | sigset_t saved_sigmask; |
|---|
| 952 | 1020 | struct sigpending pending; |
|---|
| 1021 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 1022 | + /* TODO: move me into ->restart_block ? */ |
|---|
| 1023 | + struct kernel_siginfo forced_info; |
|---|
| 1024 | +#endif |
|---|
| 953 | 1025 | unsigned long sas_ss_sp; |
|---|
| 954 | 1026 | size_t sas_ss_size; |
|---|
| 955 | 1027 | unsigned int sas_ss_flags; |
|---|
| 956 | 1028 | |
|---|
| 957 | 1029 | struct callback_head *task_works; |
|---|
| 958 | 1030 | |
|---|
| 959 | | - struct audit_context *audit_context; |
|---|
| 1031 | +#ifdef CONFIG_AUDIT |
|---|
| 960 | 1032 | #ifdef CONFIG_AUDITSYSCALL |
|---|
| 1033 | + struct audit_context *audit_context; |
|---|
| 1034 | +#endif |
|---|
| 961 | 1035 | kuid_t loginuid; |
|---|
| 962 | 1036 | unsigned int sessionid; |
|---|
| 963 | 1037 | #endif |
|---|
| .. | .. |
|---|
| 974 | 1048 | raw_spinlock_t pi_lock; |
|---|
| 975 | 1049 | |
|---|
| 976 | 1050 | struct wake_q_node wake_q; |
|---|
| 1051 | + struct wake_q_node wake_q_sleeper; |
|---|
| 1052 | + int wake_q_count; |
|---|
| 977 | 1053 | |
|---|
| 978 | 1054 | #ifdef CONFIG_RT_MUTEXES |
|---|
| 979 | 1055 | /* PI waiters blocked on a rt_mutex held by this task: */ |
|---|
| .. | .. |
|---|
| 983 | 1059 | /* Deadlock detection and priority inheritance handling: */ |
|---|
| 984 | 1060 | struct rt_mutex_waiter *pi_blocked_on; |
|---|
| 985 | 1061 | #endif |
|---|
| 986 | | -#ifdef CONFIG_MM_EVENT_STAT |
|---|
| 987 | | - struct mm_event_task mm_event[MM_TYPE_NUM]; |
|---|
| 988 | | - unsigned long next_period; |
|---|
| 989 | | -#endif |
|---|
| 1062 | + |
|---|
| 990 | 1063 | #ifdef CONFIG_DEBUG_MUTEXES |
|---|
| 991 | 1064 | /* Mutex deadlock detection: */ |
|---|
| 992 | 1065 | struct mutex_waiter *blocked_on; |
|---|
| 993 | 1066 | #endif |
|---|
| 994 | 1067 | |
|---|
| 1068 | +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
|---|
| 1069 | + int non_block_count; |
|---|
| 1070 | +#endif |
|---|
| 1071 | + |
|---|
| 995 | 1072 | #ifdef CONFIG_TRACE_IRQFLAGS |
|---|
| 996 | | - unsigned int irq_events; |
|---|
| 997 | | - unsigned long hardirq_enable_ip; |
|---|
| 998 | | - unsigned long hardirq_disable_ip; |
|---|
| 999 | | - unsigned int hardirq_enable_event; |
|---|
| 1000 | | - unsigned int hardirq_disable_event; |
|---|
| 1001 | | - int hardirqs_enabled; |
|---|
| 1002 | | - int hardirq_context; |
|---|
| 1003 | | - unsigned long softirq_disable_ip; |
|---|
| 1004 | | - unsigned long softirq_enable_ip; |
|---|
| 1005 | | - unsigned int softirq_disable_event; |
|---|
| 1006 | | - unsigned int softirq_enable_event; |
|---|
| 1073 | + struct irqtrace_events irqtrace; |
|---|
| 1074 | + unsigned int hardirq_threaded; |
|---|
| 1075 | + u64 hardirq_chain_key; |
|---|
| 1007 | 1076 | int softirqs_enabled; |
|---|
| 1008 | 1077 | int softirq_context; |
|---|
| 1078 | + int irq_config; |
|---|
| 1079 | +#endif |
|---|
| 1080 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 1081 | + int softirq_disable_cnt; |
|---|
| 1009 | 1082 | #endif |
|---|
| 1010 | 1083 | |
|---|
| 1011 | 1084 | #ifdef CONFIG_LOCKDEP |
|---|
| .. | .. |
|---|
| 1016 | 1089 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
|---|
| 1017 | 1090 | #endif |
|---|
| 1018 | 1091 | |
|---|
| 1019 | | -#ifdef CONFIG_UBSAN |
|---|
| 1092 | +#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) |
|---|
| 1020 | 1093 | unsigned int in_ubsan; |
|---|
| 1021 | 1094 | #endif |
|---|
| 1022 | 1095 | |
|---|
| .. | .. |
|---|
| 1038 | 1111 | |
|---|
| 1039 | 1112 | struct io_context *io_context; |
|---|
| 1040 | 1113 | |
|---|
| 1114 | +#ifdef CONFIG_COMPACTION |
|---|
| 1115 | + struct capture_control *capture_control; |
|---|
| 1116 | +#endif |
|---|
| 1041 | 1117 | /* Ptrace state: */ |
|---|
| 1042 | 1118 | unsigned long ptrace_message; |
|---|
| 1043 | | - siginfo_t *last_siginfo; |
|---|
| 1119 | + kernel_siginfo_t *last_siginfo; |
|---|
| 1044 | 1120 | |
|---|
| 1045 | 1121 | struct task_io_accounting ioac; |
|---|
| 1046 | 1122 | #ifdef CONFIG_PSI |
|---|
| .. | .. |
|---|
| 1059 | 1135 | /* Protected by ->alloc_lock: */ |
|---|
| 1060 | 1136 | nodemask_t mems_allowed; |
|---|
| 1061 | 1137 | /* Sequence number to catch updates: */ |
|---|
| 1062 | | - seqcount_t mems_allowed_seq; |
|---|
| 1138 | + seqcount_spinlock_t mems_allowed_seq; |
|---|
| 1063 | 1139 | int cpuset_mem_spread_rotor; |
|---|
| 1064 | 1140 | int cpuset_slab_spread_rotor; |
|---|
| 1065 | 1141 | #endif |
|---|
| .. | .. |
|---|
| 1069 | 1145 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
|---|
| 1070 | 1146 | struct list_head cg_list; |
|---|
| 1071 | 1147 | #endif |
|---|
| 1072 | | -#ifdef CONFIG_INTEL_RDT |
|---|
| 1148 | +#ifdef CONFIG_X86_CPU_RESCTRL |
|---|
| 1073 | 1149 | u32 closid; |
|---|
| 1074 | 1150 | u32 rmid; |
|---|
| 1075 | 1151 | #endif |
|---|
| .. | .. |
|---|
| 1080 | 1156 | #endif |
|---|
| 1081 | 1157 | struct list_head pi_state_list; |
|---|
| 1082 | 1158 | struct futex_pi_state *pi_state_cache; |
|---|
| 1159 | + struct mutex futex_exit_mutex; |
|---|
| 1160 | + unsigned int futex_state; |
|---|
| 1083 | 1161 | #endif |
|---|
| 1084 | 1162 | #ifdef CONFIG_PERF_EVENTS |
|---|
| 1085 | 1163 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
|---|
| .. | .. |
|---|
| 1147 | 1225 | |
|---|
| 1148 | 1226 | #ifdef CONFIG_RSEQ |
|---|
| 1149 | 1227 | struct rseq __user *rseq; |
|---|
| 1150 | | - u32 rseq_len; |
|---|
| 1151 | 1228 | u32 rseq_sig; |
|---|
| 1152 | 1229 | /* |
|---|
| 1153 | 1230 | * RmW on rseq_event_mask must be performed atomically |
|---|
| .. | .. |
|---|
| 1158 | 1235 | |
|---|
| 1159 | 1236 | struct tlbflush_unmap_batch tlb_ubc; |
|---|
| 1160 | 1237 | |
|---|
| 1161 | | - struct rcu_head rcu; |
|---|
| 1238 | + union { |
|---|
| 1239 | + refcount_t rcu_users; |
|---|
| 1240 | + struct rcu_head rcu; |
|---|
| 1241 | + }; |
|---|
| 1162 | 1242 | |
|---|
| 1163 | 1243 | /* Cache last used pipe for splice(): */ |
|---|
| 1164 | 1244 | struct pipe_inode_info *splice_pipe; |
|---|
| .. | .. |
|---|
| 1193 | 1273 | u64 timer_slack_ns; |
|---|
| 1194 | 1274 | u64 default_timer_slack_ns; |
|---|
| 1195 | 1275 | |
|---|
| 1196 | | -#ifdef CONFIG_KASAN |
|---|
| 1276 | +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
|---|
| 1197 | 1277 | unsigned int kasan_depth; |
|---|
| 1278 | +#endif |
|---|
| 1279 | + |
|---|
| 1280 | +#ifdef CONFIG_KCSAN |
|---|
| 1281 | + struct kcsan_ctx kcsan_ctx; |
|---|
| 1282 | +#ifdef CONFIG_TRACE_IRQFLAGS |
|---|
| 1283 | + struct irqtrace_events kcsan_save_irqtrace; |
|---|
| 1284 | +#endif |
|---|
| 1285 | +#endif |
|---|
| 1286 | + |
|---|
| 1287 | +#if IS_ENABLED(CONFIG_KUNIT) |
|---|
| 1288 | + struct kunit *kunit_test; |
|---|
| 1198 | 1289 | #endif |
|---|
| 1199 | 1290 | |
|---|
| 1200 | 1291 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| .. | .. |
|---|
| 1246 | 1337 | |
|---|
| 1247 | 1338 | /* KCOV sequence number: */ |
|---|
| 1248 | 1339 | int kcov_sequence; |
|---|
| 1340 | + |
|---|
| 1341 | + /* Collect coverage from softirq context: */ |
|---|
| 1342 | + unsigned int kcov_softirq; |
|---|
| 1249 | 1343 | #endif |
|---|
| 1250 | 1344 | |
|---|
| 1251 | 1345 | #ifdef CONFIG_MEMCG |
|---|
| .. | .. |
|---|
| 1271 | 1365 | unsigned int sequential_io; |
|---|
| 1272 | 1366 | unsigned int sequential_io_avg; |
|---|
| 1273 | 1367 | #endif |
|---|
| 1368 | + struct kmap_ctrl kmap_ctrl; |
|---|
| 1274 | 1369 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
|---|
| 1275 | 1370 | unsigned long task_state_change; |
|---|
| 1276 | 1371 | #endif |
|---|
| .. | .. |
|---|
| 1283 | 1378 | #endif |
|---|
| 1284 | 1379 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
|---|
| 1285 | 1380 | /* A live task holds one reference: */ |
|---|
| 1286 | | - atomic_t stack_refcount; |
|---|
| 1381 | + refcount_t stack_refcount; |
|---|
| 1287 | 1382 | #endif |
|---|
| 1288 | 1383 | #ifdef CONFIG_LIVEPATCH |
|---|
| 1289 | 1384 | int patch_state; |
|---|
| .. | .. |
|---|
| 1292 | 1387 | /* Used by LSM modules for access restriction: */ |
|---|
| 1293 | 1388 | void *security; |
|---|
| 1294 | 1389 | #endif |
|---|
| 1295 | | - /* task is frozen/stopped (used by the cgroup freezer) */ |
|---|
| 1296 | | - ANDROID_KABI_USE(1, unsigned frozen:1); |
|---|
| 1297 | 1390 | |
|---|
| 1298 | | - /* 095444fad7e3 ("futex: Replace PF_EXITPIDONE with a state") */ |
|---|
| 1299 | | - ANDROID_KABI_USE(2, unsigned int futex_state); |
|---|
| 1391 | +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK |
|---|
| 1392 | + unsigned long lowest_stack; |
|---|
| 1393 | + unsigned long prev_lowest_stack; |
|---|
| 1394 | +#endif |
|---|
| 1300 | 1395 | |
|---|
| 1301 | | - /* |
|---|
| 1302 | | - * f9b0c6c556db ("futex: Add mutex around futex exit") |
|---|
| 1303 | | - * A struct mutex takes 32 bytes, or 4 64bit entries, so pick off |
|---|
| 1304 | | - * 4 of the reserved members, and replace them with a struct mutex. |
|---|
| 1305 | | - * Do the GENKSYMS hack to work around the CRC issues |
|---|
| 1306 | | - */ |
|---|
| 1307 | | -#ifdef __GENKSYMS__ |
|---|
| 1396 | +#ifdef CONFIG_X86_MCE |
|---|
| 1397 | + void __user *mce_vaddr; |
|---|
| 1398 | + __u64 mce_kflags; |
|---|
| 1399 | + u64 mce_addr; |
|---|
| 1400 | + __u64 mce_ripv : 1, |
|---|
| 1401 | + mce_whole_page : 1, |
|---|
| 1402 | + __mce_reserved : 62; |
|---|
| 1403 | + struct callback_head mce_kill_me; |
|---|
| 1404 | + int mce_count; |
|---|
| 1405 | +#endif |
|---|
| 1406 | + ANDROID_VENDOR_DATA_ARRAY(1, 64); |
|---|
| 1407 | + ANDROID_OEM_DATA_ARRAY(1, 32); |
|---|
| 1408 | + |
|---|
| 1409 | + /* PF_IO_WORKER */ |
|---|
| 1410 | + ANDROID_KABI_USE(1, void *pf_io_worker); |
|---|
| 1411 | + |
|---|
| 1412 | + ANDROID_KABI_RESERVE(2); |
|---|
| 1308 | 1413 | ANDROID_KABI_RESERVE(3); |
|---|
| 1309 | 1414 | ANDROID_KABI_RESERVE(4); |
|---|
| 1310 | 1415 | ANDROID_KABI_RESERVE(5); |
|---|
| 1311 | 1416 | ANDROID_KABI_RESERVE(6); |
|---|
| 1312 | | -#else |
|---|
| 1313 | | - struct mutex futex_exit_mutex; |
|---|
| 1314 | | -#endif |
|---|
| 1315 | | - |
|---|
| 1316 | 1417 | ANDROID_KABI_RESERVE(7); |
|---|
| 1317 | 1418 | ANDROID_KABI_RESERVE(8); |
|---|
| 1318 | 1419 | |
|---|
| .. | .. |
|---|
| 1490 | 1591 | /* |
|---|
| 1491 | 1592 | * Per process flags |
|---|
| 1492 | 1593 | */ |
|---|
| 1594 | +#define PF_VCPU 0x00000001 /* I'm a virtual CPU */ |
|---|
| 1493 | 1595 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
|---|
| 1494 | 1596 | #define PF_EXITING 0x00000004 /* Getting shut down */ |
|---|
| 1495 | | -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
|---|
| 1597 | +#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ |
|---|
| 1496 | 1598 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
|---|
| 1497 | 1599 | #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ |
|---|
| 1498 | 1600 | #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ |
|---|
| .. | .. |
|---|
| 1507 | 1609 | #define PF_KSWAPD 0x00020000 /* I am kswapd */ |
|---|
| 1508 | 1610 | #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ |
|---|
| 1509 | 1611 | #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ |
|---|
| 1510 | | -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
|---|
| 1612 | +#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, |
|---|
| 1613 | + * I am cleaning dirty pages from some other bdi. */ |
|---|
| 1511 | 1614 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
|---|
| 1512 | 1615 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
|---|
| 1513 | 1616 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
|---|
| 1514 | | -#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
|---|
| 1515 | | -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
|---|
| 1617 | +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ |
|---|
| 1516 | 1618 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
|---|
| 1517 | | -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
|---|
| 1619 | +#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ |
|---|
| 1518 | 1620 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
|---|
| 1519 | 1621 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
|---|
| 1520 | 1622 | |
|---|
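Most of the PF_MEMALLOC_* flags above are not set directly; callers use the save/restore helpers from `<linux/sched/mm.h>`. A sketch for `PF_MEMALLOC_NOIO` (the function name is illustrative):

```c
#include <linux/sched/mm.h>

/* Everything allocated inside the section behaves as if GFP_NOIO was passed,
 * so reclaim cannot recurse back into the I/O path we are part of. */
static void do_io_path_work(void)
{
	unsigned int noio_flags = memalloc_noio_save();	/* sets PF_MEMALLOC_NOIO */

	/* ... allocations here will not start new I/O for reclaim ... */

	memalloc_noio_restore(noio_flags);		/* restores previous state */
}
```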
| .. | .. |
|---|
| 1564 | 1666 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ |
|---|
| 1565 | 1667 | #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ |
|---|
| 1566 | 1668 | #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ |
|---|
| 1669 | +#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ |
|---|
| 1567 | 1670 | |
|---|
| 1568 | 1671 | #define TASK_PFA_TEST(name, func) \ |
|---|
| 1569 | 1672 | static inline bool task_##func(struct task_struct *p) \ |
|---|
| .. | .. |
|---|
| 1592 | 1695 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) |
|---|
| 1593 | 1696 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) |
|---|
| 1594 | 1697 | |
|---|
| 1698 | +TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
|---|
| 1699 | +TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
|---|
| 1700 | +TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
|---|
| 1701 | + |
|---|
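For reference, the three `TASK_PFA_*` lines just added expand (roughly, per the templates above) into inline helpers that operate on `PFA_SPEC_SSB_NOEXEC` in `p->atomic_flags`:

```c
static inline bool task_spec_ssb_noexec(struct task_struct *p)
{
	return test_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}

static inline void task_set_spec_ssb_noexec(struct task_struct *p)
{
	set_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}

static inline void task_clear_spec_ssb_noexec(struct task_struct *p)
{
	clear_bit(PFA_SPEC_SSB_NOEXEC, &p->atomic_flags);
}
```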
| 1595 | 1702 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
|---|
| 1596 | 1703 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
|---|
| 1597 | 1704 | |
|---|
| .. | .. |
|---|
| 1610 | 1717 | } |
|---|
| 1611 | 1718 | |
|---|
| 1612 | 1719 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
|---|
| 1613 | | -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); |
|---|
| 1720 | +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); |
|---|
| 1721 | + |
|---|
| 1722 | +#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION |
|---|
| 1723 | +extern bool cpupri_check_rt(void); |
|---|
| 1724 | +#else |
|---|
| 1725 | +static inline bool cpupri_check_rt(void) |
|---|
| 1726 | +{ |
|---|
| 1727 | + return false; |
|---|
| 1728 | +} |
|---|
| 1729 | +#endif |
|---|
| 1730 | + |
|---|
| 1614 | 1731 | #ifdef CONFIG_SMP |
|---|
| 1615 | 1732 | extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); |
|---|
| 1616 | 1733 | extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); |
|---|
| 1734 | +extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); |
|---|
| 1617 | 1735 | #else |
|---|
| 1618 | 1736 | static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
|---|
| 1619 | 1737 | { |
|---|
| .. | .. |
|---|
| 1624 | 1742 | return -EINVAL; |
|---|
| 1625 | 1743 | return 0; |
|---|
| 1626 | 1744 | } |
|---|
| 1627 | | -#endif |
|---|
| 1628 | | - |
|---|
| 1629 | | -#ifndef cpu_relax_yield |
|---|
| 1630 | | -#define cpu_relax_yield() cpu_relax() |
|---|
| 1631 | 1745 | #endif |
|---|
| 1632 | 1746 | |
|---|
| 1633 | 1747 | extern int yield_to(struct task_struct *p, bool preempt); |
|---|
| .. | .. |
|---|
| 1651 | 1765 | extern int available_idle_cpu(int cpu); |
|---|
| 1652 | 1766 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); |
|---|
| 1653 | 1767 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); |
|---|
| 1768 | +extern void sched_set_fifo(struct task_struct *p); |
|---|
| 1769 | +extern void sched_set_fifo_low(struct task_struct *p); |
|---|
| 1770 | +extern void sched_set_normal(struct task_struct *p, int nice); |
|---|
| 1654 | 1771 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); |
|---|
| 1655 | 1772 | extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); |
|---|
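The new `sched_set_fifo()`/`sched_set_fifo_low()`/`sched_set_normal()` helpers let kernel threads request a policy without hard-coding a priority. A hedged sketch of a realtime kthread (`do_work()` is a placeholder, not a real function):

```c
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_rt_worker(void *data)
{
	sched_set_fifo(current);	/* SCHED_FIFO at the core's default priority */

	while (!kthread_should_stop())
		do_work();		/* placeholder for the driver's processing */

	return 0;
}
```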
| 1656 | 1773 | extern struct task_struct *idle_task(int cpu); |
|---|
| .. | .. |
|---|
| 1661 | 1778 | * |
|---|
| 1662 | 1779 | * Return: 1 if @p is an idle task. 0 otherwise. |
|---|
| 1663 | 1780 | */ |
|---|
| 1664 | | -static inline bool is_idle_task(const struct task_struct *p) |
|---|
| 1781 | +static __always_inline bool is_idle_task(const struct task_struct *p) |
|---|
| 1665 | 1782 | { |
|---|
| 1666 | 1783 | return !!(p->flags & PF_IDLE); |
|---|
| 1667 | 1784 | } |
|---|
| .. | .. |
|---|
| 1717 | 1834 | |
|---|
| 1718 | 1835 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
|---|
| 1719 | 1836 | extern int wake_up_process(struct task_struct *tsk); |
|---|
| 1837 | +extern int wake_up_lock_sleeper(struct task_struct *tsk); |
|---|
| 1720 | 1838 | extern void wake_up_new_task(struct task_struct *tsk); |
|---|
| 1721 | 1839 | |
|---|
| 1722 | 1840 | #ifdef CONFIG_SMP |
|---|
| .. | .. |
|---|
| 1739 | 1857 | }) |
|---|
| 1740 | 1858 | |
|---|
| 1741 | 1859 | #ifdef CONFIG_SMP |
|---|
| 1742 | | -void scheduler_ipi(void); |
|---|
| 1860 | +static __always_inline void scheduler_ipi(void) |
|---|
| 1861 | +{ |
|---|
| 1862 | + /* |
|---|
| 1863 | + * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting |
|---|
| 1864 | + * TIF_NEED_RESCHED remotely (for the first time) will also send |
|---|
| 1865 | + * this IPI. |
|---|
| 1866 | + */ |
|---|
| 1867 | + preempt_fold_need_resched(); |
|---|
| 1868 | +} |
|---|
| 1743 | 1869 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
|---|
| 1744 | 1870 | #else |
|---|
| 1745 | 1871 | static inline void scheduler_ipi(void) { } |
|---|
| .. | .. |
|---|
| 1799 | 1925 | return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); |
|---|
| 1800 | 1926 | } |
|---|
| 1801 | 1927 | |
|---|
| 1928 | +#ifdef CONFIG_PREEMPT_LAZY |
|---|
| 1929 | +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) |
|---|
| 1930 | +{ |
|---|
| 1931 | + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); |
|---|
| 1932 | +} |
|---|
| 1933 | + |
|---|
| 1934 | +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) |
|---|
| 1935 | +{ |
|---|
| 1936 | + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); |
|---|
| 1937 | +} |
|---|
| 1938 | + |
|---|
| 1939 | +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) |
|---|
| 1940 | +{ |
|---|
| 1941 | + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); |
|---|
| 1942 | +} |
|---|
| 1943 | + |
|---|
| 1944 | +static inline int need_resched_lazy(void) |
|---|
| 1945 | +{ |
|---|
| 1946 | + return test_thread_flag(TIF_NEED_RESCHED_LAZY); |
|---|
| 1947 | +} |
|---|
| 1948 | + |
|---|
| 1949 | +static inline int need_resched_now(void) |
|---|
| 1950 | +{ |
|---|
| 1951 | + return test_thread_flag(TIF_NEED_RESCHED); |
|---|
| 1952 | +} |
|---|
| 1953 | + |
|---|
| 1954 | +#else |
|---|
| 1955 | +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } |
|---|
| 1956 | +static inline int need_resched_lazy(void) { return 0; } |
|---|
| 1957 | + |
|---|
| 1958 | +static inline int need_resched_now(void) |
|---|
| 1959 | +{ |
|---|
| 1960 | + return test_thread_flag(TIF_NEED_RESCHED); |
|---|
| 1961 | +} |
|---|
| 1962 | + |
|---|
| 1963 | +#endif |
|---|
| 1964 | + |
|---|
| 1965 | + |
|---|
| 1966 | +static inline bool __task_is_stopped_or_traced(struct task_struct *task) |
|---|
| 1967 | +{ |
|---|
| 1968 | + if (task->state & (__TASK_STOPPED | __TASK_TRACED)) |
|---|
| 1969 | + return true; |
|---|
| 1970 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 1971 | + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) |
|---|
| 1972 | + return true; |
|---|
| 1973 | +#endif |
|---|
| 1974 | + return false; |
|---|
| 1975 | +} |
|---|
| 1976 | + |
|---|
| 1977 | +static inline bool task_is_stopped_or_traced(struct task_struct *task) |
|---|
| 1978 | +{ |
|---|
| 1979 | + bool traced_stopped; |
|---|
| 1980 | + |
|---|
| 1981 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 1982 | + unsigned long flags; |
|---|
| 1983 | + |
|---|
| 1984 | + raw_spin_lock_irqsave(&task->pi_lock, flags); |
|---|
| 1985 | + traced_stopped = __task_is_stopped_or_traced(task); |
|---|
| 1986 | + raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
|---|
| 1987 | +#else |
|---|
| 1988 | + traced_stopped = __task_is_stopped_or_traced(task); |
|---|
| 1989 | +#endif |
|---|
| 1990 | + return traced_stopped; |
|---|
| 1991 | +} |
|---|
| 1992 | + |
|---|
| 1993 | +static inline bool task_is_traced(struct task_struct *task) |
|---|
| 1994 | +{ |
|---|
| 1995 | + bool traced = false; |
|---|
| 1996 | + |
|---|
| 1997 | + if (task->state & __TASK_TRACED) |
|---|
| 1998 | + return true; |
|---|
| 1999 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 2000 | + /* in case the task is sleeping on tasklist_lock */ |
|---|
| 2001 | + raw_spin_lock_irq(&task->pi_lock); |
|---|
| 2002 | + if (task->state & __TASK_TRACED) |
|---|
| 2003 | + traced = true; |
|---|
| 2004 | + else if (task->saved_state & __TASK_TRACED) |
|---|
| 2005 | + traced = true; |
|---|
| 2006 | + raw_spin_unlock_irq(&task->pi_lock); |
|---|
| 2007 | +#endif |
|---|
| 2008 | + return traced; |
|---|
| 2009 | +} |
|---|
| 2010 | + |
|---|
| 1802 | 2011 | /* |
|---|
| 1803 | 2012 | * cond_resched() and cond_resched_lock(): latency reduction via |
|---|
| 1804 | 2013 | * explicit rescheduling in places that are safe. The return |
|---|
| 1805 | 2014 | * value indicates whether a reschedule was done in fact. |
|---|
| 1806 | 2015 | * cond_resched_lock() will drop the spinlock before scheduling, |
|---|
| 1807 | 2016 | */ |
|---|
| 1808 | | -#ifndef CONFIG_PREEMPT |
|---|
| 2017 | +#ifndef CONFIG_PREEMPTION |
|---|
| 1809 | 2018 | extern int _cond_resched(void); |
|---|
| 1810 | 2019 | #else |
|---|
| 1811 | 2020 | static inline int _cond_resched(void) { return 0; } |
|---|
| .. | .. |
|---|
| 1834 | 2043 | |
|---|
| 1835 | 2044 | /* |
|---|
| 1836 | 2045 | * Does a critical section need to be broken due to another |
|---|
| 1837 | | - * task waiting?: (technically does not depend on CONFIG_PREEMPT, |
|---|
| 2046 | + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, |
|---|
| 1838 | 2047 | * but a general need for low latency) |
|---|
| 1839 | 2048 | */ |
|---|
| 1840 | 2049 | static inline int spin_needbreak(spinlock_t *lock) |
|---|
| 1841 | 2050 | { |
|---|
| 1842 | | -#ifdef CONFIG_PREEMPT |
|---|
| 2051 | +#ifdef CONFIG_PREEMPTION |
|---|
| 1843 | 2052 | return spin_is_contended(lock); |
|---|
| 1844 | 2053 | #else |
|---|
| 1845 | 2054 | return 0; |
|---|
| .. | .. |
|---|
| 1889 | 2098 | * running or not. |
|---|
| 1890 | 2099 | */ |
|---|
| 1891 | 2100 | #ifndef vcpu_is_preempted |
|---|
| 1892 | | -# define vcpu_is_preempted(cpu) false |
|---|
| 2101 | +static inline bool vcpu_is_preempted(int cpu) |
|---|
| 2102 | +{ |
|---|
| 2103 | + return false; |
|---|
| 2104 | +} |
|---|
| 1893 | 2105 | #endif |
|---|
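`vcpu_is_preempted()` (now a proper inline in the fallback case) is meant for spin loops that would otherwise burn host CPU while the target vCPU is scheduled out. A sketch (the `owner_cpu`/`done` parameters are illustrative):

```c
#include <linux/sched.h>

/* Returns false if spinning became pointless and the caller should block. */
static bool spin_until(int owner_cpu, bool *done)
{
	while (!READ_ONCE(*done)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;
		cpu_relax();
	}
	return true;
}
```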
| 1894 | 2106 | |
|---|
| 1895 | 2107 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
|---|
| .. | .. |
|---|
| 1963 | 2175 | { |
|---|
| 1964 | 2176 | if (clone_flags & CLONE_VM) { |
|---|
| 1965 | 2177 | t->rseq = NULL; |
|---|
| 1966 | | - t->rseq_len = 0; |
|---|
| 1967 | 2178 | t->rseq_sig = 0; |
|---|
| 1968 | 2179 | t->rseq_event_mask = 0; |
|---|
| 1969 | 2180 | } else { |
|---|
| 1970 | 2181 | t->rseq = current->rseq; |
|---|
| 1971 | | - t->rseq_len = current->rseq_len; |
|---|
| 1972 | 2182 | t->rseq_sig = current->rseq_sig; |
|---|
| 1973 | 2183 | t->rseq_event_mask = current->rseq_event_mask; |
|---|
| 1974 | 2184 | } |
|---|
| .. | .. |
|---|
| 1977 | 2187 | static inline void rseq_execve(struct task_struct *t) |
|---|
| 1978 | 2188 | { |
|---|
| 1979 | 2189 | t->rseq = NULL; |
|---|
| 1980 | | - t->rseq_len = 0; |
|---|
| 1981 | 2190 | t->rseq_sig = 0; |
|---|
| 1982 | 2191 | t->rseq_event_mask = 0; |
|---|
| 1983 | 2192 | } |
|---|
| .. | .. |
|---|
| 2022 | 2231 | |
|---|
| 2023 | 2232 | #endif |
|---|
| 2024 | 2233 | |
|---|
| 2234 | +const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); |
|---|
| 2235 | +char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); |
|---|
| 2236 | +int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); |
|---|
| 2237 | + |
|---|
| 2238 | +const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); |
|---|
| 2239 | +const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); |
|---|
| 2240 | +const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); |
|---|
| 2241 | + |
|---|
| 2242 | +int sched_trace_rq_cpu(struct rq *rq); |
|---|
| 2243 | +int sched_trace_rq_cpu_capacity(struct rq *rq); |
|---|
| 2244 | +int sched_trace_rq_nr_running(struct rq *rq); |
|---|
| 2245 | + |
|---|
| 2246 | +const struct cpumask *sched_trace_rd_span(struct root_domain *rd); |
|---|
| 2247 | + |
|---|
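The `sched_trace_*()` accessors exist so tracing consumers can peek at scheduler internals without the layout of `struct rq`/`struct cfs_rq` (private to kernel/sched/) leaking out. A sketch of a consumer that only goes through the accessors (the reporting function is illustrative):

```c
#include <linux/printk.h>
#include <linux/sched.h>

static void report_cfs_rq(struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
	char path[64];

	sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
	pr_info("cpu%d %s: util_avg=%lu\n",
		sched_trace_cfs_rq_cpu(cfs_rq), path,
		avg ? avg->util_avg : 0);
}
```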
| 2025 | 2248 | #endif |
|---|