```diff
@@ -70,7 +70,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 		return;
 
 	if (!wakeup || p->sched_psi_wake_requeue) {
-		if (p->flags & PF_MEMSTALL)
+		if (p->in_memstall)
 			set |= TSK_MEMSTALL;
 		if (p->sched_psi_wake_requeue)
 			p->sched_psi_wake_requeue = 0;
```
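The hunk above switches psi_enqueue() (in kernel/sched/stats.h) from testing the PF_MEMSTALL bit in p->flags to a dedicated p->in_memstall field in struct task_struct. For context, here is a minimal sketch of how that field gets raised, loosely modeled on psi_memstall_enter() in kernel/sched/psi.c; the exact locking helpers and the function body are assumptions based on mainline around this change, not part of this diff:

```c
/* Sketch: raising p->in_memstall (modeled on psi_memstall_enter()). */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * Flip the flag under the rq lock so the scheduler-side
	 * accounting never observes a half-updated state.
	 */
	rq = this_rq_lock_irq(&rf);
	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL);
	rq_unlock_irq(rq, &rf);
}
```

Because in_memstall is a dedicated bit rather than a PF_* flag, the enqueue path no longer has to mask p->flags, and the bit cannot collide with other PF_* users.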
```diff
@@ -90,9 +90,17 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 		return;
 
 	if (!sleep) {
-		if (p->flags & PF_MEMSTALL)
+		if (p->in_memstall)
 			clear |= TSK_MEMSTALL;
 	} else {
+		/*
+		 * When a task sleeps, schedule() dequeues it before
+		 * switching to the next one. Merge the clearing of
+		 * TSK_RUNNING and TSK_ONCPU to save an unnecessary
+		 * psi_task_change() call in psi_sched_switch().
+		 */
+		clear |= TSK_ONCPU;
+
 		if (p->in_iowait)
 			set |= TSK_IOWAIT;
 	}
```
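Folding the TSK_ONCPU clear into psi_dequeue()'s sleep path lets the switch-out path skip a second psi_task_change() for the outgoing task. A rough sketch of the counterpart one would expect in psi_task_switch() follows; this is a hypothetical simplification (the real function also deals with cgroup hierarchies), shown only to illustrate why the merge saves a call:

```c
/*
 * Hypothetical, simplified psi_task_switch(): when prev is going to
 * sleep, psi_dequeue() already cleared TSK_ONCPU together with
 * TSK_RUNNING, so only the preemption case needs an extra state
 * change for prev.
 */
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	if (next->pid)
		psi_task_change(next, 0, TSK_ONCPU);

	if (!sleep && prev->pid)
		psi_task_change(prev, TSK_ONCPU, 0);
}
```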
```diff
@@ -109,14 +117,14 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 	 * deregister its sleep-persistent psi states from the old
 	 * queue, and let psi_enqueue() know it has to requeue.
 	 */
-	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
+	if (unlikely(p->in_iowait || p->in_memstall)) {
 		struct rq_flags rf;
 		struct rq *rq;
 		int clear = 0;
 
 		if (p->in_iowait)
 			clear |= TSK_IOWAIT;
-		if (p->flags & PF_MEMSTALL)
+		if (p->in_memstall)
 			clear |= TSK_MEMSTALL;
 
 		rq = __task_rq_lock(p, &rf);
```
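The hunk ends just before the rq-locked section that the diff elides. Based on the requeue handshake visible in psi_enqueue() above, the elided body presumably drops the accumulated states from the old runqueue and marks the task for requeueing, roughly along these lines (an assumption, not shown in this diff):

```c
/* Assumed continuation of psi_ttwu_dequeue() under the rq lock. */
rq = __task_rq_lock(p, &rf);
psi_task_change(p, clear, 0);	/* drop states from the old rq */
p->sched_psi_wake_requeue = 1;	/* tell psi_enqueue() to re-add them */
__task_rq_unlock(rq, &rf);
```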
```diff
@@ -126,18 +134,31 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 	}
 }
 
+static inline void psi_sched_switch(struct task_struct *prev,
+				    struct task_struct *next,
+				    bool sleep)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	psi_task_switch(prev, next, sleep);
+}
+
 static inline void psi_task_tick(struct rq *rq)
 {
 	if (static_branch_likely(&psi_disabled))
 		return;
 
-	if (unlikely(rq->curr->flags & PF_MEMSTALL))
+	if (unlikely(rq->curr->in_memstall))
 		psi_memstall_tick(rq->curr, cpu_of(rq));
 }
 #else /* CONFIG_PSI */
 static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
 static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
 static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+static inline void psi_sched_switch(struct task_struct *prev,
+				    struct task_struct *next,
+				    bool sleep) {}
 static inline void psi_task_tick(struct rq *rq) {}
 #endif /* CONFIG_PSI */
 
```
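The new psi_sched_switch() follows the same pattern as the other hooks: a static-branch early-out so the PSI-disabled case costs a single patched jump, then a call into the slow path, with an empty !CONFIG_PSI stub so callers need no ifdefs. It is presumably invoked from the context-switch path in kernel/sched/core.c; a sketch of the expected call site (the exact placement is an assumption):

```c
/*
 * Sketch: expected call site inside __schedule(), once the next task
 * has been picked. A prev that is going to sleep has already been
 * dequeued, so task_on_rq_queued(prev) distinguishes a voluntary
 * sleep from a preemption.
 */
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
```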
```diff
@@ -157,9 +178,10 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 {
 	unsigned long long now = rq_clock(rq), delta = 0;
 
-	if (unlikely(sched_info_on()))
+	if (sched_info_on()) {
 		if (t->sched_info.last_queued)
 			delta = now - t->sched_info.last_queued;
+	}
 	sched_info_reset_dequeued(t);
 	t->sched_info.run_delay += delta;
 
```
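Dropping unlikely() around sched_info_on() makes sense if the predicate compiles to a constant in common configurations, and the added braces make the guarded block explicit instead of relying on a dangling if. A sketch of sched_info_on() as it is usually defined (an assumption about the surrounding code, not part of this diff):

```c
/*
 * Assumed definition of sched_info_on(): with CONFIG_SCHEDSTATS it is
 * compile-time true, so an unlikely() annotation would pessimize the
 * branch layout in what is actually the common case.
 */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}
```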
```diff
@@ -192,7 +214,7 @@
  */
 static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
 {
-	if (unlikely(sched_info_on())) {
+	if (sched_info_on()) {
 		if (!t->sched_info.last_queued)
 			t->sched_info.last_queued = rq_clock(rq);
 	}
```
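sched_info_queued() and sched_info_dequeued() work as a pair: the first stamps last_queued when the task hits the runqueue, the second charges the elapsed wait to run_delay. A worked example with invented clock values:

```c
/*
 * Hypothetical timeline (invented rq_clock() values, in ns):
 *
 *   rq_clock(rq) == 1000:  sched_info_queued(rq, t)
 *                          -> t->sched_info.last_queued = 1000
 *   rq_clock(rq) == 4000:  sched_info_dequeued(rq, t)
 *                          -> delta = 4000 - 1000 = 3000
 *                          -> t->sched_info.run_delay += 3000
 *                          -> last_queued is reset for the next cycle
 */
```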
```diff
@@ -239,7 +261,7 @@
 static inline void
 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
-	if (unlikely(sched_info_on()))
+	if (sched_info_on())
 		__sched_info_switch(rq, prev, next);
 }
 
```
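For completeness, the helper behind this wrapper has historically looked roughly like the following; a sketch assuming the long-standing structure, with idle excluded because its scheduling latency is not interesting to record:

```c
/* Sketch of the assumed __sched_info_switch() behind the wrapper. */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/* Stats about scheduling the idle task are not interesting. */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
```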