@@ -311,7 +311,7 @@
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	resctrl_sched_in();
+	resctrl_sched_in(current);
 }
 
 /*
@@ -532,7 +532,7 @@
 	 * Otherwise, the MSR is updated when the task is scheduled in.
 	 */
 	if (task == current)
-		resctrl_sched_in();
+		resctrl_sched_in(task);
 }
 
 static void update_task_closid_rmid(struct task_struct *t)
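
Both hunks make the same interface change: resctrl_sched_in() now takes the task it acts on instead of digging it out of current inside the helper. A minimal sketch of the implied prototype change; the actual declaration lives in the arch resctrl header and is not part of these hunks:

/*
 * Sketch only, inferred from the call sites above:
 *
 *	void resctrl_sched_in(void);			<- before
 *	void resctrl_sched_in(struct task_struct *tsk);	<- after
 *
 * Both callers here pass a task known to be current, so behavior is
 * unchanged; naming the task makes the data flow explicit and lets the
 * same helper be called with the incoming task on the context-switch
 * path.
 */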
@@ -563,11 +563,11 @@
 	 */
 
 	if (rdtgrp->type == RDTCTRL_GROUP) {
-		tsk->closid = rdtgrp->closid;
-		tsk->rmid = rdtgrp->mon.rmid;
+		WRITE_ONCE(tsk->closid, rdtgrp->closid);
+		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 	} else if (rdtgrp->type == RDTMON_GROUP) {
 		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			tsk->rmid = rdtgrp->mon.rmid;
+			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 		} else {
 			rdt_last_cmd_puts("Can't move task to different control group\n");
 			return -EINVAL;
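
The WRITE_ONCE() stores only help if the lock-free loads on the other side pair with READ_ONCE(); the context-switch path reads closid/rmid without holding any lock this writer holds. A minimal sketch of the reader side, assuming the u32 field types from task_struct (sketch_sched_in is a hypothetical name; the real reader is the resctrl sched-in code, whose MSR plumbing is elided):

#include <linux/sched.h>

/* Hypothetical sketch of the lock-free reader that pairs with the
 * WRITE_ONCE() stores above: READ_ONCE() keeps the compiler from
 * tearing, re-reading, or caching the loads.
 */
static void sketch_sched_in(struct task_struct *tsk)
{
	u32 closid = READ_ONCE(tsk->closid);
	u32 rmid = READ_ONCE(tsk->rmid);

	/* ... program closid/rmid into the PQR_ASSOC MSR ... */
	(void)closid;
	(void)rmid;
}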
@@ -577,8 +577,10 @@
 	/*
 	 * Ensure the task's closid and rmid are written before determining if
 	 * the task is current that will decide if it will be interrupted.
+	 * This pairs with the full barrier between the rq->curr update and
+	 * resctrl_sched_in() during context switch.
 	 */
-	barrier();
+	smp_mb();
 
 	/*
 	 * By now, the task's closid and rmid are set. If the task is current
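
Upgrading barrier() to smp_mb() is the functional heart of this hunk: barrier() only stops the compiler from reordering, while smp_mb() orders the stores against the subsequent task_curr() loads as seen by other CPUs. A sketch of the pairing the new comment describes, with the scheduler side paraphrased from that comment rather than shown in this diff:

/*
 * Mover (this function)               Context switch (another CPU)
 * ---------------------               ----------------------------
 * WRITE_ONCE(tsk->closid, ...)        rq->curr = next
 * WRITE_ONCE(tsk->rmid, ...)          ...full barrier in __schedule()...
 * smp_mb()                            resctrl_sched_in(next)
 * task_curr(tsk)? -> interrupt CPU        loads next->closid/rmid
 *
 * Either the mover observes the task as current and kicks its CPU so
 * the MSR is reloaded, or the task's next switch-in observes the new
 * closid/rmid. Without a real barrier the stores could reach memory
 * after the task_curr() check, and both sides could miss the update.
 */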
@@ -713,11 +715,15 @@
 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
 {
 	struct task_struct *p, *t;
+	pid_t pid;
 
 	rcu_read_lock();
 	for_each_process_thread(p, t) {
-		if (is_closid_match(t, r) || is_rmid_match(t, r))
-			seq_printf(s, "%d\n", t->pid);
+		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
+			pid = task_pid_vnr(t);
+			if (pid)
+				seq_printf(s, "%d\n", pid);
+		}
 	}
 	rcu_read_unlock();
 }
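
Replacing t->pid with task_pid_vnr(t) makes the tasks file pid-namespace aware: task_pid_vnr() returns the task's pid as seen from the namespace of the process reading the file, and 0 for tasks that are not visible from that namespace, which is what the new check filters out. The new logic, restated as a standalone helper (show_task_pid is a hypothetical name for illustration):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

/* Hypothetical helper restating the hunk above: print a task's pid as
 * seen from the reader's pid namespace; task_pid_vnr() returns 0 for
 * tasks outside that namespace, which are skipped instead of being
 * printed as a bogus "0".
 */
static void show_task_pid(struct seq_file *s, struct task_struct *t)
{
	pid_t pid = task_pid_vnr(t);

	if (pid)
		seq_printf(s, "%d\n", pid);
}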
@@ -2310,22 +2316,26 @@
 	for_each_process_thread(p, t) {
 		if (!from || is_closid_match(t, from) ||
 		    is_rmid_match(t, from)) {
-			t->closid = to->closid;
-			t->rmid = to->mon.rmid;
+			WRITE_ONCE(t->closid, to->closid);
+			WRITE_ONCE(t->rmid, to->mon.rmid);
 
-#ifdef CONFIG_SMP
 			/*
-			 * This is safe on x86 w/o barriers as the ordering
-			 * of writing to task_cpu() and t->on_cpu is
-			 * reverse to the reading here. The detection is
-			 * inaccurate as tasks might move or schedule
-			 * before the smp function call takes place. In
-			 * such a case the function call is pointless, but
+			 * Order the closid/rmid stores above before the loads
+			 * in task_curr(). This pairs with the full barrier
+			 * between the rq->curr update and resctrl_sched_in()
+			 * during context switch.
+			 */
+			smp_mb();
+
+			/*
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
 			 * there is no other side effect.
 			 */
-			if (mask && t->on_cpu)
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
 				cpumask_set_cpu(task_cpu(t), mask);
-#endif
 		}
 	}
 	read_unlock(&tasklist_lock);
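
Besides the barrier, this hunk swaps t->on_cpu for task_curr(t) and drops the #ifdef. t->on_cpu only exists in task_struct under CONFIG_SMP, which is what forced the preprocessor guard, and it remains set while a task is still switching out, so the old test could IPI a CPU the task had in effect already left; task_curr() checks whether the task is the one currently installed on its CPU and is defined for UP builds as well. IS_ENABLED(CONFIG_SMP) then keeps the branch as ordinary C. A minimal sketch of that idiom (sketch_is_enabled is a hypothetical function):

#include <linux/kconfig.h>
#include <linux/printk.h>

/* IS_ENABLED(CONFIG_FOO) expands to a compile-time 1 (for =y or =m)
 * or 0, so the dead branch is eliminated by the optimizer; unlike
 * #ifdef, both branches must still parse and type-check in every
 * configuration.
 */
static void sketch_is_enabled(void)
{
	if (IS_ENABLED(CONFIG_SMP))
		pr_info("SMP build: a remote CPU may need an IPI\n");
	else
		pr_info("UP build: only this CPU can run the task\n");
}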