2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -311,7 +311,7 @@
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
-	resctrl_sched_in();
+	resctrl_sched_in(current);
 }
 
 /*
@@ -532,7 +532,7 @@
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
-		resctrl_sched_in();
+		resctrl_sched_in(task);
 }
 
 static void update_task_closid_rmid(struct task_struct *t)
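Both call sites now pass the task explicitly instead of having resctrl_sched_in() read `current` internally. A minimal sketch of the consumer side under the new signature, assuming CONFIG_X86_CPU_RESCTRL; this is simplified from the in-kernel __resctrl_sched_in() (the per-CPU default-closid and MSR-caching logic is elided):

	/*
	 * Sketch only: the READ_ONCE() loads pair with the WRITE_ONCE()
	 * stores introduced in the hunks below.
	 */
	void __resctrl_sched_in(struct task_struct *tsk)
	{
		u32 closid = READ_ONCE(tsk->closid);	/* may be updated */
		u32 rmid = READ_ONCE(tsk->rmid);	/* from another CPU */

		/* Program the hardware with the incoming task's IDs. */
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}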
@@ -563,11 +563,11 @@
	 */
 
	if (rdtgrp->type == RDTCTRL_GROUP) {
-		tsk->closid = rdtgrp->closid;
-		tsk->rmid = rdtgrp->mon.rmid;
+		WRITE_ONCE(tsk->closid, rdtgrp->closid);
+		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			tsk->rmid = rdtgrp->mon.rmid;
+			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
@@ -577,8 +577,10 @@
	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current that will decide if it will be interrupted.
+	 * This pairs with the full barrier between the rq->curr update and
+	 * resctrl_sched_in() during context switch.
	 */
-	barrier();
+	smp_mb();
 
	/*
	 * By now, the task's closid and rmid are set. If the task is current
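Why barrier() had to become smp_mb(): this path stores closid/rmid and then loads rq->curr (through task_curr()), while the context-switch path stores rq->curr and then loads closid/rmid. That is the store-buffering pattern, and x86 permits a load to be reordered before an older store, so a compiler-only barrier() cannot prevent both sides from reading stale values: the mover would skip the IPI while the CPU switching the task in still loads the old IDs. After the barrier, the decision is taken by update_task_closid_rmid(), roughly as below (a sketch; the exact body may differ between kernel versions):

	static void update_task_closid_rmid(struct task_struct *t)
	{
		if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
			/* t runs right now: make its CPU reload PQR_ASSOC */
			smp_call_function_single(task_cpu(t),
						 _update_task_closid_rmid, t, 1);
		else
			_update_task_closid_rmid(t);
	}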
@@ -713,11 +715,15 @@
 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
 {
	struct task_struct *p, *t;
+	pid_t pid;
 
	rcu_read_lock();
	for_each_process_thread(p, t) {
-		if (is_closid_match(t, r) || is_rmid_match(t, r))
-			seq_printf(s, "%d\n", t->pid);
+		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
+			pid = task_pid_vnr(t);
+			if (pid)
+				seq_printf(s, "%d\n", pid);
+		}
	}
	rcu_read_unlock();
 }
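task_pid_vnr() resolves the pid relative to the pid namespace of the process reading the tasks file, instead of exposing the raw, init-namespace t->pid; it returns 0 for tasks with no mapping in that namespace, which the new `if (pid)` check filters out rather than printing a bogus 0. In terms of the underlying pid helpers it behaves roughly like this sketch (tasks_file_pid is a hypothetical name, not in the patch):

	/*
	 * Resolve @t's pid number in the namespace of the reader;
	 * returns 0 if @t is not visible from that namespace.
	 */
	static pid_t tasks_file_pid(struct task_struct *t)
	{
		return pid_nr_ns(task_pid(t), task_active_pid_ns(current));
	}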
@@ -2310,22 +2316,26 @@
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
-			t->closid = to->closid;
-			t->rmid = to->mon.rmid;
+			WRITE_ONCE(t->closid, to->closid);
+			WRITE_ONCE(t->rmid, to->mon.rmid);
 
-#ifdef CONFIG_SMP
			/*
-			 * This is safe on x86 w/o barriers as the ordering
-			 * of writing to task_cpu() and t->on_cpu is
-			 * reverse to the reading here. The detection is
-			 * inaccurate as tasks might move or schedule
-			 * before the smp function call takes place. In
-			 * such a case the function call is pointless, but
+			 * Order the closid/rmid stores above before the loads
+			 * in task_curr(). This pairs with the full barrier
+			 * between the rq->curr update and resctrl_sched_in()
+			 * during context switch.
+			 */
+			smp_mb();
+
+			/*
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
-			if (mask && t->on_cpu)
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
-#endif
		}
	}
	read_unlock(&tasklist_lock);
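For context, the CPU mask built here is consumed by the callers roughly as below (a sketch based on update_closid_rmid() in this same file; update_cpu_closid_rmid() is the IPI handler whose tail is patched in the first hunk): every CPU that may be running a moved task is interrupted once so that the context-switch path reloads PQR_ASSOC with the new closid/rmid.

	/*
	 * Sketch: kick each CPU in @cpu_mask; the handler ends in
	 * resctrl_sched_in(current), refreshing the MSR for whatever
	 * task is running there.
	 */
	static void update_closid_rmid(const struct cpumask *cpu_mask,
				       struct rdtgroup *r)
	{
		on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
	}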