2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/drivers/misc/uid_sys_stats.c
@@ -77,12 +77,12 @@
 #endif
 };
 
-static u64 compute_write_bytes(struct task_struct *task)
+static u64 compute_write_bytes(struct task_io_accounting *ioac)
 {
-	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+	if (ioac->write_bytes <= ioac->cancelled_write_bytes)
 		return 0;
 
-	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+	return ioac->write_bytes - ioac->cancelled_write_bytes;
 }
 
 static void compute_io_bucket_stats(struct io_stats *io_bucket,
@@ -239,17 +239,16 @@
 	}
 }
 
-static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
-		struct task_struct *task, int slot)
+static void add_uid_tasks_io_stats(struct task_entry *task_entry,
+		struct task_io_accounting *ioac, int slot)
 {
-	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
 	struct io_stats *task_io_slot = &task_entry->io[slot];
 
-	task_io_slot->read_bytes += task->ioac.read_bytes;
-	task_io_slot->write_bytes += compute_write_bytes(task);
-	task_io_slot->rchar += task->ioac.rchar;
-	task_io_slot->wchar += task->ioac.wchar;
-	task_io_slot->fsync += task->ioac.syscfs;
+	task_io_slot->read_bytes += ioac->read_bytes;
+	task_io_slot->write_bytes += compute_write_bytes(ioac);
+	task_io_slot->rchar += ioac->rchar;
+	task_io_slot->wchar += ioac->wchar;
+	task_io_slot->fsync += ioac->syscfs;
 }
 
 static void compute_io_uid_tasks(struct uid_entry *uid_entry)
@@ -290,8 +289,6 @@
 #else
 static void remove_uid_tasks(struct uid_entry *uid_entry) {};
 static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
-static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
-		struct task_struct *task, int slot) {};
 static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
 static void show_io_uid_tasks(struct seq_file *m,
 		struct uid_entry *uid_entry) {}
....@@ -446,23 +443,32 @@
446443 .proc_write = uid_remove_write,
447444 };
448445
446
+static void __add_uid_io_stats(struct uid_entry *uid_entry,
447
+ struct task_io_accounting *ioac, int slot)
448
+{
449
+ struct io_stats *io_slot = &uid_entry->io[slot];
450
+
451
+ io_slot->read_bytes += ioac->read_bytes;
452
+ io_slot->write_bytes += compute_write_bytes(ioac);
453
+ io_slot->rchar += ioac->rchar;
454
+ io_slot->wchar += ioac->wchar;
455
+ io_slot->fsync += ioac->syscfs;
456
+}
449457
450458 static void add_uid_io_stats(struct uid_entry *uid_entry,
451459 struct task_struct *task, int slot)
452460 {
453
- struct io_stats *io_slot = &uid_entry->io[slot];
461
+ struct task_entry *task_entry __maybe_unused;
454462
455463 /* avoid double accounting of dying threads */
456464 if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
457465 return;
458466
459
- io_slot->read_bytes += task->ioac.read_bytes;
460
- io_slot->write_bytes += compute_write_bytes(task);
461
- io_slot->rchar += task->ioac.rchar;
462
- io_slot->wchar += task->ioac.wchar;
463
- io_slot->fsync += task->ioac.syscfs;
464
-
465
- add_uid_tasks_io_stats(uid_entry, task, slot);
467
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
468
+ task_entry = find_or_register_task(uid_entry, task);
469
+ add_uid_tasks_io_stats(task_entry, &task->ioac, slot);
470
+#endif
471
+ __add_uid_io_stats(uid_entry, &task->ioac, slot);
466472 }
467473
468474 static void update_io_stats_all_locked(void)
@@ -622,6 +628,48 @@
 	.proc_write	= uid_procstat_write,
 };
 
+struct update_stats_work {
+	struct work_struct work;
+	uid_t uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	struct task_struct *task;
+#endif
+	struct task_io_accounting ioac;
+	u64 utime;
+	u64 stime;
+};
+
+static void update_stats_workfn(struct work_struct *work)
+{
+	struct update_stats_work *usw =
+		container_of(work, struct update_stats_work, work);
+	struct uid_entry *uid_entry;
+	struct task_entry *task_entry __maybe_unused;
+
+	rt_mutex_lock(&uid_lock);
+	uid_entry = find_uid_entry(usw->uid);
+	if (!uid_entry)
+		goto exit;
+
+	uid_entry->utime += usw->utime;
+	uid_entry->stime += usw->stime;
+
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	task_entry = find_task_entry(uid_entry, usw->task);
+	if (!task_entry)
+		goto exit;
+	add_uid_tasks_io_stats(task_entry, &usw->ioac,
+		UID_STATE_DEAD_TASKS);
+#endif
+	__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
+exit:
+	rt_mutex_unlock(&uid_lock);
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	put_task_struct(usw->task);
+#endif
+	kfree(usw);
+}
+
 static int process_notifier(struct notifier_block *self,
 		unsigned long cmd, void *v)
 {
@@ -633,8 +681,28 @@
 	if (!task)
 		return NOTIFY_OK;
 
-	rt_mutex_lock(&uid_lock);
 	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	if (!rt_mutex_trylock(&uid_lock)) {
+		struct update_stats_work *usw;
+
+		usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
+		if (usw) {
+			INIT_WORK(&usw->work, update_stats_workfn);
+			usw->uid = uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+			usw->task = get_task_struct(task);
+#endif
+			/*
+			 * Copy task->ioac since task might be destroyed before
+			 * the work is later performed.
+			 */
+			usw->ioac = task->ioac;
+			task_cputime_adjusted(task, &usw->utime, &usw->stime);
+			schedule_work(&usw->work);
+		}
+		return NOTIFY_OK;
+	}
+
 	uid_entry = find_or_register_uid(uid);
 	if (!uid_entry) {
 		pr_err("%s: failed to find uid %d\n", __func__, uid);
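
For context only (not part of the patch): the change to process_notifier() above follows a "trylock, else defer to a workqueue" pattern, where the exit notifier snapshots the data it needs and lets a worker apply it under the lock later if the lock is contended. The minimal sketch below shows the same idea in isolation; all names in it (example_lock, example_total, example_add, deferred_update) are hypothetical and do not appear in uid_sys_stats.c.

/*
 * Minimal sketch of the trylock-or-defer pattern, assuming a counter
 * protected by an rt_mutex and a caller that must not block when the
 * lock is contended.
 */
#include <linux/kernel.h>
#include <linux/rtmutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static DEFINE_RT_MUTEX(example_lock);
static u64 example_total;		/* protected by example_lock */

struct deferred_update {
	struct work_struct work;
	u64 value;			/* snapshot copied while the source still exists */
};

static void deferred_update_workfn(struct work_struct *work)
{
	struct deferred_update *du =
		container_of(work, struct deferred_update, work);

	/* Worker context may sleep, so blocking on the lock is fine here. */
	rt_mutex_lock(&example_lock);
	example_total += du->value;
	rt_mutex_unlock(&example_lock);
	kfree(du);
}

/* Caller that must not stall on a contended example_lock. */
static void example_add(u64 value)
{
	struct deferred_update *du;

	if (rt_mutex_trylock(&example_lock)) {
		/* Fast path: lock was free, update in place. */
		example_total += value;
		rt_mutex_unlock(&example_lock);
		return;
	}

	/* Contended: copy the data and let a worker apply it later. */
	du = kmalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return;			/* best effort, as in the patch */
	INIT_WORK(&du->work, deferred_update_workfn);
	du->value = value;
	schedule_work(&du->work);
}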