```diff
@@ -77,12 +77,12 @@
 #endif
 };
 
-static u64 compute_write_bytes(struct task_struct *task)
+static u64 compute_write_bytes(struct task_io_accounting *ioac)
 {
-	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+	if (ioac->write_bytes <= ioac->cancelled_write_bytes)
 		return 0;
 
-	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+	return ioac->write_bytes - ioac->cancelled_write_bytes;
 }
 
 static void compute_io_bucket_stats(struct io_stats *io_bucket,
```
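This signature change is what enables the rest of the patch: `compute_write_bytes()` now takes the accounting struct directly instead of reaching through a `task_struct`, so it can operate on a by-value snapshot of the counters after the owning task is gone. A minimal sketch of that usage, assuming kernel context (`snap` and `written` are illustrative names, not part of the patch):

```c
/*
 * Illustrative only: snapshot the counters while the task is still
 * valid, then compute the derived stat later (e.g. from deferred
 * work) without holding a reference to the task itself.
 */
struct task_io_accounting snap = task->ioac;	/* copy under task lifetime */

/* ... possibly long after the task has exited ... */
u64 written = compute_write_bytes(&snap);
```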
```diff
@@ -239,17 +239,16 @@
 	}
 }
 
-static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
-	struct task_struct *task, int slot)
+static void add_uid_tasks_io_stats(struct task_entry *task_entry,
+	struct task_io_accounting *ioac, int slot)
 {
-	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
 	struct io_stats *task_io_slot = &task_entry->io[slot];
 
-	task_io_slot->read_bytes += task->ioac.read_bytes;
-	task_io_slot->write_bytes += compute_write_bytes(task);
-	task_io_slot->rchar += task->ioac.rchar;
-	task_io_slot->wchar += task->ioac.wchar;
-	task_io_slot->fsync += task->ioac.syscfs;
+	task_io_slot->read_bytes += ioac->read_bytes;
+	task_io_slot->write_bytes += compute_write_bytes(ioac);
+	task_io_slot->rchar += ioac->rchar;
+	task_io_slot->wchar += ioac->wchar;
+	task_io_slot->fsync += ioac->syscfs;
 }
 
 static void compute_io_uid_tasks(struct uid_entry *uid_entry)
@@ -290,8 +289,6 @@
 #else
 static void remove_uid_tasks(struct uid_entry *uid_entry) {};
 static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
-static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
-	struct task_struct *task, int slot) {};
 static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
 static void show_io_uid_tasks(struct seq_file *m,
 		struct uid_entry *uid_entry) {}
@@ -446,23 +443,32 @@
 	.proc_write = uid_remove_write,
 };
 
+static void __add_uid_io_stats(struct uid_entry *uid_entry,
+		struct task_io_accounting *ioac, int slot)
+{
+	struct io_stats *io_slot = &uid_entry->io[slot];
+
+	io_slot->read_bytes += ioac->read_bytes;
+	io_slot->write_bytes += compute_write_bytes(ioac);
+	io_slot->rchar += ioac->rchar;
+	io_slot->wchar += ioac->wchar;
+	io_slot->fsync += ioac->syscfs;
+}
 
 static void add_uid_io_stats(struct uid_entry *uid_entry,
 		struct task_struct *task, int slot)
 {
-	struct io_stats *io_slot = &uid_entry->io[slot];
+	struct task_entry *task_entry __maybe_unused;
 
 	/* avoid double accounting of dying threads */
 	if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
 		return;
 
-	io_slot->read_bytes += task->ioac.read_bytes;
-	io_slot->write_bytes += compute_write_bytes(task);
-	io_slot->rchar += task->ioac.rchar;
-	io_slot->wchar += task->ioac.wchar;
-	io_slot->fsync += task->ioac.syscfs;
-
-	add_uid_tasks_io_stats(uid_entry, task, slot);
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	task_entry = find_or_register_task(uid_entry, task);
+	add_uid_tasks_io_stats(task_entry, &task->ioac, slot);
+#endif
+	__add_uid_io_stats(uid_entry, &task->ioac, slot);
 }
 
 static void update_io_stats_all_locked(void)
```
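Worth noting about this split: `__add_uid_io_stats()` is the lock-held core and depends only on a `uid_entry` plus a `task_io_accounting`, so the deferred worker introduced below can reuse it on snapshotted counters. The per-task debug bookkeeping moves behind `CONFIG_UID_SYS_STATS_DEBUG`, which is also why the `#else` stub for `add_uid_tasks_io_stats()` could be dropped above. The resulting call paths, sketched as a comment (not part of the patch):

```c
/*
 * Exit path, uid_lock acquired directly:
 *   process_notifier()
 *     -> add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS)
 *          -> __add_uid_io_stats(uid_entry, &task->ioac, slot)
 *
 * Exit path, uid_lock contended, handled later by a worker:
 *   update_stats_workfn()
 *     -> __add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS)
 */
```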
```diff
@@ -622,6 +628,48 @@
 	.proc_write = uid_procstat_write,
 };
 
+struct update_stats_work {
+	struct work_struct work;
+	uid_t uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	struct task_struct *task;
+#endif
+	struct task_io_accounting ioac;
+	u64 utime;
+	u64 stime;
+};
+
+static void update_stats_workfn(struct work_struct *work)
+{
+	struct update_stats_work *usw =
+		container_of(work, struct update_stats_work, work);
+	struct uid_entry *uid_entry;
+	struct task_entry *task_entry __maybe_unused;
+
+	rt_mutex_lock(&uid_lock);
+	uid_entry = find_uid_entry(usw->uid);
+	if (!uid_entry)
+		goto exit;
+
+	uid_entry->utime += usw->utime;
+	uid_entry->stime += usw->stime;
+
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	task_entry = find_task_entry(uid_entry, usw->task);
+	if (!task_entry)
+		goto exit;
+	add_uid_tasks_io_stats(task_entry, &usw->ioac,
+		UID_STATE_DEAD_TASKS);
+#endif
+	__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
+exit:
+	rt_mutex_unlock(&uid_lock);
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	put_task_struct(usw->task);
+#endif
+	kfree(usw);
+}
+
 static int process_notifier(struct notifier_block *self,
 			unsigned long cmd, void *v)
 {
```
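The worker uses the standard embedded `work_struct` idiom: the work item lives inside a heap-allocated request, `container_of()` recovers the enclosing request in the work function, and the worker frees the request when done. A self-contained model of the idiom, with hypothetical names (`my_request`, `my_workfn`, `submit_deferred`):

```c
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical request type modeling struct update_stats_work. */
struct my_request {
	struct work_struct work;	/* embedded work item */
	int payload;			/* data snapshotted by the submitter */
};

static void my_workfn(struct work_struct *work)
{
	/* Recover the enclosing request from the embedded member. */
	struct my_request *req = container_of(work, struct my_request, work);

	pr_info("deferred payload: %d\n", req->payload);
	kfree(req);			/* the worker owns and frees the request */
}

static int submit_deferred(int payload)
{
	struct my_request *req = kmalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	INIT_WORK(&req->work, my_workfn);
	req->payload = payload;		/* copy everything the worker will need */
	schedule_work(&req->work);	/* runs later on the system workqueue */
	return 0;
}
```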
```diff
@@ -633,8 +681,28 @@
 	if (!task)
 		return NOTIFY_OK;
 
-	rt_mutex_lock(&uid_lock);
 	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	if (!rt_mutex_trylock(&uid_lock)) {
+		struct update_stats_work *usw;
+
+		usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
+		if (usw) {
+			INIT_WORK(&usw->work, update_stats_workfn);
+			usw->uid = uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+			usw->task = get_task_struct(task);
+#endif
+			/*
+			 * Copy task->ioac since task might be destroyed before
+			 * the work is later performed.
+			 */
+			usw->ioac = task->ioac;
+			task_cputime_adjusted(task, &usw->utime, &usw->stime);
+			schedule_work(&usw->work);
+		}
+		return NOTIFY_OK;
+	}
+
 	uid_entry = find_or_register_uid(uid);
 	if (!uid_entry) {
 		pr_err("%s: failed to find uid %d\n", __func__, uid);
```
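The overall effect: the exit notifier no longer blocks on a contended `uid_lock`. When the trylock fails, the notifier snapshots everything the update needs (the uid, `task->ioac` by value, the adjusted cputime, and in the debug config a reference-counted task pointer that the worker releases via `put_task_struct()`) and defers the update to the system workqueue. If the `kmalloc()` fails, the sample is silently dropped, so dead-task statistics can be lost under memory pressure; that appears to be an accepted trade-off for keeping the exit path non-blocking.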