```diff
@@ -96,6 +96,7 @@
 	struct task_struct	*task;
 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
 	unsigned long		*bitmap;
+	struct mm_struct	*mm;
 };
 
 struct vfio_batch {
```
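The new `mm` member caches the mapping process's mm_struct in the vfio_dma. Everything below hinges on the distinction between an mm_struct's two refcounts, so a minimal sketch of the API semantics may help; the `cached_mm` helpers are illustrative names of ours, not code from the patch:

```c
/* Sketch only: contrasting the two mm_struct refcounts used below. */
#include <linux/sched/mm.h>

static struct mm_struct *cached_mm;

static void cache_mm(void)
{
	cached_mm = current->mm;
	/* mmgrab() bumps mm_count: the mm_struct allocation stays valid,
	 * but the address space (page tables, VMAs) may still be torn
	 * down when the owning process exits. Paired with mmdrop(). */
	mmgrab(cached_mm);
}

static int use_cached_mm(void)
{
	/* mmget_not_zero() bumps mm_users only if the address space is
	 * still live; it fails once the owner has exited. */
	if (!mmget_not_zero(cached_mm))
		return -ESRCH;
	/* ...safe to fault/pin pages in cached_mm here... */
	mmput(cached_mm);	/* balances mmget_not_zero() */
	return 0;
}

static void release_mm(void)
{
	mmdrop(cached_mm);	/* balances mmgrab() */
}
```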
```diff
@@ -391,8 +392,8 @@
 	if (!npage)
 		return 0;
 
-	mm = async ? get_task_mm(dma->task) : dma->task->mm;
-	if (!mm)
+	mm = dma->mm;
+	if (async && !mmget_not_zero(mm))
 		return -ESRCH; /* process exited */
 
 	ret = mmap_write_lock_killable(mm);
```
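The accounting helper now trusts the cached mm. In the synchronous case the caller is the owning process itself, whose own reference keeps mm_users live, so only asynchronous callers (e.g. the mdev pin paths) need to take a temporary reference. A sketch of the balanced pattern, assuming the function drops the reference with `mmput()` once accounting completes; the helper name and the simplified accounting are ours:

```c
/* Illustrative shape of an async-aware locked_vm update. */
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int adjust_locked_vm(struct mm_struct *mm, long npage, bool async)
{
	int ret;

	/* Async callers may race with process exit; sync callers are
	 * the owning process, so their mm_users cannot reach zero. */
	if (async && !mmget_not_zero(mm))
		return -ESRCH;

	ret = mmap_write_lock_killable(mm);
	if (!ret) {
		mm->locked_vm += npage;	/* simplified: no rlimit check */
		mmap_write_unlock(mm);
	}

	if (async)
		mmput(mm);	/* balance mmget_not_zero() */

	return ret;
}
```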
```diff
@@ -666,8 +667,8 @@
 	struct mm_struct *mm;
 	int ret;
 
-	mm = get_task_mm(dma->task);
-	if (!mm)
+	mm = dma->mm;
+	if (!mmget_not_zero(mm))
 		return -ENODEV;
 
 	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
```
```diff
@@ -677,7 +678,7 @@
 	ret = 0;
 
 	if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
-		ret = vfio_lock_acct(dma, 1, true);
+		ret = vfio_lock_acct(dma, 1, false);
 		if (ret) {
 			put_pfn(*pfn_base, dma->prot);
 			if (ret == -ENOMEM)
```
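The third argument flips to false here presumably because this caller already holds a live mm reference from the `mmget_not_zero()` a few lines up, so `vfio_lock_acct()` can use `dma->mm` directly without taking and dropping a second reference on the same mm.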
```diff
@@ -1031,6 +1032,7 @@
 	vfio_unmap_unpin(iommu, dma, true);
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
+	mmdrop(dma->mm);
 	vfio_dma_bitmap_free(dma);
 	kfree(dma);
 	iommu->dma_avail++;
```
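Teardown now mirrors setup: the new `mmdrop()` releases the reference that `mmgrab()` takes at MAP_DMA time (next hunk), just as `put_task_struct()` releases the `get_task_struct()` taken there.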
```diff
@@ -1452,29 +1454,15 @@
 	 * against the locked memory limit and we need to be able to do both
 	 * outside of this call path as pinning can be asynchronous via the
 	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
-	 * task_struct and VM locked pages requires an mm_struct, however
-	 * holding an indefinite mm reference is not recommended, therefore we
-	 * only hold a reference to a task.  We could hold a reference to
-	 * current, however QEMU uses this call path through vCPU threads,
-	 * which can be killed resulting in a NULL mm and failure in the unmap
-	 * path when called via a different thread.  Avoid this problem by
-	 * using the group_leader as threads within the same group require
-	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
-	 * mm_struct.
-	 *
-	 * Previously we also used the task for testing CAP_IPC_LOCK at the
-	 * time of pinning and accounting, however has_capability() makes use
-	 * of real_cred, a copy-on-write field, so we can't guarantee that it
-	 * matches group_leader, or in fact that it might not change by the
-	 * time it's evaluated.  If a process were to call MAP_DMA with
-	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
-	 * possibly see different results for an iommu_mapped vfio_dma vs
-	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
-	 * time of calling MAP_DMA.
+	 * task_struct.  Save the group_leader so that all DMA tracking uses
+	 * the same task, to make debugging easier.  VM locked pages requires
+	 * an mm_struct, so grab the mm in case the task dies.
 	 */
 	get_task_struct(current->group_leader);
 	dma->task = current->group_leader;
 	dma->lock_cap = capable(CAP_IPC_LOCK);
+	dma->mm = current->mm;
+	mmgrab(dma->mm);
 
 	dma->pfn_list = RB_ROOT;
```
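This addresses the deleted comment's worry that "holding an indefinite mm reference is not recommended": `mmgrab()` pins only the mm_struct bookkeeping (mm_count), not the address space itself (mm_users), so an exited process's memory is not kept pinned indefinitely, and every user of `dma->mm` still has to upgrade via `mmget_not_zero()` as in the earlier hunks.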
```diff
@@ -2671,7 +2659,7 @@
 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
 					   struct vfio_info_cap *caps)
 {
-	struct vfio_iommu_type1_info_cap_migration cap_mig;
+	struct vfio_iommu_type1_info_cap_migration cap_mig = {};
 
 	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
 	cap_mig.header.version = 1;
```
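A separate, deliberate hunk: `cap_mig` is assembled on the stack and later copied out toward userspace through the caps buffer, so the empty-brace initializer presumably guards against leaking uninitialized stack bytes in any member that is never assigned (and, with the kernel's compilers, padding as well). A minimal illustration with a hypothetical struct:

```c
#include <linux/string.h>
#include <linux/types.h>

struct example_cap {
	__u32 id;
	__u32 version;
	__u64 flags;	/* never assigned below */
};

static void build_cap(void *out)
{
	struct example_cap cap = {};	/* all members start at zero */

	cap.id = 1;
	cap.version = 1;
	/* cap.flags is 0, not indeterminate stack contents, when copied */
	memcpy(out, &cap, sizeof(cap));
}
```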
```diff
@@ -2998,9 +2986,8 @@
 	    !(dma->prot & IOMMU_READ))
 		return -EPERM;
 
-	mm = get_task_mm(dma->task);
-
-	if (!mm)
+	mm = dma->mm;
+	if (!mmget_not_zero(mm))
 		return -EPERM;
 
 	if (kthread)
```