2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/drivers/xen/privcmd.c
+++ b/kernel/drivers/xen/privcmd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /******************************************************************************
  * privcmd.c
  *
@@ -24,9 +25,6 @@
 #include <linux/miscdevice.h>
 #include <linux/moduleparam.h>
 
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
@@ -277,7 +275,7 @@
 	if (rc || list_empty(&pagelist))
 		goto out;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	{
 		struct page *page = list_first_entry(&pagelist,
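This hunk (and the matching ones below) is the tree-wide mmap locking API conversion: the mm's mmap_sem rwsem became mm->mmap_lock, and callers now go through wrapper functions instead of touching the rwsem directly. A minimal sketch of the wrappers, simplified from include/linux/mmap_lock.h (the real definitions also hook lockdep and, in later kernels, tracepoints):

    /* simplified; see include/linux/mmap_lock.h for the real definitions */
    static inline void mmap_write_lock(struct mm_struct *mm)
    {
    	down_write(&mm->mmap_lock);
    }

    static inline void mmap_write_unlock(struct mm_struct *mm)
    {
    	up_write(&mm->mmap_lock);
    }

Behaviour is unchanged; the indirection exists so the locking implementation can evolve in one place.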
@@ -302,7 +300,7 @@
 
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 out:
 	free_page_list(&pagelist);
@@ -426,7 +424,7 @@
 	if (pages == NULL)
 		return -ENOMEM;
 
-	rc = alloc_xenballooned_pages(numpgs, pages);
+	rc = xen_alloc_unpopulated_pages(numpgs, pages);
 	if (rc != 0) {
 		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
 			numpgs, rc);
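alloc_xenballooned_pages() handed out pages from the balloon driver, i.e. real RAM pages whose backing had been given up to the hypervisor, so every foreign mapping shrank the guest's usable memory. xen_alloc_unpopulated_pages() instead draws from unpopulated, ZONE_DEVICE-backed regions reserved for exactly this purpose, leaving ballooned RAM alone. The matching free-side conversion (xen_free_unpopulated_pages()) appears near the end of this patch.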
@@ -459,14 +457,14 @@
 			return -EFAULT;
 		/* Returns per-frame error in m.arr. */
 		m.err = NULL;
-		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
+		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
 			return -EFAULT;
 		break;
 	case 2:
 		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
 			return -EFAULT;
 		/* Returns per-frame error code in m.err. */
-		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
+		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
 			return -EFAULT;
 		break;
 	default:
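Since Linux 5.0, access_ok() takes only the pointer and size; the VERIFY_READ/VERIFY_WRITE first argument was removed treewide because no remaining architecture distinguished the two cases. Shape of the change:

    /* before (<= 4.20) */
    if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
    /* after  (>= 5.0)  */
    if (!access_ok(m.arr, m.num * sizeof(*m.arr)))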
@@ -498,7 +496,7 @@
 		}
 	}
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	vma = find_vma(mm, m.addr);
 	if (!vma ||
@@ -554,7 +552,7 @@
 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 				    &pagelist, mmap_batch_fn, &state));
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
@@ -575,34 +573,38 @@
 	return ret;
 
 out_unlock:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	goto out;
 }
 
 static int lock_pages(
 	struct privcmd_dm_op_buf kbufs[], unsigned int num,
-	struct page *pages[], unsigned int nr_pages)
+	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
 {
-	unsigned int i;
+	unsigned int i, off = 0;
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num; ) {
 		unsigned int requested;
-		int pinned;
+		int page_count;
 
 		requested = DIV_ROUND_UP(
 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
-			PAGE_SIZE);
+			PAGE_SIZE) - off;
 		if (requested > nr_pages)
 			return -ENOSPC;
 
-		pinned = get_user_pages_fast(
-			(unsigned long) kbufs[i].uptr,
+		page_count = pin_user_pages_fast(
+			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
 			requested, FOLL_WRITE, pages);
-		if (pinned < 0)
-			return pinned;
+		if (page_count <= 0)
+			return page_count ? : -EFAULT;
 
-		nr_pages -= pinned;
-		pages += pinned;
+		*pinned += page_count;
+		nr_pages -= page_count;
+		pages += page_count;
+
+		off = (requested == page_count) ? 0 : off + page_count;
+		i += !off;
 	}
 
 	return 0;
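The reworked lock_pages() copes with short pins instead of silently treating them as success. pin_user_pages_fast() may pin fewer pages than requested, so the loop no longer advances unconditionally: off records how many pages of buffer i are already pinned, the retry asks for requested - off pages starting at uptr + off * PAGE_SIZE, and i += !off moves to the next buffer only once off wraps back to 0, i.e. the buffer is complete. A return of 0 pages is mapped to -EFAULT (page_count ? : -EFAULT is the GNU "elvis" shorthand for page_count ? page_count : -EFAULT). Worked example: for a buffer spanning three pages where the first call pins two, off becomes 2, the next iteration requests 3 - 2 = 1 page starting two pages into the buffer, and only then does i advance. The new *pinned out-parameter counts every page actually pinned, so the caller can release exactly that many on any exit path.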
@@ -610,15 +612,7 @@
 
 static void unlock_pages(struct page *pages[], unsigned int nr_pages)
 {
-	unsigned int i;
-
-	if (!pages)
-		return;
-
-	for (i = 0; i < nr_pages; i++) {
-		if (pages[i])
-			put_page(pages[i]);
-	}
+	unpin_user_pages_dirty_lock(pages, nr_pages, true);
 }
 
 static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
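Pages taken with pin_user_pages_fast() (a FOLL_PIN pin) must be released with the unpin_user_page*() family, not put_page(); the two APIs keep separate accounting. unpin_user_pages_dirty_lock(pages, nr_pages, true) also marks each page dirty before unpinning, which matters here because the hypercall may have written into the buffers. The defensive NULL checks are gone since the caller now passes the exact pinned count. The canonical pairing, per Documentation/core-api/pin_user_pages.rst:

    /* sketch of the pin/unpin pairing this file now relies on */
    n = pin_user_pages_fast(addr, nr, FOLL_WRITE, pages);
    if (n > 0)
    	unpin_user_pages_dirty_lock(pages, n, true);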
@@ -631,6 +625,7 @@
 	struct xen_dm_op_buf *xbufs = NULL;
 	unsigned int i;
 	long rc;
+	unsigned int pinned = 0;
 
 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
 		return -EFAULT;
@@ -661,7 +656,7 @@
 			goto out;
 		}
 
-		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
+		if (!access_ok(kbufs[i].uptr,
 			       kbufs[i].size)) {
 			rc = -EFAULT;
 			goto out;
@@ -684,8 +679,8 @@
 		goto out;
 	}
 
-	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
-	if (rc)
+	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
+	if (rc < 0)
 		goto out;
 
 	for (i = 0; i < kdata.num; i++) {
@@ -698,7 +693,7 @@
 	xen_preemptible_hcall_end();
 
 out:
-	unlock_pages(pages, nr_pages);
+	unlock_pages(pages, pinned);
 	kfree(xbufs);
 	kfree(pages);
 	kfree(kbufs);
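Both call-site changes follow from the new lock_pages() contract: failure is now rc < 0, and cleanup passes pinned, the number of pages actually pinned, rather than nr_pages, the number requested. That matters on the error path: the old put_page() loop skipped NULL slots, but unpin_user_pages_dirty_lock() does not, so unpinning the full nr_pages after a partial lock_pages() failure would trip over array entries that were never filled in.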
@@ -719,26 +714,6 @@
 		data->domid = dom;
 	else if (data->domid != dom)
 		return -EINVAL;
-
-	return 0;
-}
-
-struct remap_pfn {
-	struct mm_struct *mm;
-	struct page **pages;
-	pgprot_t prot;
-	unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-			void *data)
-{
-	struct remap_pfn *r = data;
-	struct page *page = r->pages[r->i];
-	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
-	set_pte_at(r->mm, addr, ptep, pte);
-	r->i++;
 
 	return 0;
 }
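The removed remap_pfn/remap_pfn_fn machinery was an open-coded apply_to_page_range() walk that installed special PTEs for the auto-translated (PVH/HVM) case. Its replacement, used in a later hunk, is the common helper xen_remap_vma_range(vma, addr, len) from drivers/xen/xlate_mmu.c, which is only built when CONFIG_XEN_AUTO_XLATE is enabled; hence the IS_ENABLED() guards that appear at the call sites below.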
@@ -777,7 +752,7 @@
 		return __put_user(xdata.nr_frames, &udata->num);
 	}
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	vma = find_vma(mm, kdata.addr);
 	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
@@ -785,13 +760,14 @@
 		goto out;
 	}
 
-	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
 	if (!pfns) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+	    xen_feature(XENFEAT_auto_translated_physmap)) {
 		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
 		struct page **pages;
 		unsigned int i;
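kdata.num arrives straight from user space, so a huge value can legitimately make this allocation fail; __GFP_NOWARN suppresses the allocation-failure splat for that user-triggerable case (a classic fuzzer find) while still returning -ENOMEM. The IS_ENABLED(CONFIG_XEN_AUTO_XLATE) half of the new condition lets the compiler discard the auto-translated branch, and with it the calls into xlate_mmu.c, on configurations that do not build that file.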
@@ -821,16 +797,9 @@
 	if (rc)
 		goto out;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		struct remap_pfn r = {
-			.mm = vma->vm_mm,
-			.pages = vma->vm_private_data,
-			.prot = vma->vm_page_prot,
-		};
-
-		rc = apply_to_page_range(r.mm, kdata.addr,
-					 kdata.num << PAGE_SHIFT,
-					 remap_pfn_fn, &r);
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+	    xen_feature(XENFEAT_auto_translated_physmap)) {
+		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
 	} else {
 		unsigned int domid =
 			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
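Ten lines of open-coded mapping collapse into a single helper call. A sketch of what the helper does, reconstructed from the code deleted here and believed to match drivers/xen/xlate_mmu.c (treat it as illustrative, not the exact source):

    /* illustrative reconstruction of the xlate_mmu.c helper */
    int xen_remap_vma_range(struct vm_area_struct *vma,
    			unsigned long addr, unsigned long len)
    {
    	struct remap_pfn r = {
    		.mm = vma->vm_mm,
    		.pages = vma->vm_private_data,
    		.prot = vma->vm_page_prot,
    	};

    	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
    }

In other words, the logic did not change; it moved to a shared location where both x86 and ARM auto-translated guests can use it.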
@@ -859,7 +828,7 @@
 	}
 
 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	kfree(pfns);
 
 	return rc;
@@ -941,7 +910,7 @@
 
 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
-		free_xenballooned_pages(numpgs, pages);
+		xen_free_unpopulated_pages(numpgs, pages);
 	else
 		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
 			numpgs, rc);
@@ -979,8 +948,7 @@
  * on a per pfn/pte basis. Mapping calls that fail with ENOENT
  * can be then retried until success.
  */
-static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
-			unsigned long addr, void *data)
+static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
 {
 	return pte_none(*pte) ? 0 : -EBUSY;
 }
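The signature change tracks the pte_fn_t callback type used by apply_to_page_range(): the pgtable_t argument was dropped treewide because no callback ever used it. The resulting typedef in include/linux/mm.h:

    typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);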