forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/xen/xenbus/xenbus_client.c
@@ -69,11 +69,24 @@
 	unsigned int nr_handles;
 };
 
+struct map_ring_valloc {
+	struct xenbus_map_node *node;
+
+	/* Why do we need two arrays? See comment of __xenbus_map_ring */
+	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+	unsigned int idx;
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-	int (*map)(struct xenbus_device *dev,
+	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
 		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
 		   void **vaddr);
 	int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -284,10 +297,8 @@
 	dev_err(&dev->dev, "%s\n", printf_buffer);
 
 	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
-	if (!path_buffer ||
-	    xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
-		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
-			dev->nodename, printf_buffer);
+	if (path_buffer)
+		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);
 
 	kfree(printf_buffer);
 	kfree(path_buffer);
@@ -368,7 +379,14 @@
 		    unsigned int nr_pages, grant_ref_t *grefs)
 {
 	int err;
-	int i, j;
+	unsigned int i;
+	grant_ref_t gref_head;
+
+	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "granting access to ring page");
+		return err;
+	}
 
 	for (i = 0; i < nr_pages; i++) {
 		unsigned long gfn;
@@ -378,23 +396,14 @@
 		else
 			gfn = virt_to_gfn(vaddr);
 
-		err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
-		if (err < 0) {
-			xenbus_dev_fatal(dev, err,
-					 "granting access to ring page");
-			goto fail;
-		}
-		grefs[i] = err;
+		grefs[i] = gnttab_claim_grant_reference(&gref_head);
+		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+						gfn, 0);
 
 		vaddr = vaddr + XEN_PAGE_SIZE;
 	}
 
 	return 0;
-
-fail:
-	for (j = 0; j < i; j++)
-		gnttab_end_foreign_access_ref(grefs[j], 0);
-	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
 
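Note: after this hunk xenbus_grant_ring() reserves all grant references up front with gnttab_alloc_grant_references(), so it can no longer fail half-way through the loop and the unwind path is gone. A minimal caller-side sketch, assuming a hypothetical frontend helper (the page handling and the XenStore write are placeholders; only the xenbus_grant_ring() call reflects the API touched by this diff):

#include <linux/gfp.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>

/* Hypothetical helper: share a single ring page with the backend. */
static int example_share_ring(struct xenbus_device *dev, void **ring_out,
			      grant_ref_t *gref_out)
{
	void *ring = (void *)get_zeroed_page(GFP_KERNEL);
	int err;

	if (!ring)
		return -ENOMEM;

	/* Grants for all pages are now pre-allocated inside this call,
	 * so an error is reported once and nothing is left half-granted. */
	err = xenbus_grant_ring(dev, ring, 1, gref_out);
	if (err) {
		free_page((unsigned long)ring);
		return err;
	}

	*ring_out = ring;	/* the caller would write *gref_out to XenStore */
	return 0;
}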
@@ -405,7 +414,7 @@
  * error, the device will switch to XenbusStateClosing, and the error will be
  * saved in the store.
  */
-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
 {
 	struct evtchn_alloc_unbound alloc_unbound;
 	int err;
@@ -428,7 +437,7 @@
 /**
  * Free an existing event channel. Returns 0 on success or -errno on error.
  */
-int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
 {
 	struct evtchn_close close;
 	int err;
@@ -437,7 +446,7 @@
 
 	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
 	if (err)
-		xenbus_dev_error(dev, err, "freeing event channel %d", port);
+		xenbus_dev_error(dev, err, "freeing event channel %u", port);
 
 	return err;
 }
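The event-channel helpers now take evtchn_port_t (an unsigned 32-bit type) rather than plain int, which is also why the format specifier above changes from %d to %u. A hedged usage sketch, assuming a hypothetical frontend that keeps the port in its private state; the struct and function names are illustrative only:

#include <xen/xenbus.h>
#include <xen/interface/event_channel.h>

/* Hypothetical per-device state; only the evtchn_port_t field matters here. */
struct example_front_info {
	struct xenbus_device *dev;
	evtchn_port_t evtchn;
};

static int example_open_evtchn(struct example_front_info *info)
{
	/* The port argument is now evtchn_port_t *, matching this diff. */
	return xenbus_alloc_evtchn(info->dev, &info->evtchn);
}

static void example_close_evtchn(struct example_front_info *info)
{
	xenbus_free_evtchn(info->dev, info->evtchn);
}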
@@ -454,8 +463,7 @@
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
@@ -463,12 +471,25 @@
 			   unsigned int nr_grefs, void **vaddr)
 {
 	int err;
+	struct map_ring_valloc *info;
 
-	err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
-	/* Some hypervisors are buggy and can return 1. */
-	if (err > 0)
-		err = GNTST_general_error;
+	*vaddr = NULL;
 
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+	if (!info->node)
+		err = -ENOMEM;
+	else
+		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
+
+	kfree(info->node);
+	kfree(info);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
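With this hunk xenbus_map_ring_valloc() allocates its scratch state (the new struct map_ring_valloc plus the xenbus_map_node) from the heap and performs the XENBUS_MAX_RING_GRANTS bound check itself, so the per-mode map callbacks no longer carry large arrays on the stack. The exported API is unchanged; a hedged backend-side sketch with hypothetical helper names (only the xenbus_* calls come from this file):

#include <xen/xenbus.h>
#include <xen/grant_table.h>

/* Hypothetical helper: map one grant from the other end into our address space. */
static int example_map_peer_ring(struct xenbus_device *dev,
				 grant_ref_t gref, void **ring)
{
	/* The map/unmap op arrays now live in a kzalloc'd map_ring_valloc
	 * inside this call rather than on the caller's stack; on failure
	 * the device switches to XenbusStateClosing and -errno is returned. */
	return xenbus_map_ring_valloc(dev, &gref, 1, ring);
}

static void example_unmap_peer_ring(struct xenbus_device *dev, void *ring)
{
	xenbus_unmap_ring_vfree(dev, ring);
}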
@@ -480,57 +501,94 @@
 			grant_ref_t *gnt_refs,
 			unsigned int nr_grefs,
 			grant_handle_t *handles,
-			phys_addr_t *addrs,
+			struct map_ring_valloc *info,
 			unsigned int flags,
 			bool *leaked)
 {
-	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
 	int i, j;
-	int err = GNTST_okay;
 
 	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
 		return -EINVAL;
 
 	for (i = 0; i < nr_grefs; i++) {
-		memset(&map[i], 0, sizeof(map[i]));
-		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-				  dev->otherend_id);
+		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+				  gnt_refs[i], dev->otherend_id);
 		handles[i] = INVALID_GRANT_HANDLE;
 	}
 
-	gnttab_batch_map(map, i);
+	gnttab_batch_map(info->map, i);
 
 	for (i = 0; i < nr_grefs; i++) {
-		if (map[i].status != GNTST_okay) {
-			err = map[i].status;
-			xenbus_dev_fatal(dev, map[i].status,
+		if (info->map[i].status != GNTST_okay) {
+			xenbus_dev_fatal(dev, info->map[i].status,
 					 "mapping in shared page %d from domain %d",
 					 gnt_refs[i], dev->otherend_id);
 			goto fail;
 		} else
-			handles[i] = map[i].handle;
+			handles[i] = info->map[i].handle;
 	}
 
-	return GNTST_okay;
+	return 0;
 
  fail:
 	for (i = j = 0; i < nr_grefs; i++) {
 		if (handles[i] != INVALID_GRANT_HANDLE) {
-			memset(&unmap[j], 0, sizeof(unmap[j]));
-			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+			gnttab_set_unmap_op(&info->unmap[j],
+					    info->phys_addrs[i],
 					    GNTMAP_host_map, handles[i]);
 			j++;
 		}
 	}
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
 		BUG();
 
 	*leaked = false;
 	for (i = 0; i < j; i++) {
-		if (unmap[i].status != GNTST_okay) {
+		if (info->unmap[i].status != GNTST_okay) {
 			*leaked = true;
+			break;
+		}
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
+ *
+ * Unmap memory in this domain that was imported from another domain.
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
+			     unsigned int nr_handles, unsigned long *vaddrs)
+{
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+	int i;
+	int err;
+
+	if (nr_handles > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
+
+	for (i = 0; i < nr_handles; i++)
+		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+				    GNTMAP_host_map, handles[i]);
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
+		BUG();
+
+	err = GNTST_okay;
+	for (i = 0; i < nr_handles; i++) {
+		if (unmap[i].status != GNTST_okay) {
+			xenbus_dev_error(dev, unmap[i].status,
+					 "unmapping page at handle %d error %d",
+					 handles[i], unmap[i].status);
+			err = unmap[i].status;
 			break;
 		}
 	}
@@ -538,21 +596,12 @@
 	return err;
 }
 
-struct map_ring_valloc_hvm
-{
-	unsigned int idx;
-
-	/* Why do we need two arrays? See comment of __xenbus_map_ring */
-	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
 					    unsigned int goffset,
 					    unsigned int len,
 					    void *data)
 {
-	struct map_ring_valloc_hvm *info = data;
+	struct map_ring_valloc *info = data;
 	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
 
 	info->phys_addrs[info->idx] = vaddr;
@@ -561,39 +610,28 @@
 	info->idx++;
 }
 
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-				      grant_ref_t *gnt_ref,
-				      unsigned int nr_grefs,
-				      void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+			       struct map_ring_valloc *info,
+			       grant_ref_t *gnt_ref,
+			       unsigned int nr_grefs,
+			       void **vaddr)
 {
-	struct xenbus_map_node *node;
+	struct xenbus_map_node *node = info->node;
 	int err;
 	void *addr;
 	bool leaked = false;
-	struct map_ring_valloc_hvm info = {
-		.idx = 0,
-	};
 	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	*vaddr = NULL;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
-	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
 	if (err)
 		goto out_err;
 
 	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
 			     xenbus_map_ring_setup_grant_hvm,
-			     &info);
+			     info);
 
 	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
-				info.phys_addrs, GNTMAP_host_map, &leaked);
+				info, GNTMAP_host_map, &leaked);
 	node->nr_handles = nr_grefs;
 
 	if (err)
@@ -613,60 +651,22 @@
 	spin_unlock(&xenbus_valloc_lock);
 
 	*vaddr = addr;
+	info->node = NULL;
+
 	return 0;
 
  out_xenbus_unmap_ring:
 	if (!leaked)
-		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
 	else
 		pr_alert("leaking %p size %u page(s)",
 			 addr, nr_pages);
  out_free_ballooned_pages:
 	if (!leaked)
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
  out_err:
-	kfree(node);
 	return err;
 }
-
-
-/**
- * xenbus_map_ring
- * @dev: xenbus device
- * @gnt_refs: grant reference array
- * @nr_grefs: number of grant reference
- * @handles: pointer to grant handle to be filled
- * @vaddrs: addresses to be mapped to
- * @leaked: fail to clean up a failed map, caller should not free vaddr
- *
- * Map pages of memory into this domain from another domain's grant table.
- * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the pages to the specified address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
- * XenbusStateClosing and the first error message will be saved in XenStore.
- * Further more if we fail to map the ring, caller should check @leaked.
- * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
- * should not free the address space of @vaddr.
- */
-int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
-		    unsigned int nr_grefs, grant_handle_t *handles,
-		    unsigned long *vaddrs, bool *leaked)
-{
-	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-	int i;
-
-	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	for (i = 0; i < nr_grefs; i++)
-		phys_addrs[i] = (unsigned long)vaddrs[i];
-
-	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
-				 phys_addrs, GNTMAP_host_map, leaked);
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring);
-
 
 /**
  * xenbus_unmap_ring_vfree
@@ -687,40 +687,33 @@
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
 #ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-				     grant_ref_t *gnt_refs,
-				     unsigned int nr_grefs,
-				     void **vaddr)
+static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
 {
-	struct xenbus_map_node *node;
+	struct map_ring_valloc *info = data;
+
+	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
+	return 0;
+}
+
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+			      struct map_ring_valloc *info,
+			      grant_ref_t *gnt_refs,
+			      unsigned int nr_grefs,
+			      void **vaddr)
+{
+	struct xenbus_map_node *node = info->node;
 	struct vm_struct *area;
-	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-	int err = GNTST_okay;
-	int i;
-	bool leaked;
+	bool leaked = false;
+	int err = -ENOMEM;
 
-	*vaddr = NULL;
-
-	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
+	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
+	if (!area)
 		return -ENOMEM;
-
-	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
-	if (!area) {
-		kfree(node);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < nr_grefs; i++)
-		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
+		goto failed;
 	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-				phys_addrs,
-				GNTMAP_host_map | GNTMAP_contains_pte,
+				info, GNTMAP_host_map | GNTMAP_contains_pte,
 				&leaked);
 	if (err)
 		goto failed;
@@ -733,6 +726,8 @@
 	spin_unlock(&xenbus_valloc_lock);
 
 	*vaddr = area->addr;
+	info->node = NULL;
+
 	return 0;
 
 failed:
@@ -741,11 +736,10 @@
 	else
 		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
 
-	kfree(node);
 	return err;
 }
 
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
 {
 	struct xenbus_map_node *node;
 	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -809,12 +803,12 @@
 }
 
 static const struct xenbus_ring_ops ring_ops_pv = {
-	.map = xenbus_map_ring_valloc_pv,
-	.unmap = xenbus_unmap_ring_vfree_pv,
+	.map = xenbus_map_ring_pv,
+	.unmap = xenbus_unmap_ring_pv,
 };
 #endif
 
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
 {
 	unsigned int idx;
 	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -825,19 +819,19 @@
 					    unsigned int len,
 					    void *data)
 {
-	struct unmap_ring_vfree_hvm *info = data;
+	struct unmap_ring_hvm *info = data;
 
 	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
 
 	info->idx++;
 }
 
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 {
 	int rv;
 	struct xenbus_map_node *node;
 	void *addr;
-	struct unmap_ring_vfree_hvm info = {
+	struct unmap_ring_hvm info = {
 		.idx = 0,
 	};
 	unsigned int nr_pages;
@@ -870,7 +864,7 @@
 			       info.addrs);
 	if (!rv) {
 		vunmap(vaddr);
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 	}
 	else
 		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
@@ -878,51 +872,6 @@
 	kfree(node);
 	return rv;
 }
-
-/**
- * xenbus_unmap_ring
- * @dev: xenbus device
- * @handles: grant handle array
- * @nr_handles: number of handles in the array
- * @vaddrs: addresses to unmap
- *
- * Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring(struct xenbus_device *dev,
-		      grant_handle_t *handles, unsigned int nr_handles,
-		      unsigned long *vaddrs)
-{
-	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
-	int i;
-	int err;
-
-	if (nr_handles > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	for (i = 0; i < nr_handles; i++)
-		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
-				    GNTMAP_host_map, handles[i]);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
-		BUG();
-
-	err = GNTST_okay;
-	for (i = 0; i < nr_handles; i++) {
-		if (unmap[i].status != GNTST_okay) {
-			xenbus_dev_error(dev, unmap[i].status,
-					 "unmapping page at handle %d error %d",
-					 handles[i], unmap[i].status);
-			err = unmap[i].status;
-			break;
-		}
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-
 
 /**
  * xenbus_read_driver_state
@@ -943,8 +892,8 @@
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
 static const struct xenbus_ring_ops ring_ops_hvm = {
-	.map = xenbus_map_ring_valloc_hvm,
-	.unmap = xenbus_unmap_ring_vfree_hvm,
+	.map = xenbus_map_ring_hvm,
+	.unmap = xenbus_unmap_ring_hvm,
 };
 
 void __init xenbus_ring_ops_init(void)
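For context (not part of this diff): the two ops tables above are picked once at boot by xenbus_ring_ops_init(), whose body is not shown here. A sketch reconstructed from the mainline kernel, so treat the exact code as an assumption rather than part of this commit; ring_ops is the file-local pointer used by xenbus_map_ring_valloc():

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	/* PV domains take the PTE-based path (ring_ops_pv); HVM/PVH domains
	 * take the unpopulated-page path (ring_ops_hvm) used above. */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}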