2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/arch/arm/xen/mm.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
@@ -8,7 +9,6 @@
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 
@@ -16,6 +16,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/page.h>
+#include <xen/xen-ops.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
@@ -24,117 +25,71 @@
 
 unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 {
-	struct memblock_region *reg;
+	phys_addr_t base;
 	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
+	u64 i;
 
-	for_each_memblock(memory, reg) {
-		if (reg->base < (phys_addr_t)0xffffffff) {
-			flags |= __GFP_DMA;
+	for_each_mem_range(i, &base, NULL) {
+		if (base < (phys_addr_t)0xffffffff) {
+			if (IS_ENABLED(CONFIG_ZONE_DMA32))
+				flags |= __GFP_DMA32;
+			else
+				flags |= __GFP_DMA;
 			break;
 		}
 	}
 	return __get_free_pages(flags, order);
 }
 
-enum dma_cache_op {
-	DMA_UNMAP,
-	DMA_MAP,
-};
 static bool hypercall_cflush = false;
 
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+/* buffers in highmem or foreign pages cannot cross page boundaries */
+static void dma_cache_maint(struct device *dev, dma_addr_t handle,
+	size_t size, u32 op)
 {
 	struct gnttab_cache_flush cflush;
-	unsigned long xen_pfn;
-	size_t left = size;
 
-	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
-	offset %= XEN_PAGE_SIZE;
+	cflush.offset = xen_offset_in_page(handle);
+	cflush.op = op;
+	handle &= XEN_PAGE_MASK;
 
 	do {
-		size_t len = left;
-
-		/* buffers in highmem or foreign pages cannot cross page
-		 * boundaries */
-		if (len + offset > XEN_PAGE_SIZE)
-			len = XEN_PAGE_SIZE - offset;
+		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);
 
-		cflush.op = 0;
-		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
-		cflush.offset = offset;
-		cflush.length = len;
+		if (size + cflush.offset > XEN_PAGE_SIZE)
+			cflush.length = XEN_PAGE_SIZE - cflush.offset;
+		else
+			cflush.length = size;
 
-		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
-			cflush.op = GNTTAB_CACHE_INVAL;
-		if (op == DMA_MAP) {
-			if (dir == DMA_FROM_DEVICE)
-				cflush.op = GNTTAB_CACHE_INVAL;
-			else
-				cflush.op = GNTTAB_CACHE_CLEAN;
-		}
-		if (cflush.op)
-			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
-		offset = 0;
-		xen_pfn++;
-		left -= len;
-	} while (left);
+		cflush.offset = 0;
+		handle += cflush.length;
+		size -= cflush.length;
+	} while (size);
 }
 
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
+/*
+ * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen
+ * pages, it is not possible for it to contain a mix of local and foreign Xen
+ * pages. Calling pfn_valid on a foreign mfn will always return false, so if
+ * pfn_valid returns true the pages is local and we can use the native
+ * dma-direct functions, otherwise we call the Xen specific version.
+ */
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+	size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+	if (dir != DMA_TO_DEVICE)
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
 }
 
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+	size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	dma_addr_t dev_addr, unsigned long offset, size_t size,
-	enum dma_data_direction dir, unsigned long attrs)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir,
-	unsigned long attrs)
-
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+	if (dir == DMA_FROM_DEVICE)
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
+	else
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
 }
 
 bool xen_arch_need_swiotlb(struct device *dev,
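
The comment added above xen_dma_sync_for_cpu() in the hunk above describes the split this refactor relies on: a pfn_valid() check separates local pages, which can use the native dma-direct cache maintenance, from foreign pages, which need the grant-table hypercall path. The sketch below illustrates that dispatch for the sync-for-cpu direction; it is a minimal, hypothetical caller (the name example_sync_single_for_cpu() is not part of this diff), assuming only the standard kernel helpers dma_to_phys(), dev_is_dma_coherent(), pfn_valid(), PFN_DOWN() and arch_sync_dma_for_cpu().

/*
 * Illustrative sketch only, not part of this diff: how a caller could
 * dispatch between native cache maintenance and the Xen-specific helper,
 * following the pfn_valid() reasoning in the comment above.
 */
#include <linux/dma-direct.h>	/* dma_to_phys() */
#include <linux/dma-map-ops.h>	/* dev_is_dma_coherent(), arch_sync_dma_for_cpu() */
#include <xen/xen-ops.h>	/* xen_dma_sync_for_cpu() */

static void example_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
					size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, dma_addr);

	/* Coherent devices need no explicit cache maintenance. */
	if (dev_is_dma_coherent(dev))
		return;

	if (pfn_valid(PFN_DOWN(paddr)))
		/* Local page: native dma-direct cache maintenance. */
		arch_sync_dma_for_cpu(paddr, size, dir);
	else
		/* Foreign page: grant-table cache-flush hypercall path. */
		xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
}

The same pattern applies to the device direction with arch_sync_dma_for_device() and xen_dma_sync_for_device().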
@@ -142,7 +97,7 @@
 	dma_addr_t dev_addr)
 {
 	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
-	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));
 
 	/*
 	 * The swiotlb buffer should be used if
@@ -160,7 +115,7 @@
 	 * memory and we are not able to flush the cache.
 	 */
 	return (!hypercall_cflush && (xen_pfn != bfn) &&
-		!is_device_dma_coherent(dev));
+		!dev_is_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
@@ -174,24 +129,18 @@
 	*dma_handle = pstart;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
 	return;
 }
-EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
-const struct dma_map_ops *xen_dma_ops;
-EXPORT_SYMBOL(xen_dma_ops);
-
-int __init xen_mm_init(void)
+static int __init xen_mm_init(void)
 {
 	struct gnttab_cache_flush cflush;
 	if (!xen_initial_domain())
 		return 0;
 	xen_swiotlb_init(1, false);
-	xen_dma_ops = &xen_swiotlb_dma_ops;
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;