..
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
..
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 
..
 #include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/page.h>
+#include <xen/xen-ops.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
..
 
 unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 {
-	struct memblock_region *reg;
+	phys_addr_t base;
 	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
+	u64 i;
 
-	for_each_memblock(memory, reg) {
-		if (reg->base < (phys_addr_t)0xffffffff) {
-			flags |= __GFP_DMA;
+	for_each_mem_range(i, &base, NULL) {
+		if (base < (phys_addr_t)0xffffffff) {
+			if (IS_ENABLED(CONFIG_ZONE_DMA32))
+				flags |= __GFP_DMA32;
+			else
+				flags |= __GFP_DMA;
 			break;
 		}
 	}
 	return __get_free_pages(flags, order);
 }
 
-enum dma_cache_op {
-	DMA_UNMAP,
-	DMA_MAP,
-};
 static bool hypercall_cflush = false;
 
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+/* buffers in highmem or foreign pages cannot cross page boundaries */
+static void dma_cache_maint(struct device *dev, dma_addr_t handle,
+			    size_t size, u32 op)
 {
 	struct gnttab_cache_flush cflush;
-	unsigned long xen_pfn;
-	size_t left = size;
 
-	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
-	offset %= XEN_PAGE_SIZE;
+	cflush.offset = xen_offset_in_page(handle);
+	cflush.op = op;
+	handle &= XEN_PAGE_MASK;
 
 	do {
-		size_t len = left;
-
-		/* buffers in highmem or foreign pages cannot cross page
-		 * boundaries */
-		if (len + offset > XEN_PAGE_SIZE)
-			len = XEN_PAGE_SIZE - offset;
+		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);
 
-		cflush.op = 0;
-		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
-		cflush.offset = offset;
-		cflush.length = len;
+		if (size + cflush.offset > XEN_PAGE_SIZE)
+			cflush.length = XEN_PAGE_SIZE - cflush.offset;
+		else
+			cflush.length = size;
 
-		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
-			cflush.op = GNTTAB_CACHE_INVAL;
-		if (op == DMA_MAP) {
-			if (dir == DMA_FROM_DEVICE)
-				cflush.op = GNTTAB_CACHE_INVAL;
-			else
-				cflush.op = GNTTAB_CACHE_CLEAN;
-		}
-		if (cflush.op)
-			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
-		offset = 0;
-		xen_pfn++;
-		left -= len;
-	} while (left);
+		cflush.offset = 0;
+		handle += cflush.length;
+		size -= cflush.length;
+	} while (size);
 }
 
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+/*
+ * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen
+ * pages, it is not possible for it to contain a mix of local and foreign Xen
+ * pages. Calling pfn_valid on a foreign mfn will always return false, so if
+ * pfn_valid returns true the page is local and we can use the native
+ * dma-direct functions, otherwise we call the Xen specific version.
+ */
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+			  size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+	if (dir != DMA_TO_DEVICE)
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
 }
 
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+			     size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+	if (dir == DMA_FROM_DEVICE)
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
+	else
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
 }
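
The comment above spells out the rule callers are expected to follow: use the native dma-direct cache maintenance for local pages and these Xen-specific helpers for foreign ones. A minimal sketch of that dispatch, using a hypothetical example_sync_for_cpu() that is not part of this patch (the real call sites live in drivers/xen/swiotlb-xen.c and may differ in detail):

#include <linux/dma-direct.h>	/* dma_to_phys() */
#include <linux/dma-map-ops.h>	/* dev_is_dma_coherent(), arch_sync_dma_for_cpu() */
#include <linux/mm.h>		/* pfn_valid(), PFN_DOWN() */
#include <xen/xen-ops.h>	/* xen_dma_sync_for_cpu() */

/*
 * Hypothetical helper, for illustration only: pfn_valid() is false for a
 * foreign mfn, so a valid pfn means the page is local and the native
 * dma-direct path applies; otherwise fall back to the Xen-specific sync,
 * which issues the cache-flush hypercall.
 */
static void example_sync_for_cpu(struct device *dev, dma_addr_t handle,
				 size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys = dma_to_phys(dev, handle);

	if (dev_is_dma_coherent(dev))
		return;					/* no maintenance needed */

	if (pfn_valid(PFN_DOWN(phys)))
		arch_sync_dma_for_cpu(phys, size, dir);		/* local page */
	else
		xen_dma_sync_for_cpu(dev, handle, size, dir);	/* foreign page */
}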
 
 bool xen_arch_need_swiotlb(struct device *dev,
..
			   dma_addr_t dev_addr)
 {
 	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
-	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));
 
 	/*
 	 * The swiotlb buffer should be used if
..
 	 * memory and we are not able to flush the cache.
 	 */
 	return (!hypercall_cflush && (xen_pfn != bfn) &&
-		!is_device_dma_coherent(dev));
+		!dev_is_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
..
 	*dma_handle = pstart;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
 	return;
 }
-EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
-const struct dma_map_ops *xen_dma_ops;
-EXPORT_SYMBOL(xen_dma_ops);
-
-int __init xen_mm_init(void)
+static int __init xen_mm_init(void)
 {
 	struct gnttab_cache_flush cflush;
 	if (!xen_initial_domain())
 		return 0;
 	xen_swiotlb_init(1, false);
-	xen_dma_ops = &xen_swiotlb_dma_ops;
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
---|