forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/include/xen/arm/page-coherent.h
@@ -1,107 +1,20 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
+#ifndef _XEN_ARM_PAGE_COHERENT_H
+#define _XEN_ARM_PAGE_COHERENT_H
 
-#include <asm/page.h>
-#include <asm/dma-mapping.h>
 #include <linux/dma-mapping.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-	if (dev && dev->archdata.dev_dma_ops)
-		return dev->archdata.dev_dma_ops;
-	return get_arch_dma_ops(NULL);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+#include <asm/page.h>
 
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
-	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
 }
 
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (local)
-		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible to have a mix of local and
-	 * foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->unmap_page)
-			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#endif /* _XEN_ARM_PAGE_COHERENT_H */
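
Usage note (editor's sketch, not part of the commit): with the xen_get_dma_ops() indirection and the map/unmap/sync wrappers removed, this header only forwards coherent-page allocation to the generic direct-mapping DMA code. A minimal, hypothetical example of how a dom0 driver would use the two surviving helpers is sketched below; the device pointer, one-page size, and zero attrs are illustrative assumptions, not taken from this tree.

/* Hypothetical usage sketch -- not part of this commit. Assumes a
 * probed struct device; PAGE_SIZE and GFP_KERNEL come in via the
 * headers already pulled in by page-coherent.h. */
#include <linux/dma-mapping.h>
#include <xen/arm/page-coherent.h>

static void *ring;		/* CPU address of the coherent buffer */
static dma_addr_t ring_dma;	/* matching bus address for the device */

static int ring_setup(struct device *dev)
{
	/* One coherent page, served by dma_direct_alloc() underneath. */
	ring = xen_alloc_coherent_pages(dev, PAGE_SIZE, &ring_dma,
					GFP_KERNEL, 0);
	return ring ? 0 : -ENOMEM;
}

static void ring_teardown(struct device *dev)
{
	/* Must pass back the same size and addresses used at allocation. */
	xen_free_coherent_pages(dev, PAGE_SIZE, ring, ring_dma, 0);
}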