forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/xtensa/kernel/pci-dma.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * DMA coherent memory allocation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  *
  * Copyright (C) 2002 - 2005 Tensilica Inc.
  * Copyright (C) 2015 Cadence Design Systems Inc.
@@ -15,8 +11,7 @@
  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  */
 
-#include <linux/dma-contiguous.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
@@ -48,8 +43,8 @@
 		}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
@@ -66,8 +61,8 @@
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
@@ -85,128 +80,19 @@
 	}
 }
 
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	__invalidate_dcache_range((unsigned long)page_address(page), size);
+}
+
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
+ */
 #ifdef CONFIG_MMU
-bool platform_vaddr_cached(const void *p)
-{
-	unsigned long addr = (unsigned long)p;
-
-	return addr >= XCHAL_KSEG_CACHED_VADDR &&
-	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
-}
-
-bool platform_vaddr_uncached(const void *p)
-{
-	unsigned long addr = (unsigned long)p;
-
-	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
-	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
-}
-
-void *platform_vaddr_to_uncached(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
 {
 	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
-
-void *platform_vaddr_to_cached(void *p)
-{
-	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
-#else
-bool __attribute__((weak)) platform_vaddr_cached(const void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return true;
-}
-
-bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return false;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return p;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
-{
-	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
-	return p;
-}
-#endif
-
-/*
- * Note: We assume that the full memory space is always mapped to 'kseg'
- *	 Otherwise we have to use page attributes (not implemented).
- */
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		gfp_t flag, unsigned long attrs)
-{
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page *page = NULL;
-
-	/* ignore region speicifiers */
-
-	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		flag |= GFP_DMA;
-
-	if (gfpflags_allow_blocking(flag))
-		page = dma_alloc_from_contiguous(dev, count, get_order(size),
-						 flag & __GFP_NOWARN);
-
-	if (!page)
-		page = alloc_pages(flag, get_order(size));
-
-	if (!page)
-		return NULL;
-
-	*handle = phys_to_dma(dev, page_to_phys(page));
-
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
-		return page;
-	}
-
-#ifdef CONFIG_MMU
-	if (PageHighMem(page)) {
-		void *p;
-
-		p = dma_common_contiguous_remap(page, size, VM_MAP,
-						pgprot_noncached(PAGE_KERNEL),
-						__builtin_return_address(0));
-		if (!p) {
-			if (!dma_release_from_contiguous(dev, page, count))
-				__free_pages(page, get_order(size));
-		}
-		return p;
-	}
-#endif
-	BUG_ON(!platform_vaddr_cached(page_address(page)));
-	__invalidate_dcache_range((unsigned long)page_address(page), size);
-	return platform_vaddr_to_uncached(page_address(page));
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page *page;
-
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
-		page = vaddr;
-	} else if (platform_vaddr_uncached(vaddr)) {
-		page = virt_to_page(platform_vaddr_to_cached(vaddr));
-	} else {
-#ifdef CONFIG_MMU
-		dma_common_free_remap(vaddr, size, VM_MAP);
-#endif
-		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
-	}
-
-	if (!dma_release_from_contiguous(dev, page, count))
-		__free_pages(page, get_order(size));
-}
+#endif /* CONFIG_MMU */
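
For context on the noMMU comment added in the last hunk: with arch_dma_alloc()/arch_dma_free() removed, the generic DMA code allocates coherent memory itself and only calls back into the architecture through arch_dma_prep_coherent() and arch_dma_set_uncached(). The sketch below is a hypothetical illustration, not part of this change, of how a noMMU platform with a fixed uncached alias window could provide arch_dma_set_uncached(); PLATFORM_CACHED_VADDR, PLATFORM_UNCACHED_VADDR and PLATFORM_KSEG_SIZE are assumed placeholders, not constants defined in this tree.

/*
 * Hypothetical platform-side hook for a noMMU configuration (sketch only).
 * The PLATFORM_* constants are assumptions standing in for real platform
 * parameters.
 */
#include <linux/dma-map-ops.h>
#include <linux/err.h>

#define PLATFORM_CACHED_VADDR	0xd0000000UL	/* assumed cached window base */
#define PLATFORM_UNCACHED_VADDR	0xd8000000UL	/* assumed uncached alias base */
#define PLATFORM_KSEG_SIZE	0x08000000UL	/* assumed window size */

void *arch_dma_set_uncached(void *p, size_t size)
{
	unsigned long addr = (unsigned long)p;

	/* Only addresses inside the cached window have an uncached alias. */
	if (addr < PLATFORM_CACHED_VADDR ||
	    addr - PLATFORM_CACHED_VADDR + size > PLATFORM_KSEG_SIZE)
		return ERR_PTR(-EINVAL);

	return p + PLATFORM_UNCACHED_VADDR - PLATFORM_CACHED_VADDR;
}

With a hook along these lines, the generic allocator can hand out the uncached alias after arch_dma_prep_coherent() has invalidated the cached mapping, mirroring what the CONFIG_MMU path in this file already does via the KSEG bypass window.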