forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/arch/powerpc/mm/dma-noncoherent.c
@@ -1,320 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  PowerPC version derived from arch/arm/mm/consistent.c
  *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
  *
  *  Copyright (C) 2000 Russell King
- *
- * Consistent memory allocators.  Used for DMA devices that want to
- * share uncached memory with the processor core.  The function return
- * is the virtual address and 'dma_handle' is the physical address.
- * Mostly stolen from the ARM port, with some changes for PowerPC.
- * -- Dan
- *
- * Reorganized to get rid of the arch-specific consistent_* functions
- * and provide non-coherent implementations for the DMA API. -Matt
- *
- * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
- * implementation. This is pulled straight from ARM and barely
- * modified. -Matt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */

-#include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/string.h>
 #include <linux/types.h>
 #include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>

 #include <asm/tlbflush.h>
 #include <asm/dma.h>

-#include "mmu_decl.h"
-
-/*
- * This address range defaults to a value that is safe for all
- * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
- * can be further configured for specific applications under
- * the "Advanced Setup" menu. -Matt
- */
-#define CONSISTENT_BASE		(IOREMAP_TOP)
-#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
-#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-
-/*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
- */
-static DEFINE_SPINLOCK(consistent_lock);
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct ppc_vm_region {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-};
-
-static struct ppc_vm_region consistent_head = {
-	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start	= CONSISTENT_BASE,
-	.vm_end		= CONSISTENT_END,
-};
-
-static struct ppc_vm_region *
-ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct ppc_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry _before_ the one we found.
-	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
-{
-	struct ppc_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-/*
- * Allocate DMA-coherent memory space and return both the kernel remapped
- * virtual and bus address for that space.
- */
-void *
-__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-	struct page *page;
-	struct ppc_vm_region *c;
-	unsigned long order;
-	u64 mask = ISA_DMA_THRESHOLD, limit;
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-		if ((~mask) & ISA_DMA_THRESHOLD) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
-			goto no_page;
-		}
-	}
-
-
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
-		       size, mask);
-		return NULL;
-	}
-
-	order = get_order(size);
-
-	/* Might be useful if we ever have a real legacy DMA zone... */
-	if (mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		flush_dcache_range(kaddr, kaddr + size);
-	}
-
-	/*
-	 * Allocate a virtual address in the consistent mapping region.
-	 */
-	c = ppc_vm_region_alloc(&consistent_head, size,
-				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
-	if (c) {
-		unsigned long vaddr = c->vm_start;
-		struct page *end = page + (1 << order);
-
-		split_page(page, order);
-
-		/*
-		 * Set the "dma handle"
-		 */
-		*handle = page_to_phys(page);
-
-		do {
-			SetPageReserved(page);
-			map_kernel_page(vaddr, page_to_phys(page),
-					pgprot_val(pgprot_noncached(PAGE_KERNEL)));
-			page++;
-			vaddr += PAGE_SIZE;
-		} while (size -= PAGE_SIZE);
-
-		/*
-		 * Free the otherwise unused pages.
-		 */
-		while (page < end) {
-			__free_page(page);
-			page++;
-		}
-
-		return (void *)c->vm_start;
-	}
-
-	if (page)
-		__free_pages(page, order);
- no_page:
-	return NULL;
-}
-EXPORT_SYMBOL(__dma_alloc_coherent);
-
-/*
- * free a page as defined by the above mapping.
- */
-void __dma_free_coherent(size_t size, void *vaddr)
-{
-	struct ppc_vm_region *c;
-	unsigned long flags, addr;
-
-	size = PAGE_ALIGN(size);
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	addr = c->vm_start;
-	do {
-		pte_t *ptep;
-		unsigned long pfn;
-
-		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
-							       addr),
-						    addr),
-					 addr);
-		if (!pte_none(*ptep) && pte_present(*ptep)) {
-			pfn = pte_pfn(*ptep);
-			pte_clear(&init_mm, addr, ptep);
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-				__free_reserved_page(page);
-			}
-		}
-		addr += PAGE_SIZE;
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	list_del(&c->vm_list);
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
-	return;
-
- no_area:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-	       __func__, vaddr);
-	dump_stack();
-}
-EXPORT_SYMBOL(__dma_free_coherent);
-
 /*
  * make an area consistent.
  */
-void __dma_sync(void *vaddr, size_t size, int direction)
+static void __dma_sync(void *vaddr, size_t size, int direction)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
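
Nothing in this file replaces the allocator deleted above: coherent allocations on non-coherent PowerPC are now served by the kernel's generic dma-direct allocator, and this file shrinks to the cache-maintenance hooks added in the final hunk. The driver-facing API is unchanged. A minimal sketch of a consumer (example_alloc_ring() and its ring buffer are hypothetical, not from this patch):

/*
 * Hypothetical consumer: drivers keep calling dma_alloc_coherent();
 * only the backend changed. The generic allocator hands back pages
 * that arch_dma_prep_coherent() (added below) has flushed from the
 * data cache before they are remapped non-cacheable.
 */
#include <linux/dma-mapping.h>

static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, use 'ring' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
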
@@ -340,7 +45,6 @@
 		break;
 	}
 }
-EXPORT_SYMBOL(__dma_sync);

 #ifdef CONFIG_HIGHMEM
 /*
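
__dma_sync() keeps its body in the lines elided between these hunks; only its linkage changes (static, since the exported users are gone). For orientation, the direction dispatch in the mainline version of this function looks roughly like the sketch below; this is reference material, not part of the patch:

/* Sketch of the elided __dma_sync() body, mirroring mainline. */
switch (direction) {
case DMA_NONE:
	BUG();
case DMA_FROM_DEVICE:
	/*
	 * invalidate only when cache-line aligned, otherwise there is
	 * the potential for discarding uncommitted data from the cache
	 */
	if ((start | end) & (L1_CACHE_BYTES - 1))
		flush_dcache_range(start, end);
	else
		invalidate_dcache_range(start, end);
	break;
case DMA_TO_DEVICE:		/* writeback only */
	clean_dcache_range(start, end);
	break;
case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
	flush_dcache_range(start, end);
	break;
}
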
@@ -387,34 +91,34 @@
  * __dma_sync_page makes memory consistent. identical to __dma_sync, but
  * takes a struct page instead of a virtual address
  */
-void __dma_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction)
+static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 {
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned offset = paddr & ~PAGE_MASK;
+
 #ifdef CONFIG_HIGHMEM
-	__dma_sync_page_highmem(page, offset, size, direction);
+	__dma_sync_page_highmem(page, offset, size, dir);
 #else
 	unsigned long start = (unsigned long)page_address(page) + offset;
-	__dma_sync((void *)start, size, direction);
+	__dma_sync((void *)start, size, dir);
 #endif
 }
-EXPORT_SYMBOL(__dma_sync_page);

-/*
- * Return the PFN for a given cpu virtual address returned by
- * __dma_alloc_coherent. This is used by dma_mmap_coherent()
- */
-unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	/* This should always be populated, so we don't test every
-	 * level. If that fails, we'll have a nice crash which
-	 * will be as good as a BUG_ON()
-	 */
-	pgd_t *pgd = pgd_offset_k(cpu_addr);
-	pud_t *pud = pud_offset(pgd, cpu_addr);
-	pmd_t *pmd = pmd_offset(pud, cpu_addr);
-	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+	__dma_sync_page(paddr, size, dir);
+}

-	if (pte_none(*ptep) || !pte_present(*ptep))
-		return 0;
-	return pte_pfn(*ptep);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	__dma_sync_page(paddr, size, dir);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	unsigned long kaddr = (unsigned long)page_address(page);
+
+	flush_dcache_range(kaddr, kaddr + size);
 }
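
The three functions added at the end are the generic DMA layer's architecture hooks: with ARCH_HAS_SYNC_DMA_FOR_DEVICE and ARCH_HAS_SYNC_DMA_FOR_CPU selected, dma-direct invokes them around every streaming mapping, and arch_dma_prep_coherent() flushes freshly allocated coherent buffers before they are remapped uncacheable. A hedged sketch of how a driver exercises them indirectly (example_rx() and its buffer are hypothetical):

/*
 * Hypothetical driver-side view of the new hooks. dma_map_single()
 * lands in dma-direct, which calls arch_sync_dma_for_device() so the
 * device sees consistent memory; dma_unmap_single() calls
 * arch_sync_dma_for_cpu() before the CPU reads the buffer back.
 */
#include <linux/dma-mapping.h>

static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return;

	/* ... the device DMAs into the buffer ... */

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	/* CPU reads of 'buf' are now coherent with what the device wrote */
}
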