hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/arch/sparc/mm/iommu.c
@@ -12,13 +12,10 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
-#include <linux/scatterlist.h>
+#include <linux/dma-map-ops.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/io.h>
 #include <asm/mxcc.h>
 #include <asm/mbus.h>
@@ -53,6 +50,9 @@
 
 #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
 #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops;
 
 static void __init sbus_iommu_init(struct platform_device *op)
 {
@@ -129,6 +129,11 @@
 		(int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
 
 	op->dev.archdata.iommu = iommu;
+
+	if (flush_page_for_dma_global)
+		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
+	else
+		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
 }
 
 static int __init iommu_init(void)
@@ -175,16 +180,37 @@
 	}
 }
 
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t len, bool per_page_flush)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	iopte_t *iopte, *iopte0;
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	unsigned long off = paddr & ~PAGE_MASK;
+	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(paddr);
 	unsigned int busa, busa0;
-	int i;
+	iopte_t *iopte, *iopte0;
+	int ioptex, i;
+
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return DMA_MAPPING_ERROR;
+
+	/*
+	 * We expect unmapped highmem pages to be not in the cache.
+	 * XXX Is this a good assumption?
+	 * XXX What if someone else unmaps it here and races us?
+	 */
+	if (per_page_flush && !PageHighMem(page)) {
+		unsigned long vaddr, p;
+
+		vaddr = (unsigned long)page_address(page) + offset;
+		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
+			flush_page_for_dma(p);
+	}
 
 	/* page color = pfn of page */
-	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
 	if (ioptex < 0)
 		panic("iommu out");
 	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
@@ -193,102 +219,74 @@
 	busa = busa0;
 	iopte = iopte0;
 	for (i = 0; i < npages; i++) {
-		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
 		iommu_invalidate_page(iommu->regs, busa);
 		busa += PAGE_SIZE;
 		iopte++;
-		page++;
+		pfn++;
 	}
 
 	iommu_flush_iotlb(iopte0, npages);
-
-	return busa0;
+	return busa0 + off;
 }
 
-static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
-{
-	unsigned long off;
-	int npages;
-	struct page *page;
-	u32 busa;
-
-	off = (unsigned long)vaddr & ~PAGE_MASK;
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
-	busa = iommu_get_one(dev, page, npages);
-	return busa + off;
-}
-
-static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return iommu_get_scsi_one(dev, vaddr, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, false);
 }
 
-static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+	return __sbus_iommu_map_page(dev, page, offset, len, true);
+}
 
-	while(page < ((unsigned long)(vaddr + len))) {
-		flush_page_for_dma(page);
-		page += PAGE_SIZE;
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs,
+		bool per_page_flush)
+{
+	struct scatterlist *sg;
+	int j;
+
+	for_each_sg(sgl, sg, nents, j) {
+		sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, per_page_flush);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return 0;
+		sg->dma_length = sg->length;
 	}
-	return iommu_get_scsi_one(dev, vaddr, len);
+
+	return nents;
 }
 
-static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	int n;
-
 	flush_page_for_dma(0);
-	while (sz != 0) {
-		--sz;
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
 }
 
-static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page, oldpage = 0;
-	int n, i;
-
-	while(sz != 0) {
-		--sz;
-
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-		/*
-		 * We expect unmapped highmem pages to be not in the cache.
-		 * XXX Is this a good assumption?
-		 * XXX What if someone else unmaps it here and races us?
-		 */
-		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
-			for (i = 0; i < n; i++) {
-				if (page != oldpage) {	/* Already flushed? */
-					flush_page_for_dma(page);
-					oldpage = page;
-				}
-				page += PAGE_SIZE;
-			}
-		}
-
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
 }
 
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	int i;
+	unsigned int busa = dma_addr & PAGE_MASK;
+	unsigned long off = dma_addr & ~PAGE_MASK;
+	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned int i;
 
 	BUG_ON(busa < iommu->start);
-	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
 		iommu_invalidate_page(iommu->regs, busa);
@@ -297,39 +295,41 @@
 	bit_map_clear(&iommu->usemap, ioptex, npages);
 }
 
-static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long off;
-	int npages;
+	struct scatterlist *sg;
+	int i;
 
-	off = vaddr & ~PAGE_MASK;
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
-}
-
-static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
-{
-	int n;
-
-	while(sz != 0) {
-		--sz;
-
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+	for_each_sg(sgl, sg, nents, i) {
+		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+				attrs);
 		sg->dma_address = 0x21212121;
-		sg = sg_next(sg);
 	}
 }
 
 #ifdef CONFIG_SBUS
-static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
-    unsigned long addr, int len)
+static void *sbus_iommu_alloc(struct device *dev, size_t len,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	unsigned long page, end;
+	unsigned long va, addr, page, end, ret;
 	iopte_t *iopte = iommu->page_table;
 	iopte_t *first;
 	int ioptex;
+
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return NULL;
+
+	len = PAGE_ALIGN(len);
+	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
+	if (va == 0)
+		return NULL;
+
+	addr = ret = sparc_dma_alloc_resource(dev, len);
+	if (!addr)
+		goto out_free_pages;
 
 	BUG_ON((va & ~PAGE_MASK) != 0);
 	BUG_ON((addr & ~PAGE_MASK) != 0);
@@ -347,7 +347,6 @@
 	while(addr < end) {
 		page = va;
 		{
-			pgd_t *pgdp;
 			pmd_t *pmdp;
 			pte_t *ptep;
 
@@ -358,8 +357,7 @@
 			else
 				__flush_page_to_ram(page);
 
-			pgdp = pgd_offset(&init_mm, addr);
-			pmdp = pmd_offset(pgdp, addr);
+			pmdp = pmd_off_k(addr);
 			ptep = pte_offset_map(pmdp, addr);
 
 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
@@ -385,16 +383,25 @@
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 
-	*pba = iommu->start + (ioptex << PAGE_SHIFT);
-	return 0;
+	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
+	return (void *)ret;
+
+out_free_pages:
+	free_pages(va, get_order(len));
+	return NULL;
 }
 
-static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
+static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
+		dma_addr_t busa, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
 	iopte_t *iopte = iommu->page_table;
-	unsigned long end;
+	struct page *page = virt_to_page(cpu_addr);
 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned long end;
+
+	if (!sparc_dma_free_resource(cpu_addr, len))
+		return;
 
 	BUG_ON((busa & ~PAGE_MASK) != 0);
 	BUG_ON((len & ~PAGE_MASK) != 0);
@@ -408,40 +415,35 @@
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+
+	__free_pages(page, get_order(len));
 }
 #endif
 
-static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
-	.get_scsi_one		= iommu_get_scsi_one_gflush,
-	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
-	.release_scsi_one	= iommu_release_scsi_one,
-	.release_scsi_sgl	= iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area		= iommu_map_dma_area,
-	.unmap_dma_area		= iommu_unmap_dma_area,
+	.alloc			= sbus_iommu_alloc,
+	.free			= sbus_iommu_free,
 #endif
+	.map_page		= sbus_iommu_map_page_gflush,
+	.unmap_page		= sbus_iommu_unmap_page,
+	.map_sg			= sbus_iommu_map_sg_gflush,
+	.unmap_sg		= sbus_iommu_unmap_sg,
 };
 
-static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
-	.get_scsi_one		= iommu_get_scsi_one_pflush,
-	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
-	.release_scsi_one	= iommu_release_scsi_one,
-	.release_scsi_sgl	= iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area		= iommu_map_dma_area,
-	.unmap_dma_area		= iommu_unmap_dma_area,
+	.alloc			= sbus_iommu_alloc,
+	.free			= sbus_iommu_free,
 #endif
+	.map_page		= sbus_iommu_map_page_pflush,
+	.unmap_page		= sbus_iommu_unmap_page,
+	.map_sg			= sbus_iommu_map_sg_pflush,
+	.unmap_sg		= sbus_iommu_unmap_sg,
 };
 
 void __init ld_mmu_iommu(void)
 {
-	if (flush_page_for_dma_global) {
-		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		sparc32_dma_ops = &iommu_dma_gflush_ops;
-	} else {
-		sparc32_dma_ops = &iommu_dma_pflush_ops;
-	}
-
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
 		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
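
Per-device dispatch replaces the old global sparc32_dma_ops indirection: sbus_iommu_init() now picks sbus_iommu_dma_gflush_ops or sbus_iommu_dma_pflush_ops once per device, and drivers reach these ops only through the generic DMA API, which routes calls via dev->dma_ops. A minimal driver-side sketch of that usage follows; the function name example_sbus_dma and the PAGE_SIZE-sized transfers are hypothetical illustrations, not part of the patch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static int example_sbus_dma(struct device *dev, struct page *page)
{
	dma_addr_t busa;
	void *cpu;

	/* Streaming mapping: ends up in sbus_iommu_map_page_{g,p}flush. */
	busa = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, busa))
		return -ENOMEM;
	/* ... program the device with busa and run the transfer ... */
	dma_unmap_page(dev, busa, PAGE_SIZE, DMA_TO_DEVICE);

	/* Coherent buffer: ends up in sbus_iommu_alloc/sbus_iommu_free. */
	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &busa, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	dma_free_coherent(dev, PAGE_SIZE, cpu, busa);
	return 0;
}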