hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/alpha/kernel/pci_iommu.c
....@@ -7,11 +7,11 @@
77 #include <linux/mm.h>
88 #include <linux/pci.h>
99 #include <linux/gfp.h>
10
-#include <linux/bootmem.h>
10
+#include <linux/memblock.h>
1111 #include <linux/export.h>
1212 #include <linux/scatterlist.h>
1313 #include <linux/log2.h>
14
-#include <linux/dma-mapping.h>
14
+#include <linux/dma-map-ops.h>
1515 #include <linux/iommu-helper.h>
1616
1717 #include <asm/io.h>
....@@ -74,26 +74,38 @@
7474
7575 #ifdef CONFIG_DISCONTIGMEM
7676
77
- arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
77
+ arena = memblock_alloc_node(sizeof(*arena), align, nid);
7878 if (!NODE_DATA(nid) || !arena) {
7979 printk("%s: couldn't allocate arena from node %d\n"
8080 " falling back to system-wide allocation\n",
8181 __func__, nid);
82
- arena = alloc_bootmem(sizeof(*arena));
82
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
83
+ if (!arena)
84
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
85
+ sizeof(*arena));
8386 }
8487
85
- arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
88
+ arena->ptes = memblock_alloc_node(mem_size, align, nid);
8689 if (!NODE_DATA(nid) || !arena->ptes) {
8790 printk("%s: couldn't allocate arena ptes from node %d\n"
8891 " falling back to system-wide allocation\n",
8992 __func__, nid);
90
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
93
+ arena->ptes = memblock_alloc(mem_size, align);
94
+ if (!arena->ptes)
95
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
96
+ __func__, mem_size, align);
9197 }
9298
9399 #else /* CONFIG_DISCONTIGMEM */
94100
95
- arena = alloc_bootmem(sizeof(*arena));
96
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
101
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
102
+ if (!arena)
103
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
104
+ sizeof(*arena));
105
+ arena->ptes = memblock_alloc(mem_size, align);
106
+ if (!arena->ptes)
107
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
108
+ __func__, mem_size, align);
97109
98110 #endif /* CONFIG_DISCONTIGMEM */
99111
....@@ -129,12 +141,7 @@
129141 unsigned long boundary_size;
130142
131143 base = arena->dma_base >> PAGE_SHIFT;
132
- if (dev) {
133
- boundary_size = dma_get_seg_boundary(dev) + 1;
134
- boundary_size >>= PAGE_SHIFT;
135
- } else {
136
- boundary_size = 1UL << (32 - PAGE_SHIFT);
137
- }
144
+ boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
138145
139146 /* Search forward for the first mask-aligned sequence of N free ptes */
140147 ptes = arena->ptes;
....@@ -237,7 +244,7 @@
237244 ok = 0;
238245
239246 /* If both conditions above are met, we are fine. */
240
- DBGA("pci_dac_dma_supported %s from %pf\n",
247
+ DBGA("pci_dac_dma_supported %s from %ps\n",
241248 ok ? "yes" : "no", __builtin_return_address(0));
242249
243250 return ok;
....@@ -269,7 +276,7 @@
269276 && paddr + size <= __direct_map_size) {
270277 ret = paddr + __direct_map_base;
271278
272
- DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
279
+ DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
273280 cpu_addr, size, ret, __builtin_return_address(0));
274281
275282 return ret;
....@@ -280,7 +287,7 @@
280287 if (dac_allowed) {
281288 ret = paddr + alpha_mv.pci_dac_offset;
282289
283
- DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
290
+ DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
284291 cpu_addr, size, ret, __builtin_return_address(0));
285292
286293 return ret;
....@@ -291,7 +298,7 @@
291298 use direct_map above, it now must be considered an error. */
292299 if (! alpha_mv.mv_pci_tbi) {
293300 printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
294
- return 0;
301
+ return DMA_MAPPING_ERROR;
295302 }
296303
297304 arena = hose->sg_pci;
....@@ -307,7 +314,7 @@
307314 if (dma_ofs < 0) {
308315 printk(KERN_WARNING "pci_map_single failed: "
309316 "could not allocate dma page tables\n");
310
- return 0;
317
+ return DMA_MAPPING_ERROR;
311318 }
312319
313320 paddr &= PAGE_MASK;
....@@ -317,7 +324,7 @@
317324 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
318325 ret += (unsigned long)cpu_addr & ~PAGE_MASK;
319326
320
- DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
327
+ DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
321328 cpu_addr, size, npages, ret, __builtin_return_address(0));
322329
323330 return ret;
....@@ -384,14 +391,14 @@
384391 && dma_addr < __direct_map_base + __direct_map_size) {
385392 /* Nothing to do. */
386393
387
- DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
394
+ DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
388395 dma_addr, size, __builtin_return_address(0));
389396
390397 return;
391398 }
392399
393400 if (dma_addr > 0xffffffff) {
394
- DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
401
+ DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
395402 dma_addr, size, __builtin_return_address(0));
396403 return;
397404 }
....@@ -423,7 +430,7 @@
423430
424431 spin_unlock_irqrestore(&arena->lock, flags);
425432
426
- DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
433
+ DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
427434 dma_addr, size, npages, __builtin_return_address(0));
428435 }
429436
....@@ -443,10 +450,10 @@
443450 gfp &= ~GFP_DMA;
444451
445452 try_again:
446
- cpu_addr = (void *)__get_free_pages(gfp, order);
453
+ cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
447454 if (! cpu_addr) {
448455 printk(KERN_INFO "pci_alloc_consistent: "
449
- "get_free_pages failed from %pf\n",
456
+ "get_free_pages failed from %ps\n",
450457 __builtin_return_address(0));
451458 /* ??? Really atomic allocation? Otherwise we could play
452459 with vmalloc and sg if we can't find contiguous memory. */
....@@ -455,7 +462,7 @@
455462 memset(cpu_addr, 0, size);
456463
457464 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
458
- if (*dma_addrp == 0) {
465
+ if (*dma_addrp == DMA_MAPPING_ERROR) {
459466 free_pages((unsigned long)cpu_addr, order);
460467 if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
461468 return NULL;
....@@ -465,7 +472,7 @@
465472 goto try_again;
466473 }
467474
468
- DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
475
+ DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
469476 size, cpu_addr, *dma_addrp, __builtin_return_address(0));
470477
471478 return cpu_addr;
....@@ -485,7 +492,7 @@
485492 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
486493 free_pages((unsigned long)cpu_addr, get_order(size));
487494
488
- DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
495
+ DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
489496 dma_addr, size, __builtin_return_address(0));
490497 }
491498
....@@ -626,7 +633,7 @@
626633
627634 while (sg+1 < end && (int) sg[1].dma_address == -1) {
628635 size += sg[1].length;
629
- sg++;
636
+ sg = sg_next(sg);
630637 }
631638
632639 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
....@@ -671,7 +678,7 @@
671678 sg->dma_address
672679 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
673680 sg->length, dac_allowed);
674
- return sg->dma_address != 0;
681
+ return sg->dma_address != DMA_MAPPING_ERROR;
675682 }
676683
677684 start = sg;
....@@ -935,11 +942,6 @@
935942 return 0;
936943 }
937944
938
-static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
939
-{
940
- return dma_addr == 0;
941
-}
942
-
943945 const struct dma_map_ops alpha_pci_ops = {
944946 .alloc = alpha_pci_alloc_coherent,
945947 .free = alpha_pci_free_coherent,
....@@ -947,7 +949,10 @@
947949 .unmap_page = alpha_pci_unmap_page,
948950 .map_sg = alpha_pci_map_sg,
949951 .unmap_sg = alpha_pci_unmap_sg,
950
- .mapping_error = alpha_pci_mapping_error,
951952 .dma_supported = alpha_pci_supported,
953
+ .mmap = dma_common_mmap,
954
+ .get_sgtable = dma_common_get_sgtable,
955
+ .alloc_pages = dma_common_alloc_pages,
956
+ .free_pages = dma_common_free_pages,
952957 };
953958 EXPORT_SYMBOL(alpha_pci_ops);