forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/ia64/kernel/pci-dma.c
@@ -10,15 +10,9 @@
 #include <linux/module.h>
 #include <linux/dmar.h>
 #include <asm/iommu.h>
-#include <asm/machvec.h>
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <asm/page.h>
-
-dma_addr_t bad_dma_address __read_mostly;
-EXPORT_SYMBOL(bad_dma_address);
-
-static int iommu_sac_force __read_mostly;
 
 int no_iommu __read_mostly;
 #ifdef CONFIG_IOMMU_DEBUG
@@ -26,10 +20,6 @@
 #else
 int force_iommu __read_mostly;
 #endif
-
-int iommu_pass_through;
-
-extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -41,72 +31,3 @@
 
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
-
-void pci_iommu_shutdown(void)
-{
-	return;
-}
-
-void __init
-iommu_dma_init(void)
-{
-	return;
-}
-
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
-	/* Copied from i386. Doesn't make much sense, because it will
-	   only work for pci_alloc_coherent.
-	   The caller just has to use GFP_DMA in this case. */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	/* Tell the device to use SAC when IOMMU force is on. This
-	   allows the driver to use cheaper accesses in some cases.
-
-	   Problem with this is that if we overflow the IOMMU area and
-	   return DAC as fallback address the device may not handle it
-	   correctly.
-
-	   As a special case some controllers have a 39bit address
-	   mode that is as efficient as 32bit (aic79xx). Don't force
-	   SAC for these. Assume all masks <= 40 bits are of this
-	   type. Normally this doesn't make any difference, but gives
-	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
-		dev_info(dev, "Force SAC with mask %llx\n", mask);
-		return 0;
-	}
-
-	return 1;
-}
-EXPORT_SYMBOL(iommu_dma_supported);
-
-void __init pci_iommu_alloc(void)
-{
-	dma_ops = &intel_dma_ops;
-
-	intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
-	intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
-	intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
-	intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
-	intel_dma_ops.dma_supported = iommu_dma_supported;
-
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	if (!iommu_detected) {
-#ifdef CONFIG_IA64_GENERIC
-		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
-		machvec_init("dig");
-		swiotlb_dma_init();
-#else
-		panic("Unable to find Intel IOMMU");
-#endif /* CONFIG_IA64_GENERIC */
-	}
-#endif /* CONFIG_SWIOTLB */
-}