2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/gpu/drm/drm_memory.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file drm_memory.c
  * Memory management wrappers for DRM
  *
@@ -33,9 +33,16 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/highmem.h>
 #include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <xen/xen.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+
 #include "drm_legacy.h"
 
 #if IS_ENABLED(CONFIG_AGP)
@@ -51,7 +58,7 @@
 #endif
 
 static void *agp_remap(unsigned long offset, unsigned long size,
-		       struct drm_device * dev)
+		       struct drm_device *dev)
 {
 	unsigned long i, num_pages =
 	    PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,26 +101,26 @@
 }
 
 /** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory * handle, int pages)
+void drm_free_agp(struct agp_memory *handle, int pages)
 {
 	agp_free_memory(handle);
 }
 
 /** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory *handle, unsigned int start)
 {
 	return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory * handle)
+int drm_unbind_agp(struct agp_memory *handle)
 {
 	return agp_unbind_memory(handle);
 }
 
 #else /* CONFIG_AGP */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      struct drm_device * dev)
+			      struct drm_device *dev)
 {
 	return NULL;
 }
@@ -150,15 +157,34 @@
 }
 EXPORT_SYMBOL(drm_legacy_ioremapfree);
 
-u64 drm_get_max_iomem(void)
+bool drm_need_swiotlb(int dma_bits)
 {
 	struct resource *tmp;
 	resource_size_t max_iomem = 0;
+
+	/*
+	 * Xen paravirtual hosts require swiotlb regardless of requested dma
+	 * transfer size.
+	 *
+	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
+	 * allocator used in ttm_dma_populate() instead of
+	 * ttm_populate_and_map_pages(), which bounce buffers so much in
+	 * Xen it leads to swiotlb buffer exhaustion.
+	 */
+	if (xen_pv_domain())
+		return true;
+
+	/*
+	 * Enforce dma_alloc_coherent when memory encryption is active as well
+	 * for the same reasons as for Xen paravirtual hosts.
+	 */
+	if (mem_encrypt_active())
+		return true;
 
 	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
 		max_iomem = max(max_iomem, tmp->end);
 	}
 
-	return max_iomem;
+	return max_iomem > ((u64)1 << dma_bits);
 }
-EXPORT_SYMBOL(drm_get_max_iomem);
+EXPORT_SYMBOL(drm_need_swiotlb);