forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/ttm/ttm_bo_vm.c
--- a/kernel/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/kernel/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
 #include <linux/uaccess.h>
 #include <linux/mem_encrypt.h>
 
-#define TTM_BO_VM_NUM_PREFAULT 16
-
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				       struct vm_fault *vmf)
 {
@@ -60,18 +58,19 @@
 		goto out_clear;
 
 	/*
-	 * If possible, avoid waiting for GPU with mmap_sem
-	 * held.
+	 * If possible, avoid waiting for GPU with mmap_lock
+	 * held. We only do this if the fault allows retry and this
+	 * is the first attempt.
 	 */
-	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (fault_flag_allow_retry_first(vmf->flags)) {
 		ret = VM_FAULT_RETRY;
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_unlock;
 
 		ttm_bo_get(bo);
-		up_read(&vmf->vma->vm_mm->mmap_sem);
+		mmap_read_unlock(vmf->vma->vm_mm);
 		(void) dma_fence_wait(bo->moving, true);
-		ttm_bo_unreserve(bo);
+		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -102,15 +101,175 @@
 	if (bdev->driver->io_mem_pfn)
 		return bdev->driver->io_mem_pfn(bo, page_offset);
 
-	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
-		+ page_offset;
+	return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
 }
 
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map(), unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ *    0 on success and the bo was reserved.
+ *    VM_FAULT_RETRY if blocking wait.
+ *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+			     struct vm_fault *vmf)
+{
+	/*
+	 * Work around locking order reversal in fault / nopfn
+	 * between mmap_lock and bo_reserve: Perform a trylock operation
+	 * for reserve, and if it fails, retry the fault after waiting
+	 * for the buffer to become unreserved.
+	 */
+	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
+		/*
+		 * If the fault allows retry and this is the first
+		 * fault attempt, we try to release the mmap_lock
+		 * before waiting
+		 */
+		if (fault_flag_allow_retry_first(vmf->flags)) {
+			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				ttm_bo_get(bo);
+				mmap_read_unlock(vmf->vma->vm_mm);
+				if (!dma_resv_lock_interruptible(bo->base.resv,
+								 NULL))
+					dma_resv_unlock(bo->base.resv);
+				ttm_bo_put(bo);
+			}
+
+			return VM_FAULT_RETRY;
+		}
+
+		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+			return VM_FAULT_NOPAGE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible, or on insertion error.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	pgoff_t i;
+	vm_fault_t ret;
+	unsigned long pfn;
+	pfn_t pfnt;
+	struct ttm_tt *ttm = bo->ttm;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	/* Fault should not cross bo boundary. */
+	page_offset &= ~(fault_page_size - 1);
+	if (page_offset + fault_page_size > bo->num_pages)
+		goto out_fallback;
+
+	if (bo->mem.bus.is_iomem)
+		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+	else
+		pfn = page_to_pfn(ttm->pages[page_offset]);
+
+	/* pfn must be fault_page_size aligned. */
+	if ((pfn & (fault_page_size - 1)) != 0)
+		goto out_fallback;
+
+	/* Check that memory is contiguous. */
+	if (!bo->mem.bus.is_iomem) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+				goto out_fallback;
+		}
+	} else if (bo->bdev->driver->io_mem_pfn) {
+		for (i = 1; i < fault_page_size; ++i) {
+			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+				goto out_fallback;
+		}
+	}
+
+	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+	else
+		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+	if (ret != VM_FAULT_NOPAGE)
+		goto out_fallback;
+
+	return VM_FAULT_NOPAGE;
+out_fallback:
+	count_vm_event(THP_FAULT_FALLBACK);
+	return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+					struct ttm_buffer_object *bo,
+					pgoff_t page_offset,
+					pgoff_t fault_page_size,
+					pgprot_t pgprot)
+{
+	return VM_FAULT_FALLBACK;
+}
+#endif
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvice settings and the size of the GPU object
+ * backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ *   VM_FAULT_NOPAGE on success or pending signal
+ *   VM_FAULT_SIGBUS on unspecified error
+ *   VM_FAULT_OOM on out-of-memory
+ *   VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+				    pgprot_t prot,
+				    pgoff_t num_prefault,
+				    pgoff_t fault_page_size)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
-	    vma->vm_private_data;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
 	unsigned long page_offset;
 	unsigned long page_last;
@@ -118,65 +277,37 @@
 	struct ttm_tt *ttm = NULL;
 	struct page *page;
 	int err;
-	int i;
+	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_mem_type_manager *man =
-		&bdev->man[bo->mem.mem_type];
-	struct vm_area_struct cvma;
-
-	/*
-	 * Work around locking order reversal in fault / nopfn
-	 * between mmap_sem and bo_reserve: Perform a trylock operation
-	 * for reserve, and if it fails, retry the fault after waiting
-	 * for the buffer to become unreserved.
-	 */
-	err = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(err != 0)) {
-		if (err != -EBUSY)
-			return VM_FAULT_NOPAGE;
-
-		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
-			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-				ttm_bo_get(bo);
-				up_read(&vmf->vma->vm_mm->mmap_sem);
-				(void) ttm_bo_wait_unreserved(bo);
-				ttm_bo_put(bo);
-			}
-
-			return VM_FAULT_RETRY;
-		}
-
-		/*
-		 * If we'd want to change locking order to
-		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
-		 * instead of retrying the fault...
-		 */
-		return VM_FAULT_NOPAGE;
-	}
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
 	 * (if at all) by redirecting mmap to the exporter.
 	 */
-	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_unlock;
-	}
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+		return VM_FAULT_SIGBUS;
 
 	if (bdev->driver->fault_reserve_notify) {
+		struct dma_fence *moving = dma_fence_get(bo->moving);
+
 		err = bdev->driver->fault_reserve_notify(bo);
 		switch (err) {
 		case 0:
 			break;
 		case -EBUSY:
 		case -ERESTARTSYS:
-			ret = VM_FAULT_NOPAGE;
-			goto out_unlock;
+			dma_fence_put(moving);
+			return VM_FAULT_NOPAGE;
 		default:
-			ret = VM_FAULT_SIGBUS;
-			goto out_unlock;
+			dma_fence_put(moving);
+			return VM_FAULT_SIGBUS;
 		}
+
+		if (bo->moving != moving) {
+			ttm_bo_move_to_lru_tail_unlocked(bo);
+		}
+		dma_fence_put(moving);
 	}
 
 	/*
@@ -184,49 +315,23 @@
 	 * move.
 	 */
 	ret = ttm_bo_vm_fault_idle(bo, vmf);
-	if (unlikely(ret != 0)) {
-		if (ret == VM_FAULT_RETRY &&
-		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-			/* The BO has already been unreserved. */
-			return ret;
-		}
+	if (unlikely(ret != 0))
+		return ret;
 
-		goto out_unlock;
-	}
-
-	err = ttm_mem_io_lock(man, true);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_NOPAGE;
-		goto out_unlock;
-	}
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
+	if (unlikely(err != 0))
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
-		drm_vma_node_start(&bo->vma_node);
+		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
-	/*
-	 * Make a local vma copy to modify the page_prot member
-	 * and vm_flags if necessary. The vma parameter is protected
-	 * by mmap_sem in write mode.
-	 */
-	cvma = *vma;
-	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
-	if (bo->mem.bus.is_iomem) {
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-	} else {
+	prot = ttm_io_prot(bo->mem.placement, prot);
+	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
@@ -235,48 +340,56 @@
 		};
 
 		ttm = bo->ttm;
-		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-						cvma.vm_page_prot);
-
-		/* Allocate all page at once, most common usage */
-		if (ttm_tt_populate(ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bdev, bo->ttm, &ctx))
+			return VM_FAULT_OOM;
+	} else {
+		/* Iomem should not be marked encrypted */
+		prot = pgprot_decrypted(prot);
 	}
+
+	/* We don't prefault on huge faults. Yet. */
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
 	 */
-	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+	for (i = 0; i < num_prefault; ++i) {
 		if (bo->mem.bus.is_iomem) {
-			/* Iomem should not be marked encrypted */
-			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
-			page->index = drm_vma_node_start(&bo->vma_node) +
+			page->index = drm_vma_node_start(&bo->base.vma_node) +
 				page_offset;
 			pfn = page_to_pfn(page);
 		}
 
+		/*
+		 * Note that the value of @prot at this point may differ from
+		 * the value of @vma->vm_page_prot in the caching- and
+		 * encryption bits. This is because the exact location of the
+		 * data may not be known at mmap() time and may also change
+		 * at arbitrary times while the data is mmap'ed.
+		 * See vmf_insert_mixed_prot() for a discussion.
+		 */
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed(&cvma, address,
-					       __pfn_to_pfn_t(pfn, PFN_DEV));
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
 		else
-			ret = vmf_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -285,31 +398,50 @@
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
-out_unlock:
-	ttm_bo_unreserve(bo);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct vm_area_struct *vma = vmf->vma;
+	pgprot_t prot;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	prot = vma->vm_page_prot;
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 
 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
 	ttm_bo_get(bo);
 }
+EXPORT_SYMBOL(ttm_bo_vm_open);
 
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
 
 	ttm_bo_put(bo);
 	vma->vm_private_data = NULL;
 }
+EXPORT_SYMBOL(ttm_bo_vm_close);
 
 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
 				 unsigned long offset,
@@ -350,11 +482,13 @@
 	return len;
 }
 
-static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
-			    void *buf, int len, int write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		     void *buf, int len, int write)
 {
-	unsigned long offset = (addr) - vma->vm_start;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
+	unsigned long offset = (addr) - vma->vm_start +
+		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+		 << PAGE_SHIFT);
 	int ret;
 
 	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
@@ -366,12 +500,7 @@
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_SYSTEM:
-		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-			ret = ttm_tt_swapin(bo->ttm);
-			if (unlikely(ret != 0))
-				return ret;
-		}
-		/* fall through */
+		fallthrough;
 	case TTM_PL_TT:
 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
 		break;
@@ -387,12 +516,13 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_vm_access);
 
 static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close,
-	.access = ttm_bo_vm_access
+	.access = ttm_bo_vm_access,
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
@@ -402,16 +532,16 @@
 	struct drm_vma_offset_node *node;
 	struct ttm_buffer_object *bo = NULL;
 
-	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+	drm_vma_offset_lock_lookup(bdev->vma_manager);
 
-	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
 	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object, vma_node);
-		if (!kref_get_unless_zero(&bo->kref))
-			bo = NULL;
+		bo = container_of(node, struct ttm_buffer_object,
+				  base.vma_node);
+		bo = ttm_bo_get_unless_zero(bo);
 	}
 
-	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+	drm_vma_offset_unlock_lookup(bdev->vma_manager);
 
 	if (!bo)
 		pr_err("Could not find buffer object to map\n");
@@ -419,26 +549,8 @@
 	return bo;
 }
 
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		struct ttm_bo_device *bdev)
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
 {
-	struct ttm_bo_driver *driver;
-	struct ttm_buffer_object *bo;
-	int ret;
-
-	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
-	if (unlikely(!bo))
-		return -EINVAL;
-
-	driver = bo->bdev->driver;
-	if (unlikely(!driver->verify_access)) {
-		ret = -EPERM;
-		goto out_unref;
-	}
-	ret = driver->verify_access(bo, filp);
-	if (unlikely(ret != 0))
-		goto out_unref;
-
 	vma->vm_ops = &ttm_bo_vm_ops;
 
 	/*
@@ -457,6 +569,32 @@
 	 */
 	vma->vm_flags |= VM_MIXEDMAP;
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		struct ttm_bo_device *bdev)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+		return -EINVAL;
+
+	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
+		return -EINVAL;
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 out_unref:
 	ttm_bo_put(bo);
@@ -464,17 +602,10 @@
 }
 EXPORT_SYMBOL(ttm_bo_mmap);
 
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 {
-	if (vma->vm_pgoff != 0)
-		return -EACCES;
-
 	ttm_bo_get(bo);
-
-	vma->vm_ops = &ttm_bo_vm_ops;
-	vma->vm_private_data = bo;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+	ttm_bo_mmap_vma_setup(bo, vma);
 	return 0;
 }
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
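
Note on usage: this patch splits TTM's generic fault path into exported building blocks (ttm_bo_vm_reserve(), ttm_bo_vm_fault_reserved(), ttm_bo_vm_open/close/access), so a driver can supply its own fault() callback while reusing the common reservation and prefault logic. The fragment below is a minimal sketch of such a driver handler, assuming the post-patch API; the my_drv_* names are hypothetical, and the call sequence simply mirrors the generic ttm_bo_vm_fault() added above.

/*
 * Minimal sketch (not part of the patch): a driver-private fault handler
 * built on the helpers exported above. Names prefixed my_drv_ are
 * hypothetical; the sequence mirrors the generic ttm_bo_vm_fault().
 */
#include <linux/mm.h>
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static vm_fault_t my_drv_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	/* Trylock the reservation; may return VM_FAULT_RETRY with mmap_lock dropped. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/*
	 * A driver could derive num_prefault from madvise state or BO size;
	 * TTM_BO_VM_NUM_PREFAULT with a fault_page_size of 1 matches the
	 * default path above.
	 */
	ret = ttm_bo_vm_fault_reserved(vmf, vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* helper already dropped the reservation */

	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct my_drv_vm_ops = {
	.fault  = my_drv_fault,
	.open   = ttm_bo_vm_open,
	.close  = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};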