forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/drm_prime.c
@@ -29,53 +29,62 @@
 #include <linux/export.h>
 #include <linux/dma-buf.h>
 #include <linux/rbtree.h>
-#include <drm/drm_prime.h>
+
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_prime.h>
 
 #include "drm_internal.h"
 
-/*
- * DMA-BUF/GEM Object references and lifetime overview:
+/**
+ * DOC: overview and lifetime rules
  *
- * On the export the dma_buf holds a reference to the exporting GEM
- * object. It takes this reference in handle_to_fd_ioctl, when it
- * first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference needs to be released when the
- * final reference to the &dma_buf itself is dropped and its
- * &dma_buf_ops.release function is called. For GEM-based drivers,
- * the dma_buf should be exported using drm_gem_dmabuf_export() and
- * then released by drm_gem_dmabuf_release().
+ * Similar to GEM global names, PRIME file descriptors are also used to share
+ * buffer objects across processes. They offer additional security: as file
+ * descriptors must be explicitly sent over UNIX domain sockets to be shared
+ * between applications, they can't be guessed like the globally unique GEM
+ * names.
  *
- * On the import the importing GEM object holds a reference to the
- * dma_buf (which in turn holds a ref to the exporting GEM object).
- * It takes that reference in the fd_to_handle ioctl.
- * It calls dma_buf_get, creates an attachment to it and stores the
- * attachment in the GEM object. When this attachment is destroyed
- * when the imported object is destroyed, we remove the attachment
- * and drop the reference to the dma_buf.
+ * Drivers that support the PRIME API implement the
+ * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
+ * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
+ * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
+ * actual driver interfaces are provided through the
+ * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 *
- * When all the references to the &dma_buf are dropped, i.e. when
- * userspace has closed both handles to the imported GEM object (through the
- * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
- * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
- * are also gone, then the dma_buf gets destroyed. This can also happen as a
- * part of the clean up procedure in the drm_release() function if userspace
- * fails to properly clean up. Note that both the kernel and userspace (by
- * keeping the PRIME file descriptors open) can hold references onto a
- * &dma_buf.
+ * &dma_buf_ops implementations for GEM drivers are all individually exported
+ * for drivers which need to overwrite or reimplement some of them.
 *
- * Thus the chain of references always flows in one direction
- * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ * Reference Counting for GEM Drivers
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
- * Self-importing: if userspace is using PRIME as a replacement for flink
- * then it will get a fd->handle request for a GEM object that it created.
- * Drivers should detect this situation and return back the gem object
- * from the dma-buf private. Prime will do this automatically for drivers that
- * use the drm_gem_prime_{import,export} helpers.
+ * On the export the &dma_buf holds a reference to the exported buffer object,
+ * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
+ * IOCTL, when it first calls &drm_gem_object_funcs.export
+ * and stores the exporting GEM object in the &dma_buf.priv field. This
+ * reference needs to be released when the final reference to the &dma_buf
+ * itself is dropped and its &dma_buf_ops.release function is called. For
+ * GEM-based drivers, the &dma_buf should be exported using
+ * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
- * GEM struct &dma_buf_ops symbols are now exported. They can be reused by
- * drivers which implement GEM interface.
+ * Thus the chain of references always flows in one direction, avoiding loops:
+ * importing GEM object -> dma-buf -> exported GEM bo. A further complication
+ * is the lookup caches for import and export. These are required to guarantee
+ * that any given object will always have only one unique userspace handle. This
+ * is required to allow userspace to detect duplicated imports, since some GEM
+ * drivers do fail command submissions if a given buffer object is listed more
+ * than once. These import and export caches in &drm_prime_file_private only
+ * retain a weak reference, which is cleaned up when the corresponding object is
+ * released.
+ *
+ * Self-importing: If userspace is using PRIME as a replacement for flink then
+ * it will get a fd->handle request for a GEM object that it created. Drivers
+ * should detect this situation and return back the underlying object from the
+ * dma-buf private. For GEM based drivers this is handled in
+ * drm_gem_prime_import() already.
 */
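A minimal sketch of the driver-side wiring the DOC comment above describes, assuming a hypothetical "foo" GEM driver built against this tree (the per-object export hook lives in &drm_gem_object_funcs.export and falls back to drm_gem_prime_export() when left unset):

    static struct drm_driver foo_driver = {
            .driver_features        = DRIVER_GEM,
            /* PRIME plumbing, per the DOC comment above */
            .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
            .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
            .gem_prime_import       = drm_gem_prime_import,
            .gem_prime_mmap         = drm_gem_prime_mmap,
    };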
 
 struct drm_prime_member {
@@ -84,11 +93,6 @@
 
         struct rb_node dmabuf_rb;
         struct rb_node handle_rb;
-};
-
-struct drm_prime_attachment {
-        struct sg_table *sgt;
-        enum dma_data_direction dir;
 };
 
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
@@ -183,75 +187,6 @@
         return -ENOENT;
 }
 
-/**
- * drm_gem_map_attach - dma_buf attach implementation for GEM
- * @dma_buf: buffer to attach device to
- * @attach: buffer attachment data
- *
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int drm_gem_map_attach(struct dma_buf *dma_buf,
-                       struct dma_buf_attachment *attach)
-{
-        struct drm_prime_attachment *prime_attach;
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
-
-        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
-        if (!prime_attach)
-                return -ENOMEM;
-
-        prime_attach->dir = DMA_NONE;
-        attach->priv = prime_attach;
-
-        if (!dev->driver->gem_prime_pin)
-                return 0;
-
-        return dev->driver->gem_prime_pin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_attach);
-
-/**
- * drm_gem_map_detach - dma_buf detach implementation for GEM
- * @dma_buf: buffer to detach from
- * @attach: attachment to be detached
- *
- * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
- * callback.
- */
-void drm_gem_map_detach(struct dma_buf *dma_buf,
-                        struct dma_buf_attachment *attach)
-{
-        struct drm_prime_attachment *prime_attach = attach->priv;
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
-
-        if (prime_attach) {
-                struct sg_table *sgt = prime_attach->sgt;
-
-                if (sgt) {
-                        if (prime_attach->dir != DMA_NONE)
-                                dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-                                                   sgt->nents,
-                                                   prime_attach->dir,
-                                                   DMA_ATTR_SKIP_CPU_SYNC);
-                        sg_free_table(sgt);
-                }
-
-                kfree(sgt);
-                kfree(prime_attach);
-                attach->priv = NULL;
-        }
-
-        if (dev->driver->gem_prime_unpin)
-                dev->driver->gem_prime_unpin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_detach);
-
 void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                  uint32_t handle)
 {
@@ -281,76 +216,21 @@
         mutex_unlock(&prime_fpriv->lock);
 }
 
-/**
- * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
- * @attach: attachment whose scatterlist is to be returned
- * @dir: direction of DMA transfer
- *
- * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
- * can be used as the &dma_buf_ops.map_dma_buf callback.
- *
- * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error. May return -EINTR if it is interrupted by a signal.
- */
-
-struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
-                                     enum dma_data_direction dir)
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-        struct drm_prime_attachment *prime_attach = attach->priv;
-        struct drm_gem_object *obj = attach->dmabuf->priv;
-        struct sg_table *sgt;
-
-        if (WARN_ON(dir == DMA_NONE || !prime_attach))
-                return ERR_PTR(-EINVAL);
-
-        /* return the cached mapping when possible */
-        if (prime_attach->dir == dir)
-                return prime_attach->sgt;
-
-        /*
-         * two mappings with different directions for the same attachment are
-         * not allowed
-         */
-        if (WARN_ON(prime_attach->dir != DMA_NONE))
-                return ERR_PTR(-EBUSY);
-
-        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-
-        if (!IS_ERR(sgt)) {
-                if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-                                      DMA_ATTR_SKIP_CPU_SYNC)) {
-                        sg_free_table(sgt);
-                        kfree(sgt);
-                        sgt = ERR_PTR(-ENOMEM);
-                } else {
-                        prime_attach->sgt = sgt;
-                        prime_attach->dir = dir;
-                }
-        }
-
-        return sgt;
+        mutex_init(&prime_fpriv->lock);
+        prime_fpriv->dmabufs = RB_ROOT;
+        prime_fpriv->handles = RB_ROOT;
 }
-EXPORT_SYMBOL(drm_gem_map_dma_buf);
 
-/**
- * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
- * @attach: attachment to unmap buffer from
- * @sgt: scatterlist info of the buffer to unmap
- * @dir: direction of DMA transfer
- *
- * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
- */
-void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-                           struct sg_table *sgt,
-                           enum dma_data_direction dir)
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-        /* nothing to be done here */
+        /* by now drm_gem_release should've made sure the list is empty */
+        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
 }
-EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 
 /**
- * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
  * @dev: parent device for the exported dmabuf
  * @exp_info: the export information used by dma_buf_export()
 *
@@ -364,6 +244,7 @@
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                       struct dma_buf_export_info *exp_info)
 {
+        struct drm_gem_object *obj = exp_info->priv;
         struct dma_buf *dma_buf;
 
         dma_buf = dma_buf_export(exp_info);
@@ -371,18 +252,19 @@
                 return dma_buf;
 
         drm_dev_get(dev);
-        drm_gem_object_get(exp_info->priv);
+        drm_gem_object_get(obj);
+        dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
 
         return dma_buf;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_export);
 
 /**
- * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
  * @dma_buf: buffer to be released
 *
  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
- * must use this in their dma_buf ops structure as the release callback.
+ * must use this in their &dma_buf_ops structure as the release callback.
  * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
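As the kerneldoc above spells out, even a driver that supplies its own &dma_buf_ops must pair drm_gem_dmabuf_export() with drm_gem_dmabuf_release() for the reference counting to balance. A sketch of such a driver-specific export hook; foo_gem_export and foo_dmabuf_ops are hypothetical:

    static struct dma_buf *foo_gem_export(struct drm_gem_object *obj, int flags)
    {
            struct dma_buf_export_info exp_info = {
                    .exp_name = KBUILD_MODNAME,
                    .owner = obj->dev->driver->fops->owner,
                    /* foo_dmabuf_ops must set .release = drm_gem_dmabuf_release */
                    .ops = &foo_dmabuf_ops,
                    .size = obj->size,
                    .flags = flags,
                    .priv = obj,
                    .resv = obj->resv,
            };

            return drm_gem_dmabuf_export(obj->dev, &exp_info);
    }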
@@ -392,187 +274,107 @@
         struct drm_device *dev = obj->dev;
 
         /* drop the reference on the export fd holds */
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
 
         drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
 /**
- * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
- * @dma_buf: buffer to be mapped
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
 *
- * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
- * callback.
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object. The actual
+ * importing of a GEM object from the dma-buf is done through the
+ * &drm_driver.gem_prime_import driver callback.
 *
- * Returns the kernel virtual address.
+ * Returns 0 on success or a negative error code on failure.
 */
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+                               struct drm_file *file_priv, int prime_fd,
+                               uint32_t *handle)
 {
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
+        struct dma_buf *dma_buf;
+        struct drm_gem_object *obj;
+        int ret;
 
-        if (dev->driver->gem_prime_vmap)
-                return dev->driver->gem_prime_vmap(obj);
+        dma_buf = dma_buf_get(prime_fd);
+        if (IS_ERR(dma_buf))
+                return PTR_ERR(dma_buf);
+
+        mutex_lock(&file_priv->prime.lock);
+
+        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+                        dma_buf, handle);
+        if (ret == 0)
+                goto out_put;
+
+        /* never seen this one, need to import */
+        mutex_lock(&dev->object_name_lock);
+        if (dev->driver->gem_prime_import)
+                obj = dev->driver->gem_prime_import(dev, dma_buf);
         else
-                return NULL;
+                obj = drm_gem_prime_import(dev, dma_buf);
+        if (IS_ERR(obj)) {
+                ret = PTR_ERR(obj);
+                goto out_unlock;
+        }
+
+        if (obj->dma_buf) {
+                WARN_ON(obj->dma_buf != dma_buf);
+        } else {
+                obj->dma_buf = dma_buf;
+                get_dma_buf(dma_buf);
+        }
+
+        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
+        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+        drm_gem_object_put(obj);
+        if (ret)
+                goto out_put;
+
+        ret = drm_prime_add_buf_handle(&file_priv->prime,
+                        dma_buf, *handle);
+        mutex_unlock(&file_priv->prime.lock);
+        if (ret)
+                goto fail;
+
+        dma_buf_put(dma_buf);
+
+        return 0;
+
+fail:
+        /* hmm, if driver attached, we are relying on the free-object path
+         * to detach.. which seems ok..
+         */
+        drm_gem_handle_delete(file_priv, *handle);
+        dma_buf_put(dma_buf);
+        return ret;
+
+out_unlock:
+        mutex_unlock(&dev->object_name_lock);
+out_put:
+        mutex_unlock(&file_priv->prime.lock);
+        dma_buf_put(dma_buf);
+        return ret;
 }
-EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
 
-/**
- * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
- *
- * Releases a kernel virtual mapping. This can be used as the
- * &dma_buf_ops.vunmap callback.
- */
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
+        struct drm_prime_handle *args = data;
 
-        if (dev->driver->gem_prime_vunmap)
-                dev->driver->gem_prime_vunmap(obj, vaddr);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
-
-/**
- * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
- * @dma_buf: buffer to query
- * @uuid: uuid outparam
- *
- * Queries the buffer's virtio UUID. This can be used as the
- * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
-{
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
-
-        if (!dev->driver->gem_prime_get_uuid)
-                return -ENODEV;
-
-        return dev->driver->gem_prime_get_uuid(obj, uuid);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
-
-/**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-        return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-                           void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
-/**
- * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
- * @dma_buf: buffer to be mapped
- * @vma: virtual address range
- *
- * Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
-        struct drm_gem_object *obj = dma_buf->priv;
-        struct drm_device *dev = obj->dev;
-
-        if (!dev->driver->gem_prime_mmap)
+        if (!dev->driver->prime_fd_to_handle)
                 return -ENOSYS;
 
-        return dev->driver->gem_prime_mmap(obj, vma);
+        return dev->driver->prime_fd_to_handle(dev, file_priv,
+                        args->fd, &args->handle);
 }
-EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
-
-static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
-        .attach = drm_gem_map_attach,
-        .detach = drm_gem_map_detach,
-        .map_dma_buf = drm_gem_map_dma_buf,
-        .unmap_dma_buf = drm_gem_unmap_dma_buf,
-        .release = drm_gem_dmabuf_release,
-        .map = drm_gem_dmabuf_kmap,
-        .unmap = drm_gem_dmabuf_kunmap,
-        .mmap = drm_gem_dmabuf_mmap,
-        .vmap = drm_gem_dmabuf_vmap,
-        .vunmap = drm_gem_dmabuf_vunmap,
-        .get_uuid = drm_gem_dmabuf_get_uuid,
-};
-
-/**
- * DOC: PRIME Helpers
- *
- * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
- * simpler APIs by using the helper functions @drm_gem_prime_export and
- * @drm_gem_prime_import. These functions implement dma-buf support in terms of
- * six lower-level driver callbacks:
- *
- * Export callbacks:
- *
- * * @gem_prime_pin (optional): prepare a GEM object for exporting
- * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- * * @gem_prime_vmap: vmap a buffer exported by your driver
- * * @gem_prime_vunmap: vunmap a buffer exported by your driver
- * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
- *
- * Import callback:
- *
- * * @gem_prime_import_sg_table (import): produce a GEM object from another
- *   driver's scatter/gather table
- */
-
-/**
- * drm_gem_prime_export - helper library implementation of the export callback
- * @dev: drm_device to export from
- * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC and DRM_RDWR
- *
- * This is the implementation of the gem_prime_export functions for GEM drivers
- * using the PRIME helpers.
- */
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-                                     struct drm_gem_object *obj,
-                                     int flags)
-{
-        struct dma_buf_export_info exp_info = {
-                .exp_name = KBUILD_MODNAME, /* white lie for debug */
-                .owner = dev->driver->fops->owner,
-                .ops = &drm_gem_prime_dmabuf_ops,
-                .size = obj->size,
-                .flags = flags,
-                .priv = obj,
-        };
-
-        if (dev->driver->gem_prime_res_obj)
-                exp_info.resv = dev->driver->gem_prime_res_obj(obj);
-
-        return drm_gem_dmabuf_export(dev, &exp_info);
-}
-EXPORT_SYMBOL(drm_gem_prime_export);
 
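For context, the ioctl pair above is what userspace reaches through libdrm; a sketch of exporting a handle for sharing with another process (error handling trimmed):

    #include <stdint.h>
    #include <xf86drm.h>    /* libdrm wrappers around the PRIME ioctls */

    static int export_bo(int drm_fd, uint32_t handle)
    {
            int prime_fd = -1;

            /* wraps DRM_IOCTL_PRIME_HANDLE_TO_FD, i.e. drm_prime_handle_to_fd_ioctl() */
            if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd))
                    return -1;

            /*
             * prime_fd can now be passed to another process over a UNIX domain
             * socket (SCM_RIGHTS); the receiver converts it back with
             * drmPrimeFDToHandle(), i.e. drm_prime_fd_to_handle_ioctl().
             */
            return prime_fd;
    }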
 static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                   struct drm_gem_object *obj,
@@ -586,7 +388,12 @@
                 return dmabuf;
         }
 
-        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+        if (obj->funcs && obj->funcs->export)
+                dmabuf = obj->funcs->export(obj, flags);
+        else if (dev->driver->gem_prime_export)
+                dmabuf = dev->driver->gem_prime_export(obj, flags);
+        else
+                dmabuf = drm_gem_prime_export(obj, flags);
         if (IS_ERR(dmabuf)) {
                 /* normally the created dma-buf takes ownership of the ref,
                  * but if that fails then drop the ref
@@ -616,7 +423,7 @@
  * This is the PRIME export function which GEM drivers must use to ensure
  * correct lifetime management of the underlying GEM object. The actual
  * exporting from a GEM object to a dma-buf is done through the
- * gem_prime_export driver callback.
+ * &drm_driver.gem_prime_export driver callback.
 */
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                                struct drm_file *file_priv, uint32_t handle,
@@ -697,13 +504,203 @@
 fail_put_dmabuf:
         dma_buf_put(dmabuf);
 out:
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
 out_unlock:
         mutex_unlock(&file_priv->prime.lock);
 
         return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+        struct drm_prime_handle *args = data;
+
+        if (!dev->driver->prime_handle_to_fd)
+                return -ENOSYS;
+
+        /* check flags are valid */
+        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
+                return -EINVAL;
+
+        return dev->driver->prime_handle_to_fd(dev, file_priv,
+                        args->handle, args->flags, &args->fd);
+}
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement &drm_gem_object_funcs.export and
+ * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
+ * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
+ * implement dma-buf support in terms of some lower-level helpers, which are
+ * again exported for drivers to use individually:
+ *
+ * Exporting buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Optional pinning of buffers is handled at dma-buf attach and detach time in
+ * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
+ * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * For kernel-internal access there's drm_gem_dmabuf_vmap() and
+ * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
+ * drm_gem_dmabuf_mmap().
+ *
+ * Note that these export helpers can only be used if the underlying backing
+ * storage is fully coherent and either permanently pinned or safe to pin
+ * indefinitely.
+ *
+ * FIXME: The underlying helper functions are named rather inconsistently.
+ *
+ * Importing buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Importing dma-bufs using drm_gem_prime_import() relies on
+ * &drm_driver.gem_prime_import_sg_table.
+ *
+ * Note that, similarly to the export helpers, this permanently pins the
+ * underlying backing storage, which is ok for scanout but not the best option
+ * for sharing lots of buffers for rendering.
+ */
+
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @attach: buffer attachment data
+ *
+ * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback. Must be used together with
+ * drm_gem_map_detach().
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf,
+                       struct dma_buf_attachment *attach)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+
+        return drm_gem_pin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_attach);
+
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
+ * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
+ * &dma_buf_ops.detach callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+                        struct dma_buf_attachment *attach)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+
+        drm_gem_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_detach);
+
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
+ * with drm_gem_unmap_dma_buf().
+ *
+ * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ */
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+                                     enum dma_data_direction dir)
+{
+        struct drm_gem_object *obj = attach->dmabuf->priv;
+        struct sg_table *sgt;
+        int ret;
+
+        if (WARN_ON(dir == DMA_NONE))
+                return ERR_PTR(-EINVAL);
+
+        if (obj->funcs)
+                sgt = obj->funcs->get_sg_table(obj);
+        else
+                sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+        ret = dma_map_sgtable(attach->dev, sgt, dir,
+                              DMA_ATTR_SKIP_CPU_SYNC);
+        if (ret) {
+                sg_free_table(sgt);
+                kfree(sgt);
+                sgt = ERR_PTR(ret);
+        }
+
+        return sgt;
+}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
+
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
+ *
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+                           struct sg_table *sgt,
+                           enum dma_data_direction dir)
+{
+        if (!sgt)
+                return;
+
+        dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
+        sg_free_table(sgt);
+        kfree(sgt);
+}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
+
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
+ *
+ * Returns the kernel virtual address or NULL on failure.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+        void *vaddr;
+
+        vaddr = drm_gem_vmap(obj);
+        if (IS_ERR(vaddr))
+                vaddr = NULL;
+
+        return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
+ * device specific handling.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+
+        drm_gem_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
@@ -723,6 +720,18 @@
         struct file *fil;
         int ret;
 
+        /* Add the fake offset */
+        vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+        if (obj->funcs && obj->funcs->mmap) {
+                ret = obj->funcs->mmap(obj, vma);
+                if (ret)
+                        return ret;
+                vma->vm_private_data = obj;
+                drm_gem_object_get(obj);
+                return 0;
+        }
+
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
         fil = kzalloc(sizeof(*fil), GFP_KERNEL);
         if (!priv || !fil) {
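The new fast path above only needs the object to provide &drm_gem_object_funcs.mmap; a sketch of such a hook, assuming hypothetical foo_* helpers, which relies on the fake offset already having been applied by drm_gem_prime_mmap():

    static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
    {
            /* vma->vm_pgoff already includes the fake offset added by the caller */
            vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
            vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
            vma->vm_ops = &foo_gem_vm_ops;  /* hypothetical; faults in backing pages */

            return 0;
    }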
@@ -738,8 +747,6 @@
         if (ret)
                 goto out;
 
-        vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
-
         ret = obj->dev->driver->fops->mmap(fil, vma);
 
         drm_vma_node_revoke(&obj->vma_node, priv);
@@ -752,14 +759,176 @@
 EXPORT_SYMBOL(drm_gem_prime_mmap);
 
 /**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
+ * which should be set to drm_gem_prime_mmap().
+ *
+ * FIXME: There's really no point to this wrapper, drivers which need anything
+ * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+        struct drm_device *dev = obj->dev;
+
+        if (!dev->driver->gem_prime_mmap)
+                return -ENOSYS;
+
+        return dev->driver->gem_prime_mmap(obj, vma);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+
+/**
+ * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
+ * @dma_buf: buffer to query
+ * @uuid: uuid outparam
+ *
+ * Queries the buffer's virtio UUID. This can be used as the
+ * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
+{
+        struct drm_gem_object *obj = dma_buf->priv;
+        struct drm_device *dev = obj->dev;
+
+        if (!dev->driver->gem_prime_get_uuid)
+                return -ENODEV;
+
+        return dev->driver->gem_prime_get_uuid(obj, uuid);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+        .cache_sgt_mapping = true,
+        .attach = drm_gem_map_attach,
+        .detach = drm_gem_map_detach,
+        .map_dma_buf = drm_gem_map_dma_buf,
+        .unmap_dma_buf = drm_gem_unmap_dma_buf,
+        .release = drm_gem_dmabuf_release,
+        .mmap = drm_gem_dmabuf_mmap,
+        .vmap = drm_gem_dmabuf_vmap,
+        .vunmap = drm_gem_dmabuf_vunmap,
+        .get_uuid = drm_gem_dmabuf_get_uuid,
+};
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @dev: DRM device
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages; the driver is
+ * responsible for mapping the pages into the importer's address space for use
+ * with dma_buf itself.
+ *
+ * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                       struct page **pages, unsigned int nr_pages)
+{
+        struct sg_table *sg;
+        struct scatterlist *sge;
+        size_t max_segment = 0;
+
+        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+        if (!sg)
+                return ERR_PTR(-ENOMEM);
+
+        if (dev)
+                max_segment = dma_max_mapping_size(dev->dev);
+        if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+                max_segment = SCATTERLIST_MAX_SEGMENT;
+        sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+                                          nr_pages << PAGE_SHIFT,
+                                          max_segment,
+                                          NULL, 0, GFP_KERNEL);
+        if (IS_ERR(sge)) {
+                kfree(sg);
+                sg = ERR_CAST(sge);
+        }
+        return sg;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
861
+/**
862
+ * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
863
+ * @sgt: sg_table describing the buffer to check
864
+ *
865
+ * This helper calculates the contiguous size in the DMA address space
866
+ * of the the buffer described by the provided sg_table.
867
+ *
868
+ * This is useful for implementing
869
+ * &drm_gem_object_funcs.gem_prime_import_sg_table.
870
+ */
871
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
872
+{
873
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
874
+ struct scatterlist *sg;
875
+ unsigned long size = 0;
876
+ int i;
877
+
878
+ for_each_sgtable_dma_sg(sgt, sg, i) {
879
+ unsigned int len = sg_dma_len(sg);
880
+
881
+ if (!len)
882
+ break;
883
+ if (sg_dma_address(sg) != expected)
884
+ break;
885
+ expected += len;
886
+ size += len;
887
+ }
888
+ return size;
889
+}
890
+EXPORT_SYMBOL(drm_prime_get_contiguous_size);
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * This is the implementation of the &drm_gem_object_funcs.export function
+ * for GEM drivers using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+                                     int flags)
+{
+        struct drm_device *dev = obj->dev;
+        struct dma_buf_export_info exp_info = {
+                .exp_name = KBUILD_MODNAME, /* white lie for debug */
+                .owner = dev->driver->fops->owner,
+                .ops = &drm_gem_prime_dmabuf_ops,
+                .size = obj->size,
+                .flags = flags,
+                .priv = obj,
+                .resv = obj->resv,
+        };
+
+        return drm_gem_dmabuf_export(dev, &exp_info);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
  * drm_gem_prime_import_dev - core implementation of the import callback
  * @dev: drm_device to import into
  * @dma_buf: dma-buf object to import
  * @attach_dev: struct device to dma_buf attach
 *
- * This is the core of drm_gem_prime_import. It's designed to be called by
- * drivers who want to use a different device structure than dev->dev for
- * attaching via dma_buf.
+ * This is the core of drm_gem_prime_import(). It's designed to be called by
+ * drivers who want to use a different device structure than &drm_device.dev
+ * for attaching via dma_buf. This function calls
+ * &drm_driver.gem_prime_import_sg_table internally.
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
 */
 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                 struct dma_buf *dma_buf,
@@ -804,6 +973,7 @@
         }
 
         obj->import_attach = attach;
+        obj->resv = dma_buf->resv;
 
         return obj;
 
@@ -823,7 +993,12 @@
  * @dma_buf: dma-buf object to import
 *
  * This is the implementation of the gem_prime_import function for GEM drivers
- * using the PRIME helpers.
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
 */
 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
@@ -831,151 +1006,6 @@
         return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
 }
 EXPORT_SYMBOL(drm_gem_prime_import);
-
-/**
- * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
- * @file_priv: drm file-private structure
- * @prime_fd: fd id of the dma-buf which should be imported
- * @handle: pointer to storage for the handle of the imported buffer object
- *
- * This is the PRIME import function which must be used mandatorily by GEM
- * drivers to ensure correct lifetime management of the underlying GEM object.
- * The actual importing of GEM object from the dma-buf is done through the
- * gem_import_export driver callback.
- */
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
-                               struct drm_file *file_priv, int prime_fd,
-                               uint32_t *handle)
-{
-        struct dma_buf *dma_buf;
-        struct drm_gem_object *obj;
-        int ret;
-
-        dma_buf = dma_buf_get(prime_fd);
-        if (IS_ERR(dma_buf))
-                return PTR_ERR(dma_buf);
-
-        mutex_lock(&file_priv->prime.lock);
-
-        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
-                        dma_buf, handle);
-        if (ret == 0)
-                goto out_put;
-
-        /* never seen this one, need to import */
-        mutex_lock(&dev->object_name_lock);
-        obj = dev->driver->gem_prime_import(dev, dma_buf);
-        if (IS_ERR(obj)) {
-                ret = PTR_ERR(obj);
-                goto out_unlock;
-        }
-
-        if (obj->dma_buf) {
-                WARN_ON(obj->dma_buf != dma_buf);
-        } else {
-                obj->dma_buf = dma_buf;
-                get_dma_buf(dma_buf);
-        }
-
-        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
-        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
-        drm_gem_object_put_unlocked(obj);
-        if (ret)
-                goto out_put;
-
-        ret = drm_prime_add_buf_handle(&file_priv->prime,
-                        dma_buf, *handle);
-        mutex_unlock(&file_priv->prime.lock);
-        if (ret)
-                goto fail;
-
-        dma_buf_put(dma_buf);
-
-        return 0;
-
-fail:
-        /* hmm, if driver attached, we are relying on the free-object path
-         * to detach.. which seems ok..
-         */
-        drm_gem_handle_delete(file_priv, *handle);
-        dma_buf_put(dma_buf);
-        return ret;
-
-out_unlock:
-        mutex_unlock(&dev->object_name_lock);
-out_put:
-        mutex_unlock(&file_priv->prime.lock);
-        dma_buf_put(dma_buf);
-        return ret;
-}
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
-
-int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv)
-{
-        struct drm_prime_handle *args = data;
-
-        if (!drm_core_check_feature(dev, DRIVER_PRIME))
-                return -EINVAL;
-
-        if (!dev->driver->prime_handle_to_fd)
-                return -ENOSYS;
-
-        /* check flags are valid */
-        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
-                return -EINVAL;
-
-        return dev->driver->prime_handle_to_fd(dev, file_priv,
-                        args->handle, args->flags, &args->fd);
-}
-
-int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv)
-{
-        struct drm_prime_handle *args = data;
-
-        if (!drm_core_check_feature(dev, DRIVER_PRIME))
-                return -EINVAL;
-
-        if (!dev->driver->prime_fd_to_handle)
-                return -ENOSYS;
-
-        return dev->driver->prime_fd_to_handle(dev, file_priv,
-                        args->fd, &args->handle);
-}
-
-/**
- * drm_prime_pages_to_sg - converts a page array into an sg list
- * @pages: pointer to the array of page pointers to convert
- * @nr_pages: length of the page vector
- *
- * This helper creates an sg table object from a set of pages
- * the driver is responsible for mapping the pages into the
- * importers address space for use with dma_buf itself.
- */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
-{
-        struct sg_table *sg = NULL;
-        int ret;
-
-        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-        if (!sg) {
-                ret = -ENOMEM;
-                goto out;
-        }
-
-        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                                nr_pages << PAGE_SHIFT, GFP_KERNEL);
-        if (ret)
-                goto out;
-
-        return sg;
-out:
-        kfree(sg);
-        return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
 /**
@@ -986,36 +1016,33 @@
 *
  * Exports an sg table into an array of pages and addresses. This is currently
  * required by the TTM driver in order to do correct fault handling.
+ *
+ * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * implementation.
 */
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                      dma_addr_t *addrs, int max_entries)
 {
-        unsigned count;
-        struct scatterlist *sg;
-        struct page *page;
-        u32 len, index;
-        dma_addr_t addr;
+        struct sg_dma_page_iter dma_iter;
+        struct sg_page_iter page_iter;
+        struct page **p = pages;
+        dma_addr_t *a = addrs;
 
-        index = 0;
-        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-                len = sg->length;
-                page = sg_page(sg);
-                addr = sg_dma_address(sg);
-
-                while (len > 0) {
-                        if (WARN_ON(index >= max_entries))
+        if (pages) {
+                for_each_sgtable_page(sgt, &page_iter, 0) {
+                        if (WARN_ON(p - pages >= max_entries))
                                 return -1;
-                        if (pages)
-                                pages[index] = page;
-                        if (addrs)
-                                addrs[index] = addr;
-
-                        page++;
-                        addr += PAGE_SIZE;
-                        len -= PAGE_SIZE;
-                        index++;
+                        *p++ = sg_page_iter_page(&page_iter);
                 }
         }
+        if (addrs) {
+                for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+                        if (WARN_ON(a - addrs >= max_entries))
+                                return -1;
+                        *a++ = sg_page_iter_dma_address(&dma_iter);
+                }
+        }
+
         return 0;
 }
 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
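Usage-wise, the caller sizes both arrays from the object and lets the helper fail if the sg_table would overflow them; a sketch with a hypothetical foo_* import step:

    static int foo_import_pages(struct drm_gem_object *obj, struct sg_table *sgt,
                                struct page **pages, dma_addr_t *addrs)
    {
            int npages = obj->size >> PAGE_SHIFT;

            /* returns -1 if sgt describes more than npages worth of memory */
            return drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
    }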
@@ -1026,12 +1053,13 @@
  * @sg: the sg-table which was pinned at import time
 *
  * This is the cleanup function which GEM drivers need to call when they use
- * @drm_gem_prime_import to import dma-bufs.
+ * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
 {
         struct dma_buf_attachment *attach;
         struct dma_buf *dma_buf;
+
         attach = obj->import_attach;
         if (sg)
                 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
@@ -1041,16 +1069,3 @@
         dma_buf_put(dma_buf);
 }
 EXPORT_SYMBOL(drm_prime_gem_destroy);
-
-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
-{
-        mutex_init(&prime_fpriv->lock);
-        prime_fpriv->dmabufs = RB_ROOT;
-        prime_fpriv->handles = RB_ROOT;
-}
-
-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
-{
-        /* by now drm_gem_release should've made sure the list is empty */
-        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
-}
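Tying the import notes together, the &drm_gem_object_funcs.free pairing that the drm_gem_prime_import() kerneldoc above calls for would look roughly like this, with hypothetical foo_* names:

    static void foo_gem_free_object(struct drm_gem_object *obj)
    {
            struct foo_gem_object *bo = to_foo_gem(obj);    /* hypothetical wrapper */

            if (obj->import_attach)
                    /* drops the attachment and the dma-buf reference taken at import */
                    drm_prime_gem_destroy(obj, bo->sgt);

            drm_gem_object_release(obj);
            kfree(bo);
    }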