forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/drm_prime.c
@@ -29,53 +29,62 @@
 #include <linux/export.h>
 #include <linux/dma-buf.h>
 #include <linux/rbtree.h>
-#include <drm/drm_prime.h>
+
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_prime.h>
 
 #include "drm_internal.h"
 
-/*
- * DMA-BUF/GEM Object references and lifetime overview:
+/**
+ * DOC: overview and lifetime rules
  *
- * On the export the dma_buf holds a reference to the exporting GEM
- * object. It takes this reference in handle_to_fd_ioctl, when it
- * first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference needs to be released when the
- * final reference to the &dma_buf itself is dropped and its
- * &dma_buf_ops.release function is called. For GEM-based drivers,
- * the dma_buf should be exported using drm_gem_dmabuf_export() and
- * then released by drm_gem_dmabuf_release().
+ * Similar to GEM global names, PRIME file descriptors are also used to share
+ * buffer objects across processes. They offer additional security: as file
+ * descriptors must be explicitly sent over UNIX domain sockets to be shared
+ * between applications, they can't be guessed like the globally unique GEM
+ * names.
  *
- * On the import the importing GEM object holds a reference to the
- * dma_buf (which in turn holds a ref to the exporting GEM object).
- * It takes that reference in the fd_to_handle ioctl.
- * It calls dma_buf_get, creates an attachment to it and stores the
- * attachment in the GEM object. When this attachment is destroyed
- * when the imported object is destroyed, we remove the attachment
- * and drop the reference to the dma_buf.
+ * Drivers that support the PRIME API implement the
+ * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
+ * GEM based drivers must use drm_gem_prime_handle_to_fd() and
+ * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
+ * actual driver interface is provided through the &drm_gem_object_funcs.export
+ * and &drm_driver.gem_prime_import hooks.
 *
- * When all the references to the &dma_buf are dropped, i.e. when
- * userspace has closed both handles to the imported GEM object (through the
- * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
- * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
- * are also gone, then the dma_buf gets destroyed. This can also happen as a
- * part of the clean up procedure in the drm_release() function if userspace
- * fails to properly clean up. Note that both the kernel and userspace (by
- * keeeping the PRIME file descriptors open) can hold references onto a
- * &dma_buf.
+ * &dma_buf_ops implementations for GEM drivers are all individually exported
+ * for drivers which need to overwrite or reimplement some of them.
 *
- * Thus the chain of references always flows in one direction
- * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ * Reference Counting for GEM Drivers
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
- * Self-importing: if userspace is using PRIME as a replacement for flink
- * then it will get a fd->handle request for a GEM object that it created.
- * Drivers should detect this situation and return back the gem object
- * from the dma-buf private. Prime will do this automatically for drivers that
- * use the drm_gem_prime_{import,export} helpers.
+ * On the export the &dma_buf holds a reference to the exported buffer object,
+ * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
+ * IOCTL, when it first calls &drm_gem_object_funcs.export
+ * and stores the exporting GEM object in the &dma_buf.priv field. This
+ * reference needs to be released when the final reference to the &dma_buf
+ * itself is dropped and its &dma_buf_ops.release function is called. For
+ * GEM-based drivers, the &dma_buf should be exported using
+ * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
- * GEM struct &dma_buf_ops symbols are now exported. They can be resued by
- * drivers which implement GEM interface.
+ * Thus the chain of references always flows in one direction, avoiding loops:
+ * importing GEM object -> dma-buf -> exported GEM bo. A further complication
+ * is the lookup caches for import and export. These are required to guarantee
+ * that any given object will always have only one unique userspace handle. This
+ * is required to allow userspace to detect duplicated imports, since some GEM
+ * drivers do fail command submissions if a given buffer object is listed more
+ * than once. These import and export caches in &drm_prime_file_private only
+ * retain a weak reference, which is cleaned up when the corresponding object is
+ * released.
+ *
+ * Self-importing: If userspace is using PRIME as a replacement for flink then
+ * it will get a fd->handle request for a GEM object that it created. Drivers
+ * should detect this situation and return back the underlying object from the
+ * dma-buf private. For GEM based drivers this is handled in
+ * drm_gem_prime_import() already.
 */
 
 struct drm_prime_member {
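
Editor's note — to ground the fd-passing claim in the DOC comment above: a minimal, hypothetical userspace sketch (not part of this patch) that exports a GEM handle as a PRIME fd via libdrm and hands it to another process over a UNIX domain socket. drm_fd, handle and sock are assumed to be set up by the caller.

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <xf86drm.h>

/* Export 'handle' as a PRIME fd and send it over 'sock' as SCM_RIGHTS
 * ancillary data; unlike a global GEM name, the fd cannot be guessed. */
static int send_prime_fd(int drm_fd, uint32_t handle, int sock)
{
	int prime_fd;
	char cbuf[CMSG_SPACE(sizeof(int))] = {0};
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR,
			       &prime_fd))
		return -1;

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &prime_fd, sizeof(int));

	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

The receiving process calls drmPrimeFDToHandle() on its own DRM fd to turn the received fd back into a local GEM handle.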
@@ -84,11 +93,6 @@
 
 	struct rb_node dmabuf_rb;
 	struct rb_node handle_rb;
-};
-
-struct drm_prime_attachment {
-	struct sg_table *sgt;
-	enum dma_data_direction dir;
 };
 
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
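
Editor's note — struct drm_prime_member above carries two rb_node anchors so that one allocation is indexed in both lookup caches: prime_fpriv->dmabufs (keyed by &dma_buf pointer) and prime_fpriv->handles (keyed by handle). A simplified sketch of the insertion pattern, loosely mirroring drm_prime_add_buf_handle() in this file (second tree and error handling elided):

static int add_member_sketch(struct drm_prime_file_private *prime_fpriv,
			     struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	/* link into the tree keyed by dma_buf pointer ... */
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	/* ... and likewise into &prime_fpriv->handles via member->handle_rb */
	return 0;
}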
@@ -183,170 +187,50 @@
 	return -ENOENT;
 }
 
-/**
- * drm_gem_map_attach - dma_buf attach implementation for GEM
- * @dma_buf: buffer to attach device to
- * @attach: buffer attachment data
- *
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int drm_gem_map_attach(struct dma_buf *dma_buf,
-		       struct dma_buf_attachment *attach)
-{
-	struct drm_prime_attachment *prime_attach;
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
-
-	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
-	if (!prime_attach)
-		return -ENOMEM;
-
-	prime_attach->dir = DMA_NONE;
-	attach->priv = prime_attach;
-
-	if (!dev->driver->gem_prime_pin)
-		return 0;
-
-	return dev->driver->gem_prime_pin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_attach);
-
-/**
- * drm_gem_map_detach - dma_buf detach implementation for GEM
- * @dma_buf: buffer to detach from
- * @attach: attachment to be detached
- *
- * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
- * callback.
- */
-void drm_gem_map_detach(struct dma_buf *dma_buf,
-			struct dma_buf_attachment *attach)
-{
-	struct drm_prime_attachment *prime_attach = attach->priv;
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
-
-	if (prime_attach) {
-		struct sg_table *sgt = prime_attach->sgt;
-
-		if (sgt) {
-			if (prime_attach->dir != DMA_NONE)
-				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
-						   sgt->nents,
-						   prime_attach->dir,
-						   DMA_ATTR_SKIP_CPU_SYNC);
-			sg_free_table(sgt);
-		}
-
-		kfree(sgt);
-		kfree(prime_attach);
-		attach->priv = NULL;
-	}
-
-	if (dev->driver->gem_prime_unpin)
-		dev->driver->gem_prime_unpin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_detach);
-
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
-					struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+				 uint32_t handle)
 {
 	struct rb_node *rb;
 
-	rb = prime_fpriv->dmabufs.rb_node;
+	mutex_lock(&prime_fpriv->lock);
+
+	rb = prime_fpriv->handles.rb_node;
 	while (rb) {
 		struct drm_prime_member *member;
 
-		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
-		if (member->dma_buf == dma_buf) {
+		member = rb_entry(rb, struct drm_prime_member, handle_rb);
+		if (member->handle == handle) {
 			rb_erase(&member->handle_rb, &prime_fpriv->handles);
 			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
 
-			dma_buf_put(dma_buf);
+			dma_buf_put(member->dma_buf);
 			kfree(member);
-			return;
-		} else if (member->dma_buf < dma_buf) {
+			break;
+		} else if (member->handle < handle) {
 			rb = rb->rb_right;
 		} else {
 			rb = rb->rb_left;
 		}
 	}
+
+	mutex_unlock(&prime_fpriv->lock);
 }
 
-/**
- * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
- * @attach: attachment whose scatterlist is to be returned
- * @dir: direction of DMA transfer
- *
- * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
- * can be used as the &dma_buf_ops.map_dma_buf callback.
- *
- * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error. May return -EINTR if it is interrupted by a signal.
- */
-
-struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
-				     enum dma_data_direction dir)
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-	struct drm_prime_attachment *prime_attach = attach->priv;
-	struct drm_gem_object *obj = attach->dmabuf->priv;
-	struct sg_table *sgt;
-
-	if (WARN_ON(dir == DMA_NONE || !prime_attach))
-		return ERR_PTR(-EINVAL);
-
-	/* return the cached mapping when possible */
-	if (prime_attach->dir == dir)
-		return prime_attach->sgt;
-
-	/*
-	 * two mappings with different directions for the same attachment are
-	 * not allowed
-	 */
-	if (WARN_ON(prime_attach->dir != DMA_NONE))
-		return ERR_PTR(-EBUSY);
-
-	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-
-	if (!IS_ERR(sgt)) {
-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-				      DMA_ATTR_SKIP_CPU_SYNC)) {
-			sg_free_table(sgt);
-			kfree(sgt);
-			sgt = ERR_PTR(-ENOMEM);
-		} else {
-			prime_attach->sgt = sgt;
-			prime_attach->dir = dir;
-		}
-	}
-
-	return sgt;
+	mutex_init(&prime_fpriv->lock);
+	prime_fpriv->dmabufs = RB_ROOT;
+	prime_fpriv->handles = RB_ROOT;
 }
-EXPORT_SYMBOL(drm_gem_map_dma_buf);
 
-/**
- * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
- * @attach: attachment to unmap buffer from
- * @sgt: scatterlist info of the buffer to unmap
- * @dir: direction of DMA transfer
- *
- * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
- */
-void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-			   struct sg_table *sgt,
-			   enum dma_data_direction dir)
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-	/* nothing to be done here */
+	/* by now drm_gem_release should've made sure the list is empty */
+	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
 }
-EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
 
 /**
- * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
  * @dev: parent device for the exported dmabuf
  * @exp_info: the export information used by dma_buf_export()
 *
@@ -360,6 +244,7 @@
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
 				      struct dma_buf_export_info *exp_info)
 {
+	struct drm_gem_object *obj = exp_info->priv;
 	struct dma_buf *dma_buf;
 
 	dma_buf = dma_buf_export(exp_info);
@@ -367,18 +252,19 @@
 		return dma_buf;
 
 	drm_dev_get(dev);
-	drm_gem_object_get(exp_info->priv);
+	drm_gem_object_get(obj);
+	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
 
 	return dma_buf;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_export);
 
 /**
- * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
  * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
- * must use this in their dma_buf ops structure as the release callback.
+ * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
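
Editor's note — a hypothetical sketch of how a driver-side export hook is expected to call drm_gem_dmabuf_export(); foo_dmabuf_ops is an assumed driver-specific &dma_buf_ops. The helper takes the GEM object and device references, and with the change above also redirects f_mapping to the DRM device's anonymous inode:

static struct dma_buf *foo_gem_export(struct drm_gem_object *obj, int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = "foo",
		.owner = THIS_MODULE,
		.ops = &foo_dmabuf_ops,	/* driver-specific &dma_buf_ops */
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	/* takes a GEM reference and a drm_device reference on success */
	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}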
@@ -388,187 +274,107 @@
 	struct drm_device *dev = obj->dev;
 
 	/* drop the reference on the export fd holds */
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
 /**
- * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
- * @dma_buf: buffer to be mapped
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
 *
- * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
- * callback.
+ * This is the PRIME import function which must be used mandatorily by GEM
+ * drivers to ensure correct lifetime management of the underlying GEM object.
+ * The actual importing of the GEM object from the dma-buf is done through the
+ * &drm_driver.gem_prime_import driver callback.
 *
- * Returns the kernel virtual address.
+ * Returns 0 on success or a negative error code on failure.
 */
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+			       struct drm_file *file_priv, int prime_fd,
+			       uint32_t *handle)
 {
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *obj;
+	int ret;
 
-	if (dev->driver->gem_prime_vmap)
-		return dev->driver->gem_prime_vmap(obj);
+	dma_buf = dma_buf_get(prime_fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	mutex_lock(&file_priv->prime.lock);
+
+	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+			dma_buf, handle);
+	if (ret == 0)
+		goto out_put;
+
+	/* never seen this one, need to import */
+	mutex_lock(&dev->object_name_lock);
+	if (dev->driver->gem_prime_import)
+		obj = dev->driver->gem_prime_import(dev, dma_buf);
 	else
-		return NULL;
+		obj = drm_gem_prime_import(dev, dma_buf);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out_unlock;
+	}
+
+	if (obj->dma_buf) {
+		WARN_ON(obj->dma_buf != dma_buf);
+	} else {
+		obj->dma_buf = dma_buf;
+		get_dma_buf(dma_buf);
+	}
+
+	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+	drm_gem_object_put(obj);
+	if (ret)
+		goto out_put;
+
+	ret = drm_prime_add_buf_handle(&file_priv->prime,
+			dma_buf, *handle);
+	mutex_unlock(&file_priv->prime.lock);
+	if (ret)
+		goto fail;
+
+	dma_buf_put(dma_buf);
+
+	return 0;
+
+fail:
+	/* hmm, if driver attached, we are relying on the free-object path
+	 * to detach.. which seems ok..
+	 */
+	drm_gem_handle_delete(file_priv, *handle);
+	dma_buf_put(dma_buf);
+	return ret;
+
+out_unlock:
+	mutex_unlock(&dev->object_name_lock);
+out_put:
+	mutex_unlock(&file_priv->prime.lock);
+	dma_buf_put(dma_buf);
+	return ret;
 }
-EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
 
-/**
- * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
- *
- * Releases a kernel virtual mapping. This can be used as the
- * &dma_buf_ops.vunmap callback.
- */
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
 {
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
+	struct drm_prime_handle *args = data;
 
-	if (dev->driver->gem_prime_vunmap)
-		dev->driver->gem_prime_vunmap(obj, vaddr);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
-
-/**
- * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
- * @dma_buf: buffer to query
- * @uuid: uuid outparam
- *
- * Queries the buffer's virtio UUID. This can be used as the
- * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
-{
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
-
-	if (!dev->driver->gem_prime_get_uuid)
-		return -ENODEV;
-
-	return dev->driver->gem_prime_get_uuid(obj, uuid);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
-
-/**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-			   void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
-/**
- * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
- * @dma_buf: buffer to be mapped
- * @vma: virtual address range
- *
- * Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
-	struct drm_gem_object *obj = dma_buf->priv;
-	struct drm_device *dev = obj->dev;
-
-	if (!dev->driver->gem_prime_mmap)
+	if (!dev->driver->prime_fd_to_handle)
 		return -ENOSYS;
 
-	return dev->driver->gem_prime_mmap(obj, vma);
+	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
 }
-EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
-
-static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
-	.attach = drm_gem_map_attach,
-	.detach = drm_gem_map_detach,
-	.map_dma_buf = drm_gem_map_dma_buf,
-	.unmap_dma_buf = drm_gem_unmap_dma_buf,
-	.release = drm_gem_dmabuf_release,
-	.map = drm_gem_dmabuf_kmap,
-	.unmap = drm_gem_dmabuf_kunmap,
-	.mmap = drm_gem_dmabuf_mmap,
-	.vmap = drm_gem_dmabuf_vmap,
-	.vunmap = drm_gem_dmabuf_vunmap,
-	.get_uuid = drm_gem_dmabuf_get_uuid,
-};
-
-/**
- * DOC: PRIME Helpers
- *
- * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
- * simpler APIs by using the helper functions @drm_gem_prime_export and
- * @drm_gem_prime_import. These functions implement dma-buf support in terms of
- * six lower-level driver callbacks:
- *
- * Export callbacks:
- *
- *  * @gem_prime_pin (optional): prepare a GEM object for exporting
- *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- *  * @gem_prime_vmap: vmap a buffer exported by your driver
- *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
- *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
- *
- * Import callback:
- *
- *  * @gem_prime_import_sg_table (import): produce a GEM object from another
- *    driver's scatter/gather table
- */
-
-/**
- * drm_gem_prime_export - helper library implementation of the export callback
- * @dev: drm_device to export from
- * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC and DRM_RDWR
- *
- * This is the implementation of the gem_prime_export functions for GEM drivers
- * using the PRIME helpers.
- */
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj,
-				     int flags)
-{
-	struct dma_buf_export_info exp_info = {
-		.exp_name = KBUILD_MODNAME, /* white lie for debug */
-		.owner = dev->driver->fops->owner,
-		.ops = &drm_gem_prime_dmabuf_ops,
-		.size = obj->size,
-		.flags = flags,
-		.priv = obj,
-	};
-
-	if (dev->driver->gem_prime_res_obj)
-		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
-
-	return drm_gem_dmabuf_export(dev, &exp_info);
-}
-EXPORT_SYMBOL(drm_gem_prime_export);
 
 static struct dma_buf *export_and_register_object(struct drm_device *dev,
 						  struct drm_gem_object *obj,
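
Editor's note — how a GEM driver is expected to wire the entry points above into its &drm_driver, per the DOC comment at the top of the file; a sketch with hypothetical foo_ names:

static const struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM,
	/* the PRIME ioctls forward to these two hooks */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	/* import helper; per-object export goes via &drm_gem_object_funcs */
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};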
@@ -582,7 +388,12 @@
 		return dmabuf;
 	}
 
-	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	if (obj->funcs && obj->funcs->export)
+		dmabuf = obj->funcs->export(obj, flags);
+	else if (dev->driver->gem_prime_export)
+		dmabuf = dev->driver->gem_prime_export(obj, flags);
+	else
+		dmabuf = drm_gem_prime_export(obj, flags);
 	if (IS_ERR(dmabuf)) {
 		/* normally the created dma-buf takes ownership of the ref,
 		 * but if that fails then drop the ref
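
Editor's note — the three-way fallback above prefers a per-object export hook. A sketch of a driver opting in through &drm_gem_object_funcs (foo_ names hypothetical); once funcs is set on each object, the per-object hooks take precedence over the &drm_driver-level callbacks:

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= foo_gem_free_object,
	.export		= drm_gem_prime_export,	/* or a driver wrapper */
	.get_sg_table	= foo_gem_get_sg_table,
	.vmap		= foo_gem_vmap,
	.vunmap		= foo_gem_vunmap,
};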
@@ -612,7 +423,7 @@
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
- * gem_prime_export driver callback.
+ * &drm_driver.gem_prime_export driver callback.
 */
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 			       struct drm_file *file_priv, uint32_t handle,
@@ -693,13 +504,203 @@
 fail_put_dmabuf:
 	dma_buf_put(dmabuf);
 out:
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 out_unlock:
 	mutex_unlock(&file_priv->prime.lock);
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+
+	if (!dev->driver->prime_handle_to_fd)
+		return -ENOSYS;
+
+	/* check flags are valid */
+	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
+		return -EINVAL;
+
+	return dev->driver->prime_handle_to_fd(dev, file_priv,
+			args->handle, args->flags, &args->fd);
+}
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement &drm_gem_object_funcs.export and
+ * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
+ * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
+ * implement dma-buf support in terms of some lower-level helpers, which are
+ * again exported for drivers to use individually:
+ *
+ * Exporting buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Optional pinning of buffers is handled at dma-buf attach and detach time in
+ * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
+ * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * For kernel-internal access there's drm_gem_dmabuf_vmap() and
+ * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
+ * drm_gem_dmabuf_mmap().
+ *
+ * Note that these export helpers can only be used if the underlying backing
+ * storage is fully coherent and either permanently pinned, or it is safe to pin
+ * it indefinitely.
+ *
+ * FIXME: The underlying helper functions are named rather inconsistently.
+ *
+ * Importing buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Importing dma-bufs using drm_gem_prime_import() relies on
+ * &drm_driver.gem_prime_import_sg_table.
+ *
+ * Note that similarly to the export helpers this permanently pins the
+ * underlying backing storage, which is ok for scanout but is not the best
+ * option for sharing lots of buffers for rendering.
+ */
+
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @attach: buffer attachment data
+ *
+ * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback. Must be used together with
+ * drm_gem_map_detach().
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf,
+		       struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	return drm_gem_pin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_attach);
+
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
+ * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
+ * &dma_buf_ops.detach callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+			struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	drm_gem_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_detach);
+
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
+ * with drm_gem_unmap_dma_buf().
+ *
+ * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ */
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+				     enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct sg_table *sgt;
+	int ret;
+
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	if (obj->funcs)
+		sgt = obj->funcs->get_sg_table(obj);
+	else
+		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+	ret = dma_map_sgtable(attach->dev, sgt, dir,
+			      DMA_ATTR_SKIP_CPU_SYNC);
+	if (ret) {
+		sg_free_table(sgt);
+		kfree(sgt);
+		sgt = ERR_PTR(ret);
+	}
+
+	return sgt;
+}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
+
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
+ *
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+			   struct sg_table *sgt,
+			   enum dma_data_direction dir)
+{
+	if (!sgt)
+		return;
+
+	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
+
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
+ *
+ * Returns the kernel virtual address or NULL on failure.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	void *vaddr;
+
+	vaddr = drm_gem_vmap(obj);
+	if (IS_ERR(vaddr))
+		vaddr = NULL;
+
+	return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
+ * device specific handling.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	drm_gem_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
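
Editor's note — since each helper above is exported individually (see the DOC overview), a driver can mix its own &dma_buf_ops callbacks with the stock ones. A hypothetical sketch that overrides only vmap:

static void *foo_dmabuf_vmap(struct dma_buf *dma_buf)
{
	/* driver-specific kernel mapping instead of drm_gem_dmabuf_vmap() */
	return foo_special_vmap(dma_buf->priv);	/* hypothetical helper */
}

static const struct dma_buf_ops foo_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = foo_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};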
@@ -719,6 +720,18 @@
 	struct file *fil;
 	int ret;
 
+	/* Add the fake offset */
+	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+	if (obj->funcs && obj->funcs->mmap) {
+		ret = obj->funcs->mmap(obj, vma);
+		if (ret)
+			return ret;
+		vma->vm_private_data = obj;
+		drm_gem_object_get(obj);
+		return 0;
+	}
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
 	if (!priv || !fil) {
@@ -734,8 +747,6 @@
 	if (ret)
 		goto out;
 
-	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
-
 	ret = obj->dev->driver->fops->mmap(fil, vma);
 
 	drm_vma_node_revoke(&obj->vma_node, priv);
@@ -748,14 +759,176 @@
 EXPORT_SYMBOL(drm_gem_prime_mmap);
 
 /**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
+ * which should be set to drm_gem_prime_mmap().
+ *
+ * FIXME: There's really no point to this wrapper, drivers which need anything
+ * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (!dev->driver->gem_prime_mmap)
+		return -ENOSYS;
+
+	return dev->driver->gem_prime_mmap(obj, vma);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+
+/**
+ * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
+ * @dma_buf: buffer to query
+ * @uuid: uuid outparam
+ *
+ * Queries the buffer's virtio UUID. This can be used as the
+ * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (!dev->driver->gem_prime_get_uuid)
+		return -ENODEV;
+
+	return dev->driver->gem_prime_get_uuid(obj, uuid);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+	.cache_sgt_mapping = true,
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+	.get_uuid = drm_gem_dmabuf_get_uuid,
+};
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @dev: DRM device
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages;
+ * the driver is responsible for mapping the pages into the
+ * importer's address space for use with dma_buf itself.
+ *
+ * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+				       struct page **pages, unsigned int nr_pages)
+{
+	struct sg_table *sg;
+	struct scatterlist *sge;
+	size_t max_segment = 0;
+
+	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sg)
+		return ERR_PTR(-ENOMEM);
+
+	if (dev)
+		max_segment = dma_max_mapping_size(dev->dev);
+	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment,
+					  NULL, 0, GFP_KERNEL);
+	if (IS_ERR(sge)) {
+		kfree(sg);
+		sg = ERR_CAST(sge);
+	}
+	return sg;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/**
+ * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
+ * @sgt: sg_table describing the buffer to check
+ *
+ * This helper calculates the contiguous size in the DMA address space
+ * of the buffer described by the provided sg_table.
+ *
+ * This is useful for implementing
+ * &drm_gem_object_funcs.gem_prime_import_sg_table.
+ */
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
+{
+	dma_addr_t expected = sg_dma_address(sgt->sgl);
+	struct scatterlist *sg;
+	unsigned long size = 0;
+	int i;
+
+	for_each_sgtable_dma_sg(sgt, sg, i) {
+		unsigned int len = sg_dma_len(sg);
+
+		if (!len)
+			break;
+		if (sg_dma_address(sg) != expected)
+			break;
+		expected += len;
+		size += len;
+	}
+	return size;
+}
+EXPORT_SYMBOL(drm_prime_get_contiguous_size);
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * This is the implementation of the &drm_gem_object_funcs.export function for
+ * GEM drivers using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+				     int flags)
+{
+	struct drm_device *dev = obj->dev;
+	struct dma_buf_export_info exp_info = {
+		.exp_name = KBUILD_MODNAME, /* white lie for debug */
+		.owner = dev->driver->fops->owner,
+		.ops = &drm_gem_prime_dmabuf_ops,
+		.size = obj->size,
+		.flags = flags,
+		.priv = obj,
+		.resv = obj->resv,
+	};
+
+	return drm_gem_dmabuf_export(dev, &exp_info);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
- * This is the core of drm_gem_prime_import. It's designed to be called by
- * drivers who want to use a different device structure than dev->dev for
- * attaching via dma_buf.
+ * This is the core of drm_gem_prime_import(). It's designed to be called by
+ * drivers who want to use a different device structure than &drm_device.dev for
+ * attaching via dma_buf. This function calls
+ * &drm_driver.gem_prime_import_sg_table internally.
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
 */
 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
 						struct dma_buf *dma_buf,
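
Editor's note — a hedged sketch of the intended use of the two new helpers above: the export side builds its sg_table from a page array with drm_prime_pages_to_sg(), and a contiguous-only import path validates the DMA mapping with drm_prime_get_contiguous_size(). to_foo_gem(), bo->pages and foo_gem_create_from_sgt() are hypothetical:

static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = to_foo_gem(obj);

	/* respects the device's max DMA segment size via obj->dev */
	return drm_prime_pages_to_sg(obj->dev, bo->pages,
				     obj->size >> PAGE_SHIFT);
}

static struct drm_gem_object *
foo_gem_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach,
			struct sg_table *sgt)
{
	/* this (assumed) scanout engine needs physically contiguous memory */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	return foo_gem_create_from_sgt(dev, sgt, attach->dmabuf->size);
}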
@@ -800,6 +973,7 @@
 	}
 
 	obj->import_attach = attach;
+	obj->resv = dma_buf->resv;
 
 	return obj;
 
@@ -819,7 +993,12 @@
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
- * using the PRIME helpers.
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
 */
 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 					    struct dma_buf *dma_buf)
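
Editor's note — drm_gem_prime_import() simply calls drm_gem_prime_import_dev() with &drm_device.dev. A hypothetical sketch of the case the _dev variant exists for, e.g. a virtual DRM device importing through the platform device that actually performs DMA (foo_ names assumed):

static struct drm_gem_object *
foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct foo_device *foo = to_foo_device(dev);	/* hypothetical */

	/* attach with the DMA-capable device, not dev->dev */
	return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
}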
@@ -827,151 +1006,6 @@
 	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
 }
 EXPORT_SYMBOL(drm_gem_prime_import);
-
-/**
- * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
- * @file_priv: drm file-private structure
- * @prime_fd: fd id of the dma-buf which should be imported
- * @handle: pointer to storage for the handle of the imported buffer object
- *
- * This is the PRIME import function which must be used mandatorily by GEM
- * drivers to ensure correct lifetime management of the underlying GEM object.
- * The actual importing of GEM object from the dma-buf is done through the
- * gem_import_export driver callback.
- */
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
-			       struct drm_file *file_priv, int prime_fd,
-			       uint32_t *handle)
-{
-	struct dma_buf *dma_buf;
-	struct drm_gem_object *obj;
-	int ret;
-
-	dma_buf = dma_buf_get(prime_fd);
-	if (IS_ERR(dma_buf))
-		return PTR_ERR(dma_buf);
-
-	mutex_lock(&file_priv->prime.lock);
-
-	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
-			dma_buf, handle);
-	if (ret == 0)
-		goto out_put;
-
-	/* never seen this one, need to import */
-	mutex_lock(&dev->object_name_lock);
-	obj = dev->driver->gem_prime_import(dev, dma_buf);
-	if (IS_ERR(obj)) {
-		ret = PTR_ERR(obj);
-		goto out_unlock;
-	}
-
-	if (obj->dma_buf) {
-		WARN_ON(obj->dma_buf != dma_buf);
-	} else {
-		obj->dma_buf = dma_buf;
-		get_dma_buf(dma_buf);
-	}
-
-	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
-	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
-	drm_gem_object_put_unlocked(obj);
-	if (ret)
-		goto out_put;
-
-	ret = drm_prime_add_buf_handle(&file_priv->prime,
-			dma_buf, *handle);
-	mutex_unlock(&file_priv->prime.lock);
-	if (ret)
-		goto fail;
-
-	dma_buf_put(dma_buf);
-
-	return 0;
-
-fail:
-	/* hmm, if driver attached, we are relying on the free-object path
-	 * to detach.. which seems ok..
-	 */
-	drm_gem_handle_delete(file_priv, *handle);
-	dma_buf_put(dma_buf);
-	return ret;
-
-out_unlock:
-	mutex_unlock(&dev->object_name_lock);
-out_put:
-	mutex_unlock(&file_priv->prime.lock);
-	dma_buf_put(dma_buf);
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
-
-int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv)
-{
-	struct drm_prime_handle *args = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_PRIME))
-		return -EINVAL;
-
-	if (!dev->driver->prime_handle_to_fd)
-		return -ENOSYS;
-
-	/* check flags are valid */
-	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
-		return -EINVAL;
-
-	return dev->driver->prime_handle_to_fd(dev, file_priv,
-			args->handle, args->flags, &args->fd);
-}
-
-int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv)
-{
-	struct drm_prime_handle *args = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_PRIME))
-		return -EINVAL;
-
-	if (!dev->driver->prime_fd_to_handle)
-		return -ENOSYS;
-
-	return dev->driver->prime_fd_to_handle(dev, file_priv,
-			args->fd, &args->handle);
-}
-
-/**
- * drm_prime_pages_to_sg - converts a page array into an sg list
- * @pages: pointer to the array of page pointers to convert
- * @nr_pages: length of the page vector
- *
- * This helper creates an sg table object from a set of pages
- * the driver is responsible for mapping the pages into the
- * importers address space for use with dma_buf itself.
- */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
-{
-	struct sg_table *sg = NULL;
-	int ret;
-
-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-				nr_pages << PAGE_SHIFT, GFP_KERNEL);
-	if (ret)
-		goto out;
-
-	return sg;
-out:
-	kfree(sg);
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
 /**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
@@ -982,36 +1016,33 @@
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
+ *
+ * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * implementation.
 */
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 				     dma_addr_t *addrs, int max_entries)
 {
-	unsigned count;
-	struct scatterlist *sg;
-	struct page *page;
-	u32 len, index;
-	dma_addr_t addr;
+	struct sg_dma_page_iter dma_iter;
+	struct sg_page_iter page_iter;
+	struct page **p = pages;
+	dma_addr_t *a = addrs;
 
-	index = 0;
-	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-		len = sg->length;
-		page = sg_page(sg);
-		addr = sg_dma_address(sg);
-
-		while (len > 0) {
-			if (WARN_ON(index >= max_entries))
+	if (pages) {
+		for_each_sgtable_page(sgt, &page_iter, 0) {
+			if (WARN_ON(p - pages >= max_entries))
 				return -1;
-			if (pages)
-				pages[index] = page;
-			if (addrs)
-				addrs[index] = addr;
-
-			page++;
-			addr += PAGE_SIZE;
-			len -= PAGE_SIZE;
-			index++;
+			*p++ = sg_page_iter_page(&page_iter);
 		}
 	}
+	if (addrs) {
+		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+			if (WARN_ON(a - addrs >= max_entries))
+				return -1;
+			*a++ = sg_page_iter_dma_address(&dma_iter);
+		}
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
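
Editor's note — a hypothetical caller of the reworked conversion above; a TTM-style driver fills a page array for an imported buffer (bo and its fields are assumed driver state):

static int foo_import_pages(struct foo_gem_object *bo, struct sg_table *sgt)
{
	int npages = bo->base.size >> PAGE_SHIFT;

	bo->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages)
		return -ENOMEM;

	/* pass NULL for addrs when only struct page pointers are needed */
	return drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, NULL, npages);
}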
@@ -1022,12 +1053,13 @@
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup functions which GEM drivers need to call when they use
- * @drm_gem_prime_import to import dma-bufs.
+ * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
 {
 	struct dma_buf_attachment *attach;
 	struct dma_buf *dma_buf;
+
 	attach = obj->import_attach;
 	if (sg)
 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
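
Editor's note — the &drm_gem_object_funcs.free obligation mentioned in the import kernel-docs, sketched with hypothetical foo_ names: imported objects tear down the attachment through drm_prime_gem_destroy(), native ones free their own backing storage.

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = to_foo_gem(obj);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);	/* unmaps + detaches */
	else
		foo_free_backing_pages(bo);		/* hypothetical */

	drm_gem_object_release(obj);
	kfree(bo);
}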
@@ -1037,16 +1069,3 @@
 	dma_buf_put(dma_buf);
 }
 EXPORT_SYMBOL(drm_prime_gem_destroy);
-
-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
-{
-	mutex_init(&prime_fpriv->lock);
-	prime_fpriv->dmabufs = RB_ROOT;
-	prime_fpriv->handles = RB_ROOT;
-}
-
-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
-{
-	/* by now drm_gem_release should've made sure the list is empty */
-	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
-}