.. | .. |
---|
38 | 38 | #include <linux/dma-buf.h> |
---|
39 | 39 | #include <linux/mem_encrypt.h> |
---|
40 | 40 | #include <linux/pagevec.h> |
---|
41 | | -#include <drm/drmP.h> |
---|
42 | | -#include <drm/drm_vma_manager.h> |
---|
| 41 | + |
---|
| 42 | +#include <drm/drm.h> |
---|
| 43 | +#include <drm/drm_device.h> |
---|
| 44 | +#include <drm/drm_drv.h> |
---|
| 45 | +#include <drm/drm_file.h> |
---|
43 | 46 | #include <drm/drm_gem.h> |
---|
| 47 | +#include <drm/drm_managed.h> |
---|
44 | 48 | #include <drm/drm_print.h> |
---|
| 49 | +#include <drm/drm_vma_manager.h> |
---|
| 50 | + |
---|
45 | 51 | #include "drm_internal.h" |
---|
46 | 52 | |
---|
47 | 53 | /** @file drm_gem.c |
---|
.. | .. |
---|
72 | 78 | * up at a later date, and as our interface with shmfs for memory allocation. |
---|
73 | 79 | */ |
---|
74 | 80 | |
---|
75 | | -/* |
---|
76 | | - * We make up offsets for buffer objects so we can recognize them at |
---|
77 | | - * mmap time. |
---|
78 | | - */ |
---|
79 | | - |
---|
80 | | -/* pgoff in mmap is an unsigned long, so we need to make sure that |
---|
81 | | - * the faked up offset will fit |
---|
82 | | - */ |
---|
83 | | - |
---|
84 | | -#if BITS_PER_LONG == 64 |
---|
85 | | -#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) |
---|
86 | | -#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) |
---|
87 | | -#else |
---|
88 | | -#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) |
---|
89 | | -#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) |
---|
90 | | -#endif |
---|
| 81 | +static void |
---|
| 82 | +drm_gem_init_release(struct drm_device *dev, void *ptr) |
---|
| 83 | +{ |
---|
| 84 | + drm_vma_offset_manager_destroy(dev->vma_offset_manager); |
---|
| 85 | +} |
---|
91 | 86 | |
---|
92 | 87 | /** |
---|
93 | 88 | * drm_gem_init - Initialize the GEM device fields |
---|
.. | .. |
---|
101 | 96 | mutex_init(&dev->object_name_lock); |
---|
102 | 97 | idr_init_base(&dev->object_name_idr, 1); |
---|
103 | 98 | |
---|
104 | | - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); |
---|
| 99 | + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), |
---|
| 100 | + GFP_KERNEL); |
---|
105 | 101 | if (!vma_offset_manager) { |
---|
106 | 102 | DRM_ERROR("out of memory\n"); |
---|
107 | 103 | return -ENOMEM; |
---|
.. | .. |
---|
112 | 108 | DRM_FILE_PAGE_OFFSET_START, |
---|
113 | 109 | DRM_FILE_PAGE_OFFSET_SIZE); |
---|
114 | 110 | |
---|
115 | | - return 0; |
---|
116 | | -} |
---|
117 | | - |
---|
118 | | -void |
---|
119 | | -drm_gem_destroy(struct drm_device *dev) |
---|
120 | | -{ |
---|
121 | | - |
---|
122 | | - drm_vma_offset_manager_destroy(dev->vma_offset_manager); |
---|
123 | | - kfree(dev->vma_offset_manager); |
---|
124 | | - dev->vma_offset_manager = NULL; |
---|
| 111 | + return drmm_add_action(dev, drm_gem_init_release, NULL); |
---|
125 | 112 | } |
---|
126 | 113 | |
---|
127 | 114 | /** |
---|
.. | .. |
---|
171 | 158 | kref_init(&obj->refcount); |
---|
172 | 159 | obj->handle_count = 0; |
---|
173 | 160 | obj->size = size; |
---|
| 161 | + dma_resv_init(&obj->_resv); |
---|
| 162 | + if (!obj->resv) |
---|
| 163 | + obj->resv = &obj->_resv; |
---|
| 164 | + |
---|
174 | 165 | drm_vma_node_reset(&obj->vma_node); |
---|
175 | 166 | } |
---|
176 | 167 | EXPORT_SYMBOL(drm_gem_private_object_init); |
---|
.. | .. |
---|
211 | 202 | struct drm_device *dev = obj->dev; |
---|
212 | 203 | bool final = false; |
---|
213 | 204 | |
---|
214 | | - if (WARN_ON(obj->handle_count == 0)) |
---|
| 205 | + if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) |
---|
215 | 206 | return; |
---|
216 | 207 | |
---|
217 | 208 | /* |
---|
.. | .. |
---|
229 | 220 | mutex_unlock(&dev->object_name_lock); |
---|
230 | 221 | |
---|
231 | 222 | if (final) |
---|
232 | | - drm_gem_object_put_unlocked(obj); |
---|
| 223 | + drm_gem_object_put(obj); |
---|
233 | 224 | } |
---|
234 | 225 | |
---|
235 | 226 | /* |
---|
.. | .. |
---|
243 | 234 | struct drm_gem_object *obj = ptr; |
---|
244 | 235 | struct drm_device *dev = obj->dev; |
---|
245 | 236 | |
---|
246 | | - if (dev->driver->gem_close_object) |
---|
| 237 | + if (obj->funcs && obj->funcs->close) |
---|
| 238 | + obj->funcs->close(obj, file_priv); |
---|
| 239 | + else if (dev->driver->gem_close_object) |
---|
247 | 240 | dev->driver->gem_close_object(obj, file_priv); |
---|
248 | 241 | |
---|
249 | | - if (drm_core_check_feature(dev, DRIVER_PRIME)) |
---|
250 | | - drm_prime_remove_buf_handle(&file_priv->prime, id); |
---|
| 242 | + drm_prime_remove_buf_handle(&file_priv->prime, id); |
---|
251 | 243 | drm_vma_node_revoke(&obj->vma_node, file_priv); |
---|
252 | 244 | |
---|
253 | 245 | drm_gem_object_handle_put_unlocked(obj); |
---|
.. | .. |
---|
324 | 316 | |
---|
325 | 317 | *offset = drm_vma_node_offset_addr(&obj->vma_node); |
---|
326 | 318 | out: |
---|
327 | | - drm_gem_object_put_unlocked(obj); |
---|
| 319 | + drm_gem_object_put(obj); |
---|
328 | 320 | |
---|
329 | 321 | return ret; |
---|
330 | 322 | } |
---|
.. | .. |
---|
396 | 388 | if (ret) |
---|
397 | 389 | goto err_remove; |
---|
398 | 390 | |
---|
399 | | - if (dev->driver->gem_open_object) { |
---|
| 391 | + if (obj->funcs && obj->funcs->open) { |
---|
| 392 | + ret = obj->funcs->open(obj, file_priv); |
---|
| 393 | + if (ret) |
---|
| 394 | + goto err_revoke; |
---|
| 395 | + } else if (dev->driver->gem_open_object) { |
---|
400 | 396 | ret = dev->driver->gem_open_object(obj, file_priv); |
---|
401 | 397 | if (ret) |
---|
402 | 398 | goto err_revoke; |
---|
.. | .. |
---|
420 | 416 | * drm_gem_handle_create - create a gem handle for an object |
---|
421 | 417 | * @file_priv: drm file-private structure to register the handle for |
---|
422 | 418 | * @obj: object to register |
---|
423 | | - * @handlep: pionter to return the created handle to the caller |
---|
| 419 | + * @handlep: pointer to return the created handle to the caller |
---|
424 | 420 | * |
---|
425 | 421 | * Create a handle for this object. This adds a handle reference to the object, |
---|
426 | 422 | * which includes a regular reference count. Callers will likely want to |
---|
.. | .. |
---|
537 | 533 | * set during initialization. If you have special zone constraints, set them |
---|
538 | 534 | * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care |
---|
539 | 535 | * to keep pages in the required zone during swap-in. |
---|
| 536 | + * |
---|
| 537 | + * This function is only valid on objects initialized with |
---|
| 538 | + * drm_gem_object_init(), but not for those initialized with |
---|
| 539 | + * drm_gem_private_object_init() only. |
---|
540 | 540 | */ |
---|
541 | 541 | struct page **drm_gem_get_pages(struct drm_gem_object *obj) |
---|
542 | 542 | { |
---|
.. | .. |
---|
544 | 544 | struct page *p, **pages; |
---|
545 | 545 | struct pagevec pvec; |
---|
546 | 546 | int i, npages; |
---|
| 547 | + |
---|
| 548 | + |
---|
| 549 | + if (WARN_ON(!obj->filp)) |
---|
| 550 | + return ERR_PTR(-EINVAL); |
---|
547 | 551 | |
---|
548 | 552 | /* This is the shared memory object that backs the GEM resource */ |
---|
549 | 553 | mapping = obj->filp->f_mapping; |
---|
.. | .. |
---|
621 | 625 | |
---|
622 | 626 | pagevec_init(&pvec); |
---|
623 | 627 | for (i = 0; i < npages; i++) { |
---|
| 628 | + if (!pages[i]) |
---|
| 629 | + continue; |
---|
| 630 | + |
---|
624 | 631 | if (dirty) |
---|
625 | 632 | set_page_dirty(pages[i]); |
---|
626 | 633 | |
---|
.. | .. |
---|
638 | 645 | } |
---|
639 | 646 | EXPORT_SYMBOL(drm_gem_put_pages); |
---|
640 | 647 | |
---|
| 648 | +static int objects_lookup(struct drm_file *filp, u32 *handle, int count, |
---|
| 649 | + struct drm_gem_object **objs) |
---|
| 650 | +{ |
---|
| 651 | + int i, ret = 0; |
---|
| 652 | + struct drm_gem_object *obj; |
---|
| 653 | + |
---|
| 654 | + spin_lock(&filp->table_lock); |
---|
| 655 | + |
---|
| 656 | + for (i = 0; i < count; i++) { |
---|
| 657 | + /* Check if we currently have a reference on the object */ |
---|
| 658 | + obj = idr_find(&filp->object_idr, handle[i]); |
---|
| 659 | + if (!obj) { |
---|
| 660 | + ret = -ENOENT; |
---|
| 661 | + break; |
---|
| 662 | + } |
---|
| 663 | + drm_gem_object_get(obj); |
---|
| 664 | + objs[i] = obj; |
---|
| 665 | + } |
---|
| 666 | + spin_unlock(&filp->table_lock); |
---|
| 667 | + |
---|
| 668 | + return ret; |
---|
| 669 | +} |
---|
| 670 | + |
---|
641 | 671 | /** |
---|
642 | | - * drm_gem_object_lookup - look up a GEM object from it's handle |
---|
| 672 | + * drm_gem_objects_lookup - look up GEM objects from an array of handles |
---|
| 673 | + * @filp: DRM file private data |
---|
| 674 | + * @bo_handles: user pointer to array of userspace handles |
---|
| 675 | + * @count: size of handle array |
---|
| 676 | + * @objs_out: returned pointer to array of drm_gem_object pointers |
---|
| 677 | + * |
---|
| 678 | + * Takes an array of userspace handles and returns a newly allocated array of |
---|
| 679 | + * GEM objects. |
---|
| 680 | + * |
---|
| 681 | + * For a single handle lookup, use drm_gem_object_lookup(). |
---|
| 682 | + * |
---|
| 683 | + * Returns: |
---|
| 684 | + * |
---|
| 685 | + * @objs_out filled in with GEM object pointers. Returned GEM objects need to be |
---|
| 686 | + * released with drm_gem_object_put(). -ENOENT is returned on a lookup |
---|
| 687 | + * failure. 0 is returned on success. |
---|
| 688 | + * |
---|
| 689 | + */ |
---|
| 690 | +int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, |
---|
| 691 | + int count, struct drm_gem_object ***objs_out) |
---|
| 692 | +{ |
---|
| 693 | + int ret; |
---|
| 694 | + u32 *handles; |
---|
| 695 | + struct drm_gem_object **objs; |
---|
| 696 | + |
---|
| 697 | + if (!count) |
---|
| 698 | + return 0; |
---|
| 699 | + |
---|
| 700 | + objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), |
---|
| 701 | + GFP_KERNEL | __GFP_ZERO); |
---|
| 702 | + if (!objs) |
---|
| 703 | + return -ENOMEM; |
---|
| 704 | + |
---|
| 705 | + *objs_out = objs; |
---|
| 706 | + |
---|
| 707 | + handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); |
---|
| 708 | + if (!handles) { |
---|
| 709 | + ret = -ENOMEM; |
---|
| 710 | + goto out; |
---|
| 711 | + } |
---|
| 712 | + |
---|
| 713 | + if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { |
---|
| 714 | + ret = -EFAULT; |
---|
| 715 | + DRM_DEBUG("Failed to copy in GEM handles\n"); |
---|
| 716 | + goto out; |
---|
| 717 | + } |
---|
| 718 | + |
---|
| 719 | + ret = objects_lookup(filp, handles, count, objs); |
---|
| 720 | +out: |
---|
| 721 | + kvfree(handles); |
---|
| 722 | + return ret; |
---|
| 723 | + |
---|
| 724 | +} |
---|
| 725 | +EXPORT_SYMBOL(drm_gem_objects_lookup); |
---|
| 726 | + |
---|
| 727 | +/** |
---|
| 728 | + * drm_gem_object_lookup - look up a GEM object from its handle |
---|
643 | 729 | * @filp: DRM file private date |
---|
644 | 730 | * @handle: userspace handle |
---|
645 | 731 | * |
---|
.. | .. |
---|
647 | 733 | * |
---|
648 | 734 | * A reference to the object named by the handle if such exists on @filp, NULL |
---|
649 | 735 | * otherwise. |
---|
| 736 | + * |
---|
| 737 | + * If looking up an array of handles, use drm_gem_objects_lookup(). |
---|
650 | 738 | */ |
---|
651 | 739 | struct drm_gem_object * |
---|
652 | 740 | drm_gem_object_lookup(struct drm_file *filp, u32 handle) |
---|
653 | 741 | { |
---|
654 | | - struct drm_gem_object *obj; |
---|
| 742 | + struct drm_gem_object *obj = NULL; |
---|
655 | 743 | |
---|
656 | | - spin_lock(&filp->table_lock); |
---|
657 | | - |
---|
658 | | - /* Check if we currently have a reference on the object */ |
---|
659 | | - obj = idr_find(&filp->object_idr, handle); |
---|
660 | | - if (obj) |
---|
661 | | - drm_gem_object_get(obj); |
---|
662 | | - |
---|
663 | | - spin_unlock(&filp->table_lock); |
---|
664 | | - |
---|
| 744 | + objects_lookup(filp, &handle, 1, &obj); |
---|
665 | 745 | return obj; |
---|
666 | 746 | } |
---|
667 | 747 | EXPORT_SYMBOL(drm_gem_object_lookup); |
---|
| 748 | + |
---|
| 749 | +/** |
---|
| 750 | + * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects |
---|
| 751 | + * shared and/or exclusive fences. |
---|
| 752 | + * @filep: DRM file private data |
---|
| 753 | + * @handle: userspace handle |
---|
| 754 | + * @wait_all: if true, wait on all fences, else wait on just exclusive fence |
---|
| 755 | + * @timeout: timeout value in jiffies or zero to return immediately |
---|
| 756 | + * |
---|
| 757 | + * Returns: |
---|
| 758 | + * |
---|
| 759 | + * Returns -EINVAL on a failed handle lookup, -ERESTARTSYS if interrupted, |
---|
| 760 | + * -ETIME if the wait timed out, or 0 on success. |
---|
| 761 | + */ |
---|
| 762 | +long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, |
---|
| 763 | + bool wait_all, unsigned long timeout) |
---|
| 764 | +{ |
---|
| 765 | + long ret; |
---|
| 766 | + struct drm_gem_object *obj; |
---|
| 767 | + |
---|
| 768 | + obj = drm_gem_object_lookup(filep, handle); |
---|
| 769 | + if (!obj) { |
---|
| 770 | + DRM_DEBUG("Failed to look up GEM BO %d\n", handle); |
---|
| 771 | + return -EINVAL; |
---|
| 772 | + } |
---|
| 773 | + |
---|
| 774 | + ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, |
---|
| 775 | + true, timeout); |
---|
| 776 | + if (ret == 0) |
---|
| 777 | + ret = -ETIME; |
---|
| 778 | + else if (ret > 0) |
---|
| 779 | + ret = 0; |
---|
| 780 | + |
---|
| 781 | + drm_gem_object_put(obj); |
---|
| 782 | + |
---|
| 783 | + return ret; |
---|
| 784 | +} |
---|
| 785 | +EXPORT_SYMBOL(drm_gem_dma_resv_wait); |
---|
668 | 786 | |
---|
669 | 787 | /** |
---|
670 | 788 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl |
---|
.. | .. |
---|
682 | 800 | int ret; |
---|
683 | 801 | |
---|
684 | 802 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
---|
685 | | - return -ENODEV; |
---|
| 803 | + return -EOPNOTSUPP; |
---|
686 | 804 | |
---|
687 | 805 | ret = drm_gem_handle_delete(file_priv, args->handle); |
---|
688 | 806 | |
---|
.. | .. |
---|
709 | 827 | int ret; |
---|
710 | 828 | |
---|
711 | 829 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
---|
712 | | - return -ENODEV; |
---|
| 830 | + return -EOPNOTSUPP; |
---|
713 | 831 | |
---|
714 | 832 | obj = drm_gem_object_lookup(file_priv, args->handle); |
---|
715 | 833 | if (obj == NULL) |
---|
.. | .. |
---|
735 | 853 | |
---|
736 | 854 | err: |
---|
737 | 855 | mutex_unlock(&dev->object_name_lock); |
---|
738 | | - drm_gem_object_put_unlocked(obj); |
---|
| 856 | + drm_gem_object_put(obj); |
---|
739 | 857 | return ret; |
---|
740 | 858 | } |
---|
741 | 859 | |
---|
.. | .. |
---|
746 | 864 | * @file_priv: drm file-private structure |
---|
747 | 865 | * |
---|
748 | 866 | * Open an object using the global name, returning a handle and the size. |
---|
| 867 | + * |
---|
| 868 | + * This handle (of course) holds a reference to the object, so the object |
---|
| 869 | + * will not go away until the handle is deleted. |
---|
749 | 870 | */ |
---|
750 | 871 | int |
---|
751 | 872 | drm_gem_open_ioctl(struct drm_device *dev, void *data, |
---|
.. | .. |
---|
757 | 878 | u32 handle; |
---|
758 | 879 | |
---|
759 | 880 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
---|
760 | | - return -ENODEV; |
---|
| 881 | + return -EOPNOTSUPP; |
---|
761 | 882 | |
---|
762 | 883 | mutex_lock(&dev->object_name_lock); |
---|
763 | 884 | obj = idr_find(&dev->object_name_idr, (int) args->name); |
---|
.. | .. |
---|
777 | 898 | args->size = obj->size; |
---|
778 | 899 | |
---|
779 | 900 | err: |
---|
780 | | - drm_gem_object_put_unlocked(obj); |
---|
| 901 | + drm_gem_object_put(obj); |
---|
781 | 902 | return ret; |
---|
782 | 903 | } |
---|
783 | 904 | |
---|
.. | .. |
---|
828 | 949 | if (obj->filp) |
---|
829 | 950 | fput(obj->filp); |
---|
830 | 951 | |
---|
| 952 | + dma_resv_fini(&obj->_resv); |
---|
831 | 953 | drm_gem_free_mmap_offset(obj); |
---|
832 | 954 | } |
---|
833 | 955 | EXPORT_SYMBOL(drm_gem_object_release); |
---|
.. | .. |
---|
837 | 959 | * @kref: kref of the object to free |
---|
838 | 960 | * |
---|
839 | 961 | * Called after the last reference to the object has been lost. |
---|
840 | | - * Must be called holding &drm_device.struct_mutex. |
---|
841 | 962 | * |
---|
842 | 963 | * Frees the object |
---|
843 | 964 | */ |
---|
.. | .. |
---|
848 | 969 | container_of(kref, struct drm_gem_object, refcount); |
---|
849 | 970 | struct drm_device *dev = obj->dev; |
---|
850 | 971 | |
---|
851 | | - if (dev->driver->gem_free_object_unlocked) { |
---|
| 972 | + if (obj->funcs) |
---|
| 973 | + obj->funcs->free(obj); |
---|
| 974 | + else if (dev->driver->gem_free_object_unlocked) |
---|
852 | 975 | dev->driver->gem_free_object_unlocked(obj); |
---|
853 | | - } else if (dev->driver->gem_free_object) { |
---|
854 | | - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
---|
855 | | - |
---|
856 | | - dev->driver->gem_free_object(obj); |
---|
857 | | - } |
---|
858 | 976 | } |
---|
859 | 977 | EXPORT_SYMBOL(drm_gem_object_free); |
---|
860 | 978 | |
---|
861 | 979 | /** |
---|
862 | | - * drm_gem_object_put_unlocked - drop a GEM buffer object reference |
---|
863 | | - * @obj: GEM buffer object |
---|
864 | | - * |
---|
865 | | - * This releases a reference to @obj. Callers must not hold the |
---|
866 | | - * &drm_device.struct_mutex lock when calling this function. |
---|
867 | | - * |
---|
868 | | - * See also __drm_gem_object_put(). |
---|
869 | | - */ |
---|
870 | | -void |
---|
871 | | -drm_gem_object_put_unlocked(struct drm_gem_object *obj) |
---|
872 | | -{ |
---|
873 | | - struct drm_device *dev; |
---|
874 | | - |
---|
875 | | - if (!obj) |
---|
876 | | - return; |
---|
877 | | - |
---|
878 | | - dev = obj->dev; |
---|
879 | | - |
---|
880 | | - if (dev->driver->gem_free_object_unlocked) { |
---|
881 | | - kref_put(&obj->refcount, drm_gem_object_free); |
---|
882 | | - } else { |
---|
883 | | - might_lock(&dev->struct_mutex); |
---|
884 | | - if (kref_put_mutex(&obj->refcount, drm_gem_object_free, |
---|
885 | | - &dev->struct_mutex)) |
---|
886 | | - mutex_unlock(&dev->struct_mutex); |
---|
887 | | - } |
---|
888 | | -} |
---|
889 | | -EXPORT_SYMBOL(drm_gem_object_put_unlocked); |
---|
890 | | - |
---|
891 | | -/** |
---|
892 | | - * drm_gem_object_put - release a GEM buffer object reference |
---|
| 980 | + * drm_gem_object_put_locked - release a GEM buffer object reference |
---|
893 | 981 | * @obj: GEM buffer object |
---|
894 | 982 | * |
---|
895 | 983 | * This releases a reference to @obj. Callers must hold the |
---|
.. | .. |
---|
897 | 985 | * driver doesn't use &drm_device.struct_mutex for anything. |
---|
898 | 986 | * |
---|
899 | 987 | * For drivers not encumbered with legacy locking use |
---|
900 | | - * drm_gem_object_put_unlocked() instead. |
---|
| 988 | + * drm_gem_object_put() instead. |
---|
901 | 989 | */ |
---|
902 | 990 | void |
---|
903 | | -drm_gem_object_put(struct drm_gem_object *obj) |
---|
| 991 | +drm_gem_object_put_locked(struct drm_gem_object *obj) |
---|
904 | 992 | { |
---|
905 | 993 | if (obj) { |
---|
906 | 994 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); |
---|
.. | .. |
---|
908 | 996 | kref_put(&obj->refcount, drm_gem_object_free); |
---|
909 | 997 | } |
---|
910 | 998 | } |
---|
911 | | -EXPORT_SYMBOL(drm_gem_object_put); |
---|
| 999 | +EXPORT_SYMBOL(drm_gem_object_put_locked); |
---|
912 | 1000 | |
---|
913 | 1001 | /** |
---|
914 | 1002 | * drm_gem_vm_open - vma->ops->open implementation for GEM |
---|
.. | .. |
---|
936 | 1024 | { |
---|
937 | 1025 | struct drm_gem_object *obj = vma->vm_private_data; |
---|
938 | 1026 | |
---|
939 | | - drm_gem_object_put_unlocked(obj); |
---|
| 1027 | + drm_gem_object_put(obj); |
---|
940 | 1028 | } |
---|
941 | 1029 | EXPORT_SYMBOL(drm_gem_vm_close); |
---|
942 | 1030 | |
---|
.. | .. |
---|
968 | 1056 | struct vm_area_struct *vma) |
---|
969 | 1057 | { |
---|
970 | 1058 | struct drm_device *dev = obj->dev; |
---|
| 1059 | + int ret; |
---|
971 | 1060 | |
---|
972 | 1061 | /* Check for valid size. */ |
---|
973 | 1062 | if (obj_size < vma->vm_end - vma->vm_start) |
---|
974 | 1063 | return -EINVAL; |
---|
975 | | - |
---|
976 | | - if (!dev->driver->gem_vm_ops) |
---|
977 | | - return -EINVAL; |
---|
978 | | - |
---|
979 | | - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
---|
980 | | - vma->vm_ops = dev->driver->gem_vm_ops; |
---|
981 | | - vma->vm_private_data = obj; |
---|
982 | | - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
---|
983 | | - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
---|
984 | 1064 | |
---|
985 | 1065 | /* Take a ref for this mapping of the object, so that the fault |
---|
986 | 1066 | * handler can dereference the mmap offset's pointer to the object. |
---|
.. | .. |
---|
989 | 1069 | * by a vm_open due to mremap or partial unmap or whatever). |
---|
990 | 1070 | */ |
---|
991 | 1071 | drm_gem_object_get(obj); |
---|
| 1072 | + |
---|
| 1073 | + vma->vm_private_data = obj; |
---|
| 1074 | + |
---|
| 1075 | + if (obj->funcs && obj->funcs->mmap) { |
---|
| 1076 | + ret = obj->funcs->mmap(obj, vma); |
---|
| 1077 | + if (ret) { |
---|
| 1078 | + drm_gem_object_put(obj); |
---|
| 1079 | + return ret; |
---|
| 1080 | + } |
---|
| 1081 | + WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); |
---|
| 1082 | + } else { |
---|
| 1083 | + if (obj->funcs && obj->funcs->vm_ops) |
---|
| 1084 | + vma->vm_ops = obj->funcs->vm_ops; |
---|
| 1085 | + else if (dev->driver->gem_vm_ops) |
---|
| 1086 | + vma->vm_ops = dev->driver->gem_vm_ops; |
---|
| 1087 | + else { |
---|
| 1088 | + drm_gem_object_put(obj); |
---|
| 1089 | + return -EINVAL; |
---|
| 1090 | + } |
---|
| 1091 | + |
---|
| 1092 | + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
---|
| 1093 | + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
---|
| 1094 | + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
---|
| 1095 | + } |
---|
992 | 1096 | |
---|
993 | 1097 | return 0; |
---|
994 | 1098 | } |
---|
.. | .. |
---|
1045 | 1149 | return -EINVAL; |
---|
1046 | 1150 | |
---|
1047 | 1151 | if (!drm_vma_node_is_allowed(node, priv)) { |
---|
1048 | | - drm_gem_object_put_unlocked(obj); |
---|
| 1152 | + drm_gem_object_put(obj); |
---|
1049 | 1153 | return -EACCES; |
---|
1050 | 1154 | } |
---|
1051 | 1155 | |
---|
1052 | 1156 | if (node->readonly) { |
---|
1053 | 1157 | if (vma->vm_flags & VM_WRITE) { |
---|
1054 | | - drm_gem_object_put_unlocked(obj); |
---|
| 1158 | + drm_gem_object_put(obj); |
---|
1055 | 1159 | return -EINVAL; |
---|
1056 | 1160 | } |
---|
1057 | 1161 | |
---|
.. | .. |
---|
1061 | 1165 | ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, |
---|
1062 | 1166 | vma); |
---|
1063 | 1167 | |
---|
1064 | | - drm_gem_object_put_unlocked(obj); |
---|
| 1168 | + drm_gem_object_put(obj); |
---|
1065 | 1169 | |
---|
1066 | 1170 | return ret; |
---|
1067 | 1171 | } |
---|
.. | .. |
---|
1079 | 1183 | drm_printf_indent(p, indent, "imported=%s\n", |
---|
1080 | 1184 | obj->import_attach ? "yes" : "no"); |
---|
1081 | 1185 | |
---|
1082 | | - if (obj->dev->driver->gem_print_info) |
---|
1083 | | - obj->dev->driver->gem_print_info(p, indent, obj); |
---|
| 1186 | + if (obj->funcs && obj->funcs->print_info) |
---|
| 1187 | + obj->funcs->print_info(p, indent, obj); |
---|
1084 | 1188 | } |
---|
| 1189 | + |
---|
| 1190 | +int drm_gem_pin(struct drm_gem_object *obj) |
---|
| 1191 | +{ |
---|
| 1192 | + if (obj->funcs && obj->funcs->pin) |
---|
| 1193 | + return obj->funcs->pin(obj); |
---|
| 1194 | + else if (obj->dev->driver->gem_prime_pin) |
---|
| 1195 | + return obj->dev->driver->gem_prime_pin(obj); |
---|
| 1196 | + else |
---|
| 1197 | + return 0; |
---|
| 1198 | +} |
---|
| 1199 | + |
---|
| 1200 | +void drm_gem_unpin(struct drm_gem_object *obj) |
---|
| 1201 | +{ |
---|
| 1202 | + if (obj->funcs && obj->funcs->unpin) |
---|
| 1203 | + obj->funcs->unpin(obj); |
---|
| 1204 | + else if (obj->dev->driver->gem_prime_unpin) |
---|
| 1205 | + obj->dev->driver->gem_prime_unpin(obj); |
---|
| 1206 | +} |
---|
| 1207 | + |
---|
| 1208 | +void *drm_gem_vmap(struct drm_gem_object *obj) |
---|
| 1209 | +{ |
---|
| 1210 | + void *vaddr; |
---|
| 1211 | + |
---|
| 1212 | + if (obj->funcs && obj->funcs->vmap) |
---|
| 1213 | + vaddr = obj->funcs->vmap(obj); |
---|
| 1214 | + else if (obj->dev->driver->gem_prime_vmap) |
---|
| 1215 | + vaddr = obj->dev->driver->gem_prime_vmap(obj); |
---|
| 1216 | + else |
---|
| 1217 | + vaddr = ERR_PTR(-EOPNOTSUPP); |
---|
| 1218 | + |
---|
| 1219 | + if (!vaddr) |
---|
| 1220 | + vaddr = ERR_PTR(-ENOMEM); |
---|
| 1221 | + |
---|
| 1222 | + return vaddr; |
---|
| 1223 | +} |
---|
| 1224 | + |
---|
| 1225 | +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) |
---|
| 1226 | +{ |
---|
| 1227 | + if (!vaddr) |
---|
| 1228 | + return; |
---|
| 1229 | + |
---|
| 1230 | + if (obj->funcs && obj->funcs->vunmap) |
---|
| 1231 | + obj->funcs->vunmap(obj, vaddr); |
---|
| 1232 | + else if (obj->dev->driver->gem_prime_vunmap) |
---|
| 1233 | + obj->dev->driver->gem_prime_vunmap(obj, vaddr); |
---|
| 1234 | +} |
---|
| 1235 | + |
---|
| 1236 | +/** |
---|
| 1237 | + * drm_gem_lock_reservations - Sets up the ww context and acquires |
---|
| 1238 | + * the lock on an array of GEM objects. |
---|
| 1239 | + * |
---|
| 1240 | + * Once you've locked your reservations, you'll want to set up space |
---|
| 1241 | + * for your shared fences (if applicable), submit your job, then |
---|
| 1242 | + * drm_gem_unlock_reservations(). |
---|
| 1243 | + * |
---|
| 1244 | + * @objs: drm_gem_objects to lock |
---|
| 1245 | + * @count: Number of objects in @objs |
---|
| 1246 | + * @acquire_ctx: struct ww_acquire_ctx that will be initialized as |
---|
| 1247 | + * part of tracking this set of locked reservations. |
---|
| 1248 | + */ |
---|
| 1249 | +int |
---|
| 1250 | +drm_gem_lock_reservations(struct drm_gem_object **objs, int count, |
---|
| 1251 | + struct ww_acquire_ctx *acquire_ctx) |
---|
| 1252 | +{ |
---|
| 1253 | + int contended = -1; |
---|
| 1254 | + int i, ret; |
---|
| 1255 | + |
---|
| 1256 | + ww_acquire_init(acquire_ctx, &reservation_ww_class); |
---|
| 1257 | + |
---|
| 1258 | +retry: |
---|
| 1259 | + if (contended != -1) { |
---|
| 1260 | + struct drm_gem_object *obj = objs[contended]; |
---|
| 1261 | + |
---|
| 1262 | + ret = dma_resv_lock_slow_interruptible(obj->resv, |
---|
| 1263 | + acquire_ctx); |
---|
| 1264 | + if (ret) { |
---|
| 1265 | + ww_acquire_fini(acquire_ctx); |
---|
| 1266 | + return ret; |
---|
| 1267 | + } |
---|
| 1268 | + } |
---|
| 1269 | + |
---|
| 1270 | + for (i = 0; i < count; i++) { |
---|
| 1271 | + if (i == contended) |
---|
| 1272 | + continue; |
---|
| 1273 | + |
---|
| 1274 | + ret = dma_resv_lock_interruptible(objs[i]->resv, |
---|
| 1275 | + acquire_ctx); |
---|
| 1276 | + if (ret) { |
---|
| 1277 | + int j; |
---|
| 1278 | + |
---|
| 1279 | + for (j = 0; j < i; j++) |
---|
| 1280 | + dma_resv_unlock(objs[j]->resv); |
---|
| 1281 | + |
---|
| 1282 | + if (contended != -1 && contended >= i) |
---|
| 1283 | + dma_resv_unlock(objs[contended]->resv); |
---|
| 1284 | + |
---|
| 1285 | + if (ret == -EDEADLK) { |
---|
| 1286 | + contended = i; |
---|
| 1287 | + goto retry; |
---|
| 1288 | + } |
---|
| 1289 | + |
---|
| 1290 | + ww_acquire_fini(acquire_ctx); |
---|
| 1291 | + return ret; |
---|
| 1292 | + } |
---|
| 1293 | + } |
---|
| 1294 | + |
---|
| 1295 | + ww_acquire_done(acquire_ctx); |
---|
| 1296 | + |
---|
| 1297 | + return 0; |
---|
| 1298 | +} |
---|
| 1299 | +EXPORT_SYMBOL(drm_gem_lock_reservations); |
---|
| 1300 | + |
---|
| 1301 | +void |
---|
| 1302 | +drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, |
---|
| 1303 | + struct ww_acquire_ctx *acquire_ctx) |
---|
| 1304 | +{ |
---|
| 1305 | + int i; |
---|
| 1306 | + |
---|
| 1307 | + for (i = 0; i < count; i++) |
---|
| 1308 | + dma_resv_unlock(objs[i]->resv); |
---|
| 1309 | + |
---|
| 1310 | + ww_acquire_fini(acquire_ctx); |
---|
| 1311 | +} |
---|
| 1312 | +EXPORT_SYMBOL(drm_gem_unlock_reservations); |
---|
| 1313 | + |
---|
| 1314 | +/** |
---|
| 1315 | + * drm_gem_fence_array_add - Adds the fence to an array of fences to be |
---|
| 1316 | + * waited on, deduplicating fences from the same context. |
---|
| 1317 | + * |
---|
| 1318 | + * @fence_array: array of dma_fence * for the job to block on. |
---|
| 1319 | + * @fence: the dma_fence to add to the list of dependencies. |
---|
| 1320 | + * |
---|
| 1321 | + * Returns: |
---|
| 1322 | + * 0 on success, or an error on failing to expand the array. |
---|
| 1323 | + */ |
---|
| 1324 | +int drm_gem_fence_array_add(struct xarray *fence_array, |
---|
| 1325 | + struct dma_fence *fence) |
---|
| 1326 | +{ |
---|
| 1327 | + struct dma_fence *entry; |
---|
| 1328 | + unsigned long index; |
---|
| 1329 | + u32 id = 0; |
---|
| 1330 | + int ret; |
---|
| 1331 | + |
---|
| 1332 | + if (!fence) |
---|
| 1333 | + return 0; |
---|
| 1334 | + |
---|
| 1335 | + /* Deduplicate if we already depend on a fence from the same context. |
---|
| 1336 | + * This lets the size of the array of deps scale with the number of |
---|
| 1337 | + * engines involved, rather than the number of BOs. |
---|
| 1338 | + */ |
---|
| 1339 | + xa_for_each(fence_array, index, entry) { |
---|
| 1340 | + if (entry->context != fence->context) |
---|
| 1341 | + continue; |
---|
| 1342 | + |
---|
| 1343 | + if (dma_fence_is_later(fence, entry)) { |
---|
| 1344 | + dma_fence_put(entry); |
---|
| 1345 | + xa_store(fence_array, index, fence, GFP_KERNEL); |
---|
| 1346 | + } else { |
---|
| 1347 | + dma_fence_put(fence); |
---|
| 1348 | + } |
---|
| 1349 | + return 0; |
---|
| 1350 | + } |
---|
| 1351 | + |
---|
| 1352 | + ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL); |
---|
| 1353 | + if (ret != 0) |
---|
| 1354 | + dma_fence_put(fence); |
---|
| 1355 | + |
---|
| 1356 | + return ret; |
---|
| 1357 | +} |
---|
| 1358 | +EXPORT_SYMBOL(drm_gem_fence_array_add); |
---|
| 1359 | + |
---|
| 1360 | +/** |
---|
| 1361 | + * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked |
---|
| 1362 | + * in the GEM object's reservation object to an array of dma_fences for use in |
---|
| 1363 | + * scheduling a rendering job. |
---|
| 1364 | + * |
---|
| 1365 | + * This should be called after drm_gem_lock_reservations() on your array of |
---|
| 1366 | + * GEM objects used in the job but before updating the reservations with your |
---|
| 1367 | + * own fences. |
---|
| 1368 | + * |
---|
| 1369 | + * @fence_array: array of dma_fence * for the job to block on. |
---|
| 1370 | + * @obj: the gem object to add new dependencies from. |
---|
| 1371 | + * @write: whether the job might write the object (so we need to depend on |
---|
| 1372 | + * shared fences in the reservation object). |
---|
| 1373 | + */ |
---|
| 1374 | +int drm_gem_fence_array_add_implicit(struct xarray *fence_array, |
---|
| 1375 | + struct drm_gem_object *obj, |
---|
| 1376 | + bool write) |
---|
| 1377 | +{ |
---|
| 1378 | + int ret; |
---|
| 1379 | + struct dma_fence **fences; |
---|
| 1380 | + unsigned int i, fence_count; |
---|
| 1381 | + |
---|
| 1382 | + if (!write) { |
---|
| 1383 | + struct dma_fence *fence = |
---|
| 1384 | + dma_resv_get_excl_rcu(obj->resv); |
---|
| 1385 | + |
---|
| 1386 | + return drm_gem_fence_array_add(fence_array, fence); |
---|
| 1387 | + } |
---|
| 1388 | + |
---|
| 1389 | + ret = dma_resv_get_fences_rcu(obj->resv, NULL, |
---|
| 1390 | + &fence_count, &fences); |
---|
| 1391 | + if (ret || !fence_count) |
---|
| 1392 | + return ret; |
---|
| 1393 | + |
---|
| 1394 | + for (i = 0; i < fence_count; i++) { |
---|
| 1395 | + ret = drm_gem_fence_array_add(fence_array, fences[i]); |
---|
| 1396 | + if (ret) |
---|
| 1397 | + break; |
---|
| 1398 | + } |
---|
| 1399 | + |
---|
| 1400 | + for (; i < fence_count; i++) |
---|
| 1401 | + dma_fence_put(fences[i]); |
---|
| 1402 | + kfree(fences); |
---|
| 1403 | + return ret; |
---|
| 1404 | +} |
---|
| 1405 | +EXPORT_SYMBOL(drm_gem_fence_array_add_implicit); |
---|