forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/radeon/radeon_mn.c
@@ -31,218 +31,57 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/mmu_notifier.h>
-#include <drm/drmP.h>
+
 #include <drm/drm.h>
 
 #include "radeon.h"
 
-struct radeon_mn {
-	/* constant after initialisation */
-	struct radeon_device *rdev;
-	struct mm_struct *mm;
-	struct mmu_notifier mn;
-
-	/* only used on destruction */
-	struct work_struct work;
-
-	/* protected by rdev->mn_lock */
-	struct hlist_node node;
-
-	/* objects protected by lock */
-	struct mutex lock;
-	struct rb_root_cached objects;
-};
-
-struct radeon_mn_node {
-	struct interval_tree_node it;
-	struct list_head bos;
-};
-
 /**
- * radeon_mn_destroy - destroy the rmn
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void radeon_mn_destroy(struct work_struct *work)
-{
-	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
-	struct radeon_device *rdev = rmn->rdev;
-	struct radeon_mn_node *node, *next_node;
-	struct radeon_bo *bo, *next_bo;
-
-	mutex_lock(&rdev->mn_lock);
-	mutex_lock(&rmn->lock);
-	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(node, next_node,
-					     &rmn->objects.rb_root, it.rb) {
-
-		interval_tree_remove(&node->it, &rmn->objects);
-		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
-			bo->mn = NULL;
-			list_del_init(&bo->mn_list);
-		}
-		kfree(node);
-	}
-	mutex_unlock(&rmn->lock);
-	mutex_unlock(&rdev->mn_lock);
-	mmu_notifier_unregister(&rmn->mn, rmn->mm);
-	kfree(rmn);
-}
-
-/**
- * radeon_mn_release - callback to notify about mm destruction
+ * radeon_mn_invalidate - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
- *
- * Shedule a work item to lazy destroy our notifier.
- */
-static void radeon_mn_release(struct mmu_notifier *mn,
-			      struct mm_struct *mm)
-{
-	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
-	INIT_WORK(&rmn->work, radeon_mn_destroy);
-	schedule_work(&rmn->work);
-}
-
-/**
- * radeon_mn_invalidate_range_start - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: the VMA under invalidation
  *
  * We block for all BOs between start and end to be idle and
  * unmap them by move them into system domain again.
  */
-static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
-					    struct mm_struct *mm,
-					    unsigned long start,
-					    unsigned long end,
-					    bool blockable)
+static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
+				 const struct mmu_notifier_range *range,
+				 unsigned long cur_seq)
 {
-	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+	struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
 	struct ttm_operation_ctx ctx = { false, false };
-	struct interval_tree_node *it;
-	int ret = 0;
+	long r;
 
-	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
+	if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))
+		return true;
 
-	/* TODO we should be able to split locking for interval tree and
-	 * the tear down.
-	 */
-	if (blockable)
-		mutex_lock(&rmn->lock);
-	else if (!mutex_trylock(&rmn->lock))
-		return -EAGAIN;
+	if (!mmu_notifier_range_blockable(range))
+		return false;
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
-	while (it) {
-		struct radeon_mn_node *node;
-		struct radeon_bo *bo;
-		long r;
-
-		if (!blockable) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-
-		node = container_of(it, struct radeon_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
-
-		list_for_each_entry(bo, &node->bos, mn_list) {
-
-			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
-				continue;
-
-			r = radeon_bo_reserve(bo, true);
-			if (r) {
-				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-				continue;
-			}
-
-			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-				true, false, MAX_SCHEDULE_TIMEOUT);
-			if (r <= 0)
-				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-			if (r)
-				DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-			radeon_bo_unreserve(bo);
-		}
-	}
-
-out_unlock:
-	mutex_unlock(&rmn->lock);
-
-	return ret;
-}
-
-static const struct mmu_notifier_ops radeon_mn_ops = {
-	.release = radeon_mn_release,
-	.invalidate_range_start = radeon_mn_invalidate_range_start,
-};
-
-/**
- * radeon_mn_get - create notifier context
- *
- * @rdev: radeon device pointer
- *
- * Creates a notifier context for current->mm.
- */
-static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
-{
-	struct mm_struct *mm = current->mm;
-	struct radeon_mn *rmn;
-	int r;
-
-	if (down_write_killable(&mm->mmap_sem))
-		return ERR_PTR(-EINTR);
-
-	mutex_lock(&rdev->mn_lock);
-
-	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
-		if (rmn->mm == mm)
-			goto release_locks;
-
-	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-	if (!rmn) {
-		rmn = ERR_PTR(-ENOMEM);
-		goto release_locks;
+	r = radeon_bo_reserve(bo, true);
+	if (r) {
+		DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+		return true;
 	}
 
-	rmn->rdev = rdev;
-	rmn->mm = mm;
-	rmn->mn.ops = &radeon_mn_ops;
-	mutex_init(&rmn->lock);
-	rmn->objects = RB_ROOT_CACHED;
-
-	r = __mmu_notifier_register(&rmn->mn, mm);
+	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+				      MAX_SCHEDULE_TIMEOUT);
+	if (r <= 0)
+		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
-		goto free_rmn;
+		DRM_ERROR("(%ld) failed to validate user bo\n", r);
 
-	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);
-
-release_locks:
-	mutex_unlock(&rdev->mn_lock);
-	up_write(&mm->mmap_sem);
-
-	return rmn;
-
-free_rmn:
-	mutex_unlock(&rdev->mn_lock);
-	up_write(&mm->mmap_sem);
-	kfree(rmn);
-
-	return ERR_PTR(r);
+	radeon_bo_unreserve(bo);
+	return true;
 }
+
+static const struct mmu_interval_notifier_ops radeon_mn_ops = {
+	.invalidate = radeon_mn_invalidate,
+};
 
 /**
  * radeon_mn_register - register a BO for notifier updates
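
Note on the hunk above: the driver-private per-mm state (struct radeon_mn with its hash node, interval tree of radeon_mn_node entries and lazy-destroy work item) is replaced by a single struct mmu_interval_notifier embedded in each userptr BO, so the core mm now tracks the address range and the notifier lifetime. A minimal sketch of that callback pattern follows; everything outside the mmu_interval_notifier API (my_userptr_obj, my_invalidate, obj->lock) is hypothetical and only illustrates the shape of the code this patch switches to, it is not radeon code.

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_userptr_obj {
	struct mmu_interval_notifier notifier;	/* embedded, like bo->notifier above */
	struct mutex lock;			/* serializes page users vs. invalidation */
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_userptr_obj *obj =
		container_of(mni, struct my_userptr_obj, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;	/* only allowed when the range is non-blockable */

	mutex_lock(&obj->lock);
	/* bump the sequence so concurrent readers know their pages are stale */
	mmu_interval_set_seq(mni, cur_seq);
	/* ... stop the device from using the pages, as the hunk above does by
	 * waiting for the BO fences and moving it back to the CPU domain ... */
	mutex_unlock(&obj->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_ops = {
	.invalidate = my_invalidate,
};

Unlike this sketch, radeon_mn_invalidate above takes no driver lock and does not call mmu_interval_set_seq(); it simply blocks until the BO is idle, which is what the FIXME in the next hunk is flagging.
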
@@ -255,50 +94,20 @@
  */
 int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
 {
-	unsigned long end = addr + radeon_bo_size(bo) - 1;
-	struct radeon_device *rdev = bo->rdev;
-	struct radeon_mn *rmn;
-	struct radeon_mn_node *node = NULL;
-	struct list_head bos;
-	struct interval_tree_node *it;
+	int ret;
 
-	rmn = radeon_mn_get(rdev);
-	if (IS_ERR(rmn))
-		return PTR_ERR(rmn);
+	ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+					   radeon_bo_size(bo), &radeon_mn_ops);
+	if (ret)
+		return ret;
 
-	INIT_LIST_HEAD(&bos);
-
-	mutex_lock(&rmn->lock);
-
-	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
-		kfree(node);
-		node = container_of(it, struct radeon_mn_node, it);
-		interval_tree_remove(&node->it, &rmn->objects);
-		addr = min(it->start, addr);
-		end = max(it->last, end);
-		list_splice(&node->bos, &bos);
-	}
-
-	if (!node) {
-		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
-		if (!node) {
-			mutex_unlock(&rmn->lock);
-			return -ENOMEM;
-		}
-	}
-
-	bo->mn = rmn;
-
-	node->it.start = addr;
-	node->it.last = end;
-	INIT_LIST_HEAD(&node->bos);
-	list_splice(&bos, &node->bos);
-	list_add(&bo->mn_list, &node->bos);
-
-	interval_tree_insert(&node->it, &rmn->objects);
-
-	mutex_unlock(&rmn->lock);
-
+	/*
+	 * FIXME: radeon appears to allow get_user_pages to run during
+	 * invalidate_range_start/end, which is not a safe way to read the
+	 * PTEs. It should use the mmu_interval_read_begin() scheme around the
+	 * get_user_pages to ensure that the PTEs are read properly
+	 */
+	mmu_interval_read_begin(&bo->notifier);
 	return 0;
 }
 
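
The FIXME above refers to the sequence-number scheme documented for mmu_interval_notifier: take a sequence with mmu_interval_read_begin(), perform the get_user_pages()/pin outside of any driver lock, then, under the same lock the invalidate callback takes, check mmu_interval_read_retry() and redo the pin if an invalidation raced in. The lone mmu_interval_read_begin() call in the hunk does not implement that scheme; it keeps radeon's existing behaviour while flagging the problem. A hedged sketch of the full pattern, reusing the hypothetical my_userptr_obj from the earlier note (pin_user_pages_fast()/unpin_user_pages() come from linux/mm.h):

static int my_pin_user_range(struct my_userptr_obj *obj, unsigned long start,
			     int npages, struct page **pages)
{
	unsigned long seq;
	int pinned;

retry:
	seq = mmu_interval_read_begin(&obj->notifier);

	/* read the page tables outside of any driver lock */
	pinned = pin_user_pages_fast(start, npages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;
	if (pinned != npages) {
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	/* publish the result under the lock the invalidate callback takes */
	mutex_lock(&obj->lock);
	if (mmu_interval_read_retry(&obj->notifier, seq)) {
		/* an invalidation raced with the pin; drop it and start over */
		mutex_unlock(&obj->lock);
		unpin_user_pages(pages, npages);
		goto retry;
	}
	mutex_unlock(&obj->lock);

	/* pages now match the PTEs and stay usable until the next invalidate */
	return 0;
}
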
@@ -311,31 +120,8 @@
  */
 void radeon_mn_unregister(struct radeon_bo *bo)
 {
-	struct radeon_device *rdev = bo->rdev;
-	struct radeon_mn *rmn;
-	struct list_head *head;
-
-	mutex_lock(&rdev->mn_lock);
-	rmn = bo->mn;
-	if (rmn == NULL) {
-		mutex_unlock(&rdev->mn_lock);
+	if (!bo->notifier.mm)
 		return;
-	}
-
-	mutex_lock(&rmn->lock);
-	/* save the next list entry for later */
-	head = bo->mn_list.next;
-
-	bo->mn = NULL;
-	list_del(&bo->mn_list);
-
-	if (list_empty(head)) {
-		struct radeon_mn_node *node;
-		node = container_of(head, struct radeon_mn_node, bos);
-		interval_tree_remove(&node->it, &rmn->objects);
-		kfree(node);
-	}
-
-	mutex_unlock(&rmn->lock);
-	mutex_unlock(&rdev->mn_lock);
+	mmu_interval_notifier_remove(&bo->notifier);
+	bo->notifier.mm = NULL;
 }
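
Not visible in this diff is the matching change to struct radeon_bo: the old mn pointer and mn_list entry give way to the embedded notifier that radeon_mn_invalidate(), radeon_mn_register() and radeon_mn_unregister() operate on. Roughly (the exact declaration and its CONFIG_MMU_NOTIFIER guard live in radeon.h and are assumed here, not quoted):

struct radeon_bo {
	/* ... existing members ... */
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
};

Because mmu_interval_notifier_insert() records the mm in bo->notifier.mm and radeon_mn_unregister() clears it again after mmu_interval_notifier_remove(), the !bo->notifier.mm check above makes unregister a safe no-op for BOs that were never registered or have already been unregistered.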