forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/ttm/ttm_bo.c
@@ -41,9 +41,17 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
 
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
@@ -56,51 +64,22 @@
 	kfree(bo);
 }
 
-static inline int ttm_mem_type_from_place(const struct ttm_place *place,
-					  uint32_t *mem_type)
-{
-	int pos;
-
-	pos = ffs(place->flags & TTM_PL_MASK_MEM);
-	if (unlikely(!pos))
-		return -EINVAL;
-
-	*mem_type = pos - 1;
-	return 0;
-}
-
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_printer p = drm_debug_printer(TTM_PFX);
-
-	pr_err("    has_type: %d\n", man->has_type);
-	pr_err("    use_type: %d\n", man->use_type);
-	pr_err("    flags: 0x%08X\n", man->flags);
-	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
-	pr_err("    size: %llu\n", man->size);
-	pr_err("    available_caching: 0x%08X\n", man->available_caching);
-	pr_err("    default_caching: 0x%08X\n", man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM)
-		(*man->func->debug)(man, &p);
-}
-
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 					struct ttm_placement *placement)
 {
-	int i, ret, mem_type;
+	struct drm_printer p = drm_debug_printer(TTM_PFX);
+	struct ttm_resource_manager *man;
+	int i, mem_type;
 
-	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
-	       bo, bo->mem.num_pages, bo->mem.size >> 10,
-	       bo->mem.size >> 20);
+	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
+		   bo, bo->mem.num_pages, bo->mem.size >> 10,
+		   bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
-		ret = ttm_mem_type_from_place(&placement->placement[i],
-					      &mem_type);
-		if (ret)
-			return;
-		pr_err("  placement[%d]=0x%08X (%d)\n",
-		       i, placement->placement[i].flags, mem_type);
-		ttm_mem_type_debug(bo->bdev, mem_type);
+		mem_type = placement->placement[i].mem_type;
+		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
+			   i, placement->placement[i].flags, mem_type);
+		man = ttm_manager_type(bo->bdev, mem_type);
+		ttm_resource_manager_debug(man, &p);
 	}
 }
 
@@ -130,137 +109,162 @@
130109 .default_attrs = ttm_bo_global_attrs
131110 };
132111
133
-
134
-static inline uint32_t ttm_bo_type_flags(unsigned type)
135
-{
136
- return 1 << (type);
137
-}
138
-
139
-static void ttm_bo_release_list(struct kref *list_kref)
140
-{
141
- struct ttm_buffer_object *bo =
142
- container_of(list_kref, struct ttm_buffer_object, list_kref);
143
- struct ttm_bo_device *bdev = bo->bdev;
144
- size_t acc_size = bo->acc_size;
145
-
146
- BUG_ON(kref_read(&bo->list_kref));
147
- BUG_ON(kref_read(&bo->kref));
148
- BUG_ON(atomic_read(&bo->cpu_writers));
149
- BUG_ON(bo->mem.mm_node != NULL);
150
- BUG_ON(!list_empty(&bo->lru));
151
- BUG_ON(!list_empty(&bo->ddestroy));
152
- ttm_tt_destroy(bo->ttm);
153
- atomic_dec(&bo->bdev->glob->bo_count);
154
- dma_fence_put(bo->moving);
155
- reservation_object_fini(&bo->ttm_resv);
156
- mutex_destroy(&bo->wu_mutex);
157
- bo->destroy(bo);
158
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
159
-}
160
-
161
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
112
+static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
113
+ struct ttm_resource *mem)
162114 {
163115 struct ttm_bo_device *bdev = bo->bdev;
164
- struct ttm_mem_type_manager *man;
116
+ struct ttm_resource_manager *man;
165117
166
- reservation_object_assert_held(bo->resv);
118
+ if (!list_empty(&bo->lru))
119
+ return;
167120
168
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
169
- BUG_ON(!list_empty(&bo->lru));
121
+ if (mem->placement & TTM_PL_FLAG_NO_EVICT)
122
+ return;
170123
171
- man = &bdev->man[bo->mem.mem_type];
172
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
173
- kref_get(&bo->list_kref);
124
+ man = ttm_manager_type(bdev, mem->mem_type);
125
+ list_add_tail(&bo->lru, &man->lru[bo->priority]);
174126
175
- if (bo->ttm && !(bo->ttm->page_flags &
176
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
177
- list_add_tail(&bo->swap,
178
- &bdev->glob->swap_lru[bo->priority]);
179
- kref_get(&bo->list_kref);
180
- }
127
+ if (man->use_tt && bo->ttm &&
128
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
129
+ TTM_PAGE_FLAG_SWAPPED))) {
130
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
181131 }
182132 }
183
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
184133
185
-static void ttm_bo_ref_bug(struct kref *list_kref)
134
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
186135 {
187
- BUG();
188
-}
136
+ struct ttm_bo_device *bdev = bo->bdev;
137
+ bool notify = false;
189138
190
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
191
-{
192139 if (!list_empty(&bo->swap)) {
193140 list_del_init(&bo->swap);
194
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
141
+ notify = true;
195142 }
196143 if (!list_empty(&bo->lru)) {
197144 list_del_init(&bo->lru);
198
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
145
+ notify = true;
199146 }
200147
201
- /*
202
- * TODO: Add a driver hook to delete from
203
- * driver-specific LRU's here.
204
- */
148
+ if (notify && bdev->driver->del_from_lru_notify)
149
+ bdev->driver->del_from_lru_notify(bo);
205150 }
206151
207
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
152
+static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
153
+ struct ttm_buffer_object *bo)
208154 {
209
- struct ttm_bo_global *glob = bo->bdev->glob;
210
-
211
- spin_lock(&glob->lru_lock);
212
- ttm_bo_del_from_lru(bo);
213
- spin_unlock(&glob->lru_lock);
155
+ if (!pos->first)
156
+ pos->first = bo;
157
+ pos->last = bo;
214158 }
215
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
216159
217
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
160
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
161
+ struct ttm_lru_bulk_move *bulk)
218162 {
219
- reservation_object_assert_held(bo->resv);
163
+ dma_resv_assert_held(bo->base.resv);
220164
221165 ttm_bo_del_from_lru(bo);
222
- ttm_bo_add_to_lru(bo);
166
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
167
+
168
+ if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
169
+ switch (bo->mem.mem_type) {
170
+ case TTM_PL_TT:
171
+ ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
172
+ break;
173
+
174
+ case TTM_PL_VRAM:
175
+ ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
176
+ break;
177
+ }
178
+ if (bo->ttm && !(bo->ttm->page_flags &
179
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
180
+ ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
181
+ }
223182 }
224183 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
225184
185
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
186
+{
187
+ unsigned i;
188
+
189
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
190
+ struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
191
+ struct ttm_resource_manager *man;
192
+
193
+ if (!pos->first)
194
+ continue;
195
+
196
+ dma_resv_assert_held(pos->first->base.resv);
197
+ dma_resv_assert_held(pos->last->base.resv);
198
+
199
+ man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
200
+ list_bulk_move_tail(&man->lru[i], &pos->first->lru,
201
+ &pos->last->lru);
202
+ }
203
+
204
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
205
+ struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
206
+ struct ttm_resource_manager *man;
207
+
208
+ if (!pos->first)
209
+ continue;
210
+
211
+ dma_resv_assert_held(pos->first->base.resv);
212
+ dma_resv_assert_held(pos->last->base.resv);
213
+
214
+ man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
215
+ list_bulk_move_tail(&man->lru[i], &pos->first->lru,
216
+ &pos->last->lru);
217
+ }
218
+
219
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
220
+ struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
221
+ struct list_head *lru;
222
+
223
+ if (!pos->first)
224
+ continue;
225
+
226
+ dma_resv_assert_held(pos->first->base.resv);
227
+ dma_resv_assert_held(pos->last->base.resv);
228
+
229
+ lru = &ttm_bo_glob.swap_lru[i];
230
+ list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
231
+ }
232
+}
233
+EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
234
+
226235 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
227
- struct ttm_mem_reg *mem, bool evict,
236
+ struct ttm_resource *mem, bool evict,
228237 struct ttm_operation_ctx *ctx)
229238 {
230239 struct ttm_bo_device *bdev = bo->bdev;
231
- bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
232
- bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
233
- struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
234
- struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
235
- int ret = 0;
240
+ struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
241
+ struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
242
+ int ret;
236243
237
- if (old_is_pci || new_is_pci ||
238
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
239
- ret = ttm_mem_io_lock(old_man, true);
240
- if (unlikely(ret != 0))
241
- goto out_err;
242
- ttm_bo_unmap_virtual_locked(bo);
243
- ttm_mem_io_unlock(old_man);
244
- }
244
+ ttm_bo_unmap_virtual(bo);
245245
246246 /*
247247 * Create and bind a ttm if required.
248248 */
249249
250
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
251
- if (bo->ttm == NULL) {
252
- bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
253
- ret = ttm_tt_create(bo, zero);
254
- if (ret)
255
- goto out_err;
256
- }
250
+ if (new_man->use_tt) {
251
+ /* Zero init the new TTM structure if the old location should
252
+ * have used one as well.
253
+ */
254
+ ret = ttm_tt_create(bo, old_man->use_tt);
255
+ if (ret)
256
+ goto out_err;
257257
258258 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
259259 if (ret)
260260 goto out_err;
261261
262262 if (mem->mem_type != TTM_PL_SYSTEM) {
263
- ret = ttm_tt_bind(bo->ttm, mem, ctx);
263
+ ret = ttm_tt_populate(bdev, bo->ttm, ctx);
264
+ if (ret)
265
+ goto out_err;
266
+
267
+ ret = ttm_bo_tt_bind(bo, mem);
264268 if (ret)
265269 goto out_err;
266270 }
@@ -269,7 +273,6 @@
 			if (bdev->driver->move_notify)
 				bdev->driver->move_notify(bo, evict, mem);
 			bo->mem = *mem;
-			mem->mm_node = NULL;
 			goto moved;
 		}
 	}
@@ -277,8 +280,7 @@
 	if (bdev->driver->move_notify)
 		bdev->driver->move_notify(bo, evict, mem);
 
-	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+	if (old_man->use_tt && new_man->use_tt)
 		ret = ttm_bo_move_ttm(bo, ctx, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, ctx, mem);
@@ -296,30 +298,13 @@
 	}
 
 moved:
-	if (bo->evicted) {
-		if (bdev->driver->invalidate_caches) {
-			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-			if (ret)
-				pr_err("Can not flush read caches\n");
-		}
-		bo->evicted = false;
-	}
-
-	if (bo->mem.mm_node)
-		bo->offset = (bo->mem.start << PAGE_SHIFT) +
-			bdev->man[bo->mem.mem_type].gpu_offset;
-	else
-		bo->offset = 0;
-
 	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
 	return 0;
 
 out_err:
-	new_man = &bdev->man[bo->mem.mem_type];
-	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-		ttm_tt_destroy(bo->ttm);
-		bo->ttm = NULL;
-	}
+	new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+	if (!new_man->use_tt)
+		ttm_bo_tt_destroy(bo);
 
 	return ret;
 }
@@ -337,108 +322,63 @@
337322 if (bo->bdev->driver->move_notify)
338323 bo->bdev->driver->move_notify(bo, false, NULL);
339324
340
- ttm_tt_destroy(bo->ttm);
341
- bo->ttm = NULL;
342
- ttm_bo_mem_put(bo, &bo->mem);
325
+ ttm_bo_tt_destroy(bo);
326
+ ttm_resource_free(bo, &bo->mem);
343327 }
344328
345329 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
346330 {
347331 int r;
348332
349
- if (bo->resv == &bo->ttm_resv)
333
+ if (bo->base.resv == &bo->base._resv)
350334 return 0;
351335
352
- BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
336
+ BUG_ON(!dma_resv_trylock(&bo->base._resv));
353337
354
- r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
338
+ r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
339
+ dma_resv_unlock(&bo->base._resv);
355340 if (r)
356
- reservation_object_unlock(&bo->ttm_resv);
341
+ return r;
342
+
343
+ if (bo->type != ttm_bo_type_sg) {
344
+ /* This works because the BO is about to be destroyed and nobody
345
+ * reference it any more. The only tricky case is the trylock on
346
+ * the resv object while holding the lru_lock.
347
+ */
348
+ spin_lock(&ttm_bo_glob.lru_lock);
349
+ bo->base.resv = &bo->base._resv;
350
+ spin_unlock(&ttm_bo_glob.lru_lock);
351
+ }
357352
358353 return r;
359354 }
360355
361356 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
362357 {
363
- struct reservation_object_list *fobj;
358
+ struct dma_resv *resv = &bo->base._resv;
359
+ struct dma_resv_list *fobj;
364360 struct dma_fence *fence;
365361 int i;
366362
367
- fobj = reservation_object_get_list(&bo->ttm_resv);
368
- fence = reservation_object_get_excl(&bo->ttm_resv);
363
+ rcu_read_lock();
364
+ fobj = rcu_dereference(resv->fence);
365
+ fence = rcu_dereference(resv->fence_excl);
369366 if (fence && !fence->ops->signaled)
370367 dma_fence_enable_sw_signaling(fence);
371368
372369 for (i = 0; fobj && i < fobj->shared_count; ++i) {
373
- fence = rcu_dereference_protected(fobj->shared[i],
374
- reservation_object_held(bo->resv));
370
+ fence = rcu_dereference(fobj->shared[i]);
375371
376372 if (!fence->ops->signaled)
377373 dma_fence_enable_sw_signaling(fence);
378374 }
379
-}
380
-
381
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
382
-{
383
- struct ttm_bo_device *bdev = bo->bdev;
384
- struct ttm_bo_global *glob = bdev->glob;
385
- int ret;
386
-
387
- ret = ttm_bo_individualize_resv(bo);
388
- if (ret) {
389
- /* Last resort, if we fail to allocate memory for the
390
- * fences block for the BO to become idle
391
- */
392
- reservation_object_wait_timeout_rcu(bo->resv, true, false,
393
- 30 * HZ);
394
- spin_lock(&glob->lru_lock);
395
- goto error;
396
- }
397
-
398
- spin_lock(&glob->lru_lock);
399
- ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
400
- if (!ret) {
401
- if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
402
- ttm_bo_del_from_lru(bo);
403
- spin_unlock(&glob->lru_lock);
404
- if (bo->resv != &bo->ttm_resv)
405
- reservation_object_unlock(&bo->ttm_resv);
406
-
407
- ttm_bo_cleanup_memtype_use(bo);
408
- reservation_object_unlock(bo->resv);
409
- return;
410
- }
411
-
412
- ttm_bo_flush_all_fences(bo);
413
-
414
- /*
415
- * Make NO_EVICT bos immediately available to
416
- * shrinkers, now that they are queued for
417
- * destruction.
418
- */
419
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
420
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
421
- ttm_bo_add_to_lru(bo);
422
- }
423
-
424
- reservation_object_unlock(bo->resv);
425
- }
426
- if (bo->resv != &bo->ttm_resv)
427
- reservation_object_unlock(&bo->ttm_resv);
428
-
429
-error:
430
- kref_get(&bo->list_kref);
431
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
432
- spin_unlock(&glob->lru_lock);
433
-
434
- schedule_delayed_work(&bdev->wq,
435
- ((HZ / 100) < 1) ? 1 : HZ / 100);
375
+ rcu_read_unlock();
436376 }
437377
438378 /**
439379 * function ttm_bo_cleanup_refs
440
- * If bo idle, remove from delayed- and lru lists, and unref.
441
- * If not idle, do nothing.
380
+ * If bo idle, remove from lru lists, and unref.
381
+ * If not idle, block if possible.
442382 *
443383 * Must be called with lru_lock and reservation held, this function
444384 * will drop the lru lock and optionally the reservation lock before returning.
@@ -452,16 +392,10 @@
 			       bool interruptible, bool no_wait_gpu,
 			       bool unlock_resv)
 {
-	struct ttm_bo_global *glob = bo->bdev->glob;
-	struct reservation_object *resv;
+	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (unlikely(list_empty(&bo->ddestroy)))
-		resv = bo->resv;
-	else
-		resv = &bo->ttm_resv;
-
-	if (reservation_object_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled_rcu(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -470,20 +404,19 @@
 		long lret;
 
 		if (unlock_resv)
-			reservation_object_unlock(bo->resv);
-		spin_unlock(&glob->lru_lock);
+			dma_resv_unlock(bo->base.resv);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 
-		lret = reservation_object_wait_timeout_rcu(resv, true,
-							   interruptible,
-							   30 * HZ);
+		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+						 30 * HZ);
 
 		if (lret < 0)
 			return lret;
 		else if (lret == 0)
 			return -EBUSY;
 
-		spin_lock(&glob->lru_lock);
-		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
+		spin_lock(&ttm_bo_glob.lru_lock);
+		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 			/*
 			 * We raced, and lost, someone else holds the reservation now,
 			 * and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -492,7 +425,7 @@
 			 * delayed destruction would succeed, so just return success
 			 * here.
 			 */
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&ttm_bo_glob.lru_lock);
 			return 0;
 		}
 		ret = 0;
@@ -500,20 +433,20 @@
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		if (unlock_resv)
-			reservation_object_unlock(bo->resv);
-		spin_unlock(&glob->lru_lock);
+			dma_resv_unlock(bo->base.resv);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 		return ret;
 	}
 
 	ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
-	kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
 	if (unlock_resv)
-		reservation_object_unlock(bo->resv);
+		dma_resv_unlock(bo->base.resv);
+
+	ttm_bo_put(bo);
 
 	return 0;
 }
@@ -524,7 +457,7 @@
  */
 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
-	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
 	struct list_head removed;
 	bool empty;
 
@@ -536,23 +469,24 @@
 
 		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 				      ddestroy);
-		kref_get(&bo->list_kref);
 		list_move_tail(&bo->ddestroy, &removed);
+		if (!ttm_bo_get_unless_zero(bo))
+			continue;
 
-		if (remove_all || bo->resv != &bo->ttm_resv) {
+		if (remove_all || bo->base.resv != &bo->base._resv) {
 			spin_unlock(&glob->lru_lock);
-			reservation_object_lock(bo->resv, NULL);
+			dma_resv_lock(bo->base.resv, NULL);
 
 			spin_lock(&glob->lru_lock);
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 
-		} else if (reservation_object_trylock(bo->resv)) {
+		} else if (dma_resv_trylock(bo->base.resv)) {
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 		} else {
 			spin_unlock(&glob->lru_lock);
 		}
 
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		ttm_bo_put(bo);
 		spin_lock(&glob->lru_lock);
 	}
 	list_splice_tail(&removed, &bdev->ddestroy);
@@ -577,14 +511,68 @@
577511 struct ttm_buffer_object *bo =
578512 container_of(kref, struct ttm_buffer_object, kref);
579513 struct ttm_bo_device *bdev = bo->bdev;
580
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
514
+ size_t acc_size = bo->acc_size;
515
+ int ret;
581516
582
- drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
583
- ttm_mem_io_lock(man, false);
584
- ttm_mem_io_free_vm(bo);
585
- ttm_mem_io_unlock(man);
586
- ttm_bo_cleanup_refs_or_queue(bo);
587
- kref_put(&bo->list_kref, ttm_bo_release_list);
517
+ if (!bo->deleted) {
518
+ ret = ttm_bo_individualize_resv(bo);
519
+ if (ret) {
520
+ /* Last resort, if we fail to allocate memory for the
521
+ * fences block for the BO to become idle
522
+ */
523
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
524
+ 30 * HZ);
525
+ }
526
+
527
+ if (bo->bdev->driver->release_notify)
528
+ bo->bdev->driver->release_notify(bo);
529
+
530
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
531
+ ttm_mem_io_free(bdev, &bo->mem);
532
+ }
533
+
534
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
535
+ !dma_resv_trylock(bo->base.resv)) {
536
+ /* The BO is not idle, resurrect it for delayed destroy */
537
+ ttm_bo_flush_all_fences(bo);
538
+ bo->deleted = true;
539
+
540
+ spin_lock(&ttm_bo_glob.lru_lock);
541
+
542
+ /*
543
+ * Make NO_EVICT bos immediately available to
544
+ * shrinkers, now that they are queued for
545
+ * destruction.
546
+ */
547
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
548
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
549
+ ttm_bo_del_from_lru(bo);
550
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
551
+ }
552
+
553
+ kref_init(&bo->kref);
554
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
555
+ spin_unlock(&ttm_bo_glob.lru_lock);
556
+
557
+ schedule_delayed_work(&bdev->wq,
558
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
559
+ return;
560
+ }
561
+
562
+ spin_lock(&ttm_bo_glob.lru_lock);
563
+ ttm_bo_del_from_lru(bo);
564
+ list_del(&bo->ddestroy);
565
+ spin_unlock(&ttm_bo_glob.lru_lock);
566
+
567
+ ttm_bo_cleanup_memtype_use(bo);
568
+ dma_resv_unlock(bo->base.resv);
569
+
570
+ atomic_dec(&ttm_bo_glob.bo_count);
571
+ dma_fence_put(bo->moving);
572
+ if (!ttm_bo_uses_embedded_gem_object(bo))
573
+ dma_resv_fini(&bo->base._resv);
574
+ bo->destroy(bo);
575
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
588576 }
589577
590578 void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -592,15 +580,6 @@
 	kref_put(&bo->kref, ttm_bo_release);
 }
 EXPORT_SYMBOL(ttm_bo_put);
-
-void ttm_bo_unref(struct ttm_buffer_object **p_bo)
-{
-	struct ttm_buffer_object *bo = *p_bo;
-
-	*p_bo = NULL;
-	ttm_bo_put(bo);
-}
-EXPORT_SYMBOL(ttm_bo_unref);
 
 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 {
@@ -620,28 +599,27 @@
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_reg evict_mem;
+	struct ttm_resource evict_mem;
 	struct ttm_placement placement;
 	int ret = 0;
 
-	reservation_object_assert_held(bo->resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 
 	if (!placement.num_placement && !placement.num_busy_placement) {
-		ret = ttm_bo_pipeline_gutting(bo);
-		if (ret)
-			return ret;
+		ttm_bo_wait(bo, false, false);
 
+		ttm_bo_cleanup_memtype_use(bo);
 		return ttm_tt_create(bo, false);
 	}
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
+	evict_mem.bus.offset = 0;
+	evict_mem.bus.addr = NULL;
 
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 	if (ret) {
@@ -657,10 +635,8 @@
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
 			pr_err("Buffer eviction failed\n");
-		ttm_bo_mem_put(bo, &evict_mem);
-		goto out;
+		ttm_resource_free(bo, &evict_mem);
 	}
-	bo->evicted = true;
 out:
 	return ret;
 }
@@ -690,46 +666,95 @@
690666 * b. Otherwise, trylock it.
691667 */
692668 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
693
- struct ttm_operation_ctx *ctx, bool *locked)
669
+ struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
694670 {
695671 bool ret = false;
696672
697
- *locked = false;
698
- if (bo->resv == ctx->resv) {
699
- reservation_object_assert_held(bo->resv);
700
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
701
- || !list_empty(&bo->ddestroy))
673
+ if (bo->base.resv == ctx->resv) {
674
+ dma_resv_assert_held(bo->base.resv);
675
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
702676 ret = true;
677
+ *locked = false;
678
+ if (busy)
679
+ *busy = false;
703680 } else {
704
- *locked = reservation_object_trylock(bo->resv);
705
- ret = *locked;
681
+ ret = dma_resv_trylock(bo->base.resv);
682
+ *locked = ret;
683
+ if (busy)
684
+ *busy = !ret;
706685 }
707686
708687 return ret;
709688 }
710689
711
-static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
712
- uint32_t mem_type,
713
- const struct ttm_place *place,
714
- struct ttm_operation_ctx *ctx)
690
+/**
691
+ * ttm_mem_evict_wait_busy - wait for a busy BO to become available
692
+ *
693
+ * @busy_bo: BO which couldn't be locked with trylock
694
+ * @ctx: operation context
695
+ * @ticket: acquire ticket
696
+ *
697
+ * Try to lock a busy buffer object to avoid failing eviction.
698
+ */
699
+static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
700
+ struct ttm_operation_ctx *ctx,
701
+ struct ww_acquire_ctx *ticket)
715702 {
716
- struct ttm_bo_global *glob = bdev->glob;
717
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
718
- struct ttm_buffer_object *bo = NULL;
703
+ int r;
704
+
705
+ if (!busy_bo || !ticket)
706
+ return -EBUSY;
707
+
708
+ if (ctx->interruptible)
709
+ r = dma_resv_lock_interruptible(busy_bo->base.resv,
710
+ ticket);
711
+ else
712
+ r = dma_resv_lock(busy_bo->base.resv, ticket);
713
+
714
+ /*
715
+ * TODO: It would be better to keep the BO locked until allocation is at
716
+ * least tried one more time, but that would mean a much larger rework
717
+ * of TTM.
718
+ */
719
+ if (!r)
720
+ dma_resv_unlock(busy_bo->base.resv);
721
+
722
+ return r == -EDEADLK ? -EBUSY : r;
723
+}
724
+
725
+int ttm_mem_evict_first(struct ttm_bo_device *bdev,
726
+ struct ttm_resource_manager *man,
727
+ const struct ttm_place *place,
728
+ struct ttm_operation_ctx *ctx,
729
+ struct ww_acquire_ctx *ticket)
730
+{
731
+ struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
719732 bool locked = false;
720733 unsigned i;
721734 int ret;
722735
723
- spin_lock(&glob->lru_lock);
736
+ spin_lock(&ttm_bo_glob.lru_lock);
724737 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
725738 list_for_each_entry(bo, &man->lru[i], lru) {
726
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
739
+ bool busy;
740
+
741
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
742
+ &busy)) {
743
+ if (busy && !busy_bo && ticket !=
744
+ dma_resv_locking_ctx(bo->base.resv))
745
+ busy_bo = bo;
727746 continue;
747
+ }
728748
729749 if (place && !bdev->driver->eviction_valuable(bo,
730750 place)) {
731751 if (locked)
732
- reservation_object_unlock(bo->resv);
752
+ dma_resv_unlock(bo->base.resv);
753
+ continue;
754
+ }
755
+ if (!ttm_bo_get_unless_zero(bo)) {
756
+ if (locked)
757
+ dma_resv_unlock(bo->base.resv);
733758 continue;
734759 }
735760 break;
@@ -743,50 +768,41 @@
743768 }
744769
745770 if (!bo) {
746
- spin_unlock(&glob->lru_lock);
747
- return -EBUSY;
748
- }
749
-
750
- kref_get(&bo->list_kref);
751
-
752
- if (!list_empty(&bo->ddestroy)) {
753
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
754
- ctx->no_wait_gpu, locked);
755
- kref_put(&bo->list_kref, ttm_bo_release_list);
771
+ if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
772
+ busy_bo = NULL;
773
+ spin_unlock(&ttm_bo_glob.lru_lock);
774
+ ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
775
+ if (busy_bo)
776
+ ttm_bo_put(busy_bo);
756777 return ret;
757778 }
758779
759
- ttm_bo_del_from_lru(bo);
760
- spin_unlock(&glob->lru_lock);
761
-
762
- ret = ttm_bo_evict(bo, ctx);
763
- if (locked) {
764
- ttm_bo_unreserve(bo);
765
- } else {
766
- spin_lock(&glob->lru_lock);
767
- ttm_bo_add_to_lru(bo);
768
- spin_unlock(&glob->lru_lock);
780
+ if (bo->deleted) {
781
+ ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
782
+ ctx->no_wait_gpu, locked);
783
+ ttm_bo_put(bo);
784
+ return ret;
769785 }
770786
771
- kref_put(&bo->list_kref, ttm_bo_release_list);
787
+ spin_unlock(&ttm_bo_glob.lru_lock);
788
+
789
+ ret = ttm_bo_evict(bo, ctx);
790
+ if (locked)
791
+ ttm_bo_unreserve(bo);
792
+ else
793
+ ttm_bo_move_to_lru_tail_unlocked(bo);
794
+
795
+ ttm_bo_put(bo);
772796 return ret;
773797 }
774
-
775
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
776
-{
777
- struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
778
-
779
- if (mem->mm_node)
780
- (*man->func->put_node)(man, mem);
781
-}
782
-EXPORT_SYMBOL(ttm_bo_mem_put);
783798
784799 /**
785800 * Add the last move fence to the BO and reserve a new shared slot.
786801 */
787802 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
788
- struct ttm_mem_type_manager *man,
789
- struct ttm_mem_reg *mem)
803
+ struct ttm_resource_manager *man,
804
+ struct ttm_resource *mem,
805
+ bool no_wait_gpu)
790806 {
791807 struct dma_fence *fence;
792808 int ret;
@@ -795,17 +811,24 @@
795811 fence = dma_fence_get(man->move);
796812 spin_unlock(&man->move_lock);
797813
798
- if (fence) {
799
- reservation_object_add_shared_fence(bo->resv, fence);
814
+ if (!fence)
815
+ return 0;
800816
801
- ret = reservation_object_reserve_shared(bo->resv);
802
- if (unlikely(ret))
803
- return ret;
804
-
805
- dma_fence_put(bo->moving);
806
- bo->moving = fence;
817
+ if (no_wait_gpu) {
818
+ dma_fence_put(fence);
819
+ return -EBUSY;
807820 }
808821
822
+ dma_resv_add_shared_fence(bo->base.resv, fence);
823
+
824
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
825
+ if (unlikely(ret)) {
826
+ dma_fence_put(fence);
827
+ return ret;
828
+ }
829
+
830
+ dma_fence_put(bo->moving);
831
+ bo->moving = fence;
809832 return 0;
810833 }
811834
@@ -814,30 +837,32 @@
814837 * space, or we've evicted everything and there isn't enough space.
815838 */
816839 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
817
- uint32_t mem_type,
818
- const struct ttm_place *place,
819
- struct ttm_mem_reg *mem,
820
- struct ttm_operation_ctx *ctx)
840
+ const struct ttm_place *place,
841
+ struct ttm_resource *mem,
842
+ struct ttm_operation_ctx *ctx)
821843 {
822844 struct ttm_bo_device *bdev = bo->bdev;
823
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
845
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
846
+ struct ww_acquire_ctx *ticket;
824847 int ret;
825848
849
+ ticket = dma_resv_locking_ctx(bo->base.resv);
826850 do {
827
- ret = (*man->func->get_node)(man, bo, place, mem);
828
- if (unlikely(ret != 0))
829
- return ret;
830
- if (mem->mm_node)
851
+ ret = ttm_resource_alloc(bo, place, mem);
852
+ if (likely(!ret))
831853 break;
832
- ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
854
+ if (unlikely(ret != -ENOSPC))
855
+ return ret;
856
+ ret = ttm_mem_evict_first(bdev, man, place, ctx,
857
+ ticket);
833858 if (unlikely(ret != 0))
834859 return ret;
835860 } while (1);
836
- mem->mem_type = mem_type;
837
- return ttm_bo_add_move_fence(bo, man, mem);
861
+
862
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
838863 }
839864
840
-static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
865
+static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
841866 uint32_t cur_placement,
842867 uint32_t proposed_placement)
843868 {
@@ -850,8 +875,6 @@
 
 	if ((cur_placement & caching) != 0)
 		result |= (cur_placement & caching);
-	else if ((man->default_caching & caching) != 0)
-		result |= man->default_caching;
 	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
 		result |= TTM_PL_FLAG_CACHED;
 	else if ((TTM_PL_FLAG_WC & caching) != 0)
@@ -862,23 +885,43 @@
862885 return result;
863886 }
864887
865
-static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
866
- uint32_t mem_type,
867
- const struct ttm_place *place,
868
- uint32_t *masked_placement)
888
+/**
889
+ * ttm_bo_mem_placement - check if placement is compatible
890
+ * @bo: BO to find memory for
891
+ * @place: where to search
892
+ * @mem: the memory object to fill in
893
+ * @ctx: operation context
894
+ *
895
+ * Check if placement is compatible and fill in mem structure.
896
+ * Returns -EBUSY if placement won't work or negative error code.
897
+ * 0 when placement can be used.
898
+ */
899
+static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
900
+ const struct ttm_place *place,
901
+ struct ttm_resource *mem,
902
+ struct ttm_operation_ctx *ctx)
869903 {
870
- uint32_t cur_flags = ttm_bo_type_flags(mem_type);
904
+ struct ttm_bo_device *bdev = bo->bdev;
905
+ struct ttm_resource_manager *man;
906
+ uint32_t cur_flags = 0;
871907
872
- if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
873
- return false;
908
+ man = ttm_manager_type(bdev, place->mem_type);
909
+ if (!man || !ttm_resource_manager_used(man))
910
+ return -EBUSY;
874911
875
- if ((place->flags & man->available_caching) == 0)
876
- return false;
912
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
913
+ place->flags);
914
+ cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
877915
878
- cur_flags |= (place->flags & man->available_caching);
916
+ mem->mem_type = place->mem_type;
917
+ mem->placement = cur_flags;
879918
880
- *masked_placement = cur_flags;
881
- return true;
919
+ spin_lock(&ttm_bo_glob.lru_lock);
920
+ ttm_bo_del_from_lru(bo);
921
+ ttm_bo_add_mem_to_lru(bo, mem);
922
+ spin_unlock(&ttm_bo_glob.lru_lock);
923
+
924
+ return 0;
882925 }
883926
884927 /**
@@ -891,116 +934,72 @@
891934 */
892935 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
893936 struct ttm_placement *placement,
894
- struct ttm_mem_reg *mem,
937
+ struct ttm_resource *mem,
895938 struct ttm_operation_ctx *ctx)
896939 {
897940 struct ttm_bo_device *bdev = bo->bdev;
898
- struct ttm_mem_type_manager *man;
899
- uint32_t mem_type = TTM_PL_SYSTEM;
900
- uint32_t cur_flags = 0;
901941 bool type_found = false;
902
- bool type_ok = false;
903
- bool has_erestartsys = false;
904942 int i, ret;
905943
906
- ret = reservation_object_reserve_shared(bo->resv);
944
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
907945 if (unlikely(ret))
908946 return ret;
909947
910
- mem->mm_node = NULL;
911948 for (i = 0; i < placement->num_placement; ++i) {
912949 const struct ttm_place *place = &placement->placement[i];
950
+ struct ttm_resource_manager *man;
913951
914
- ret = ttm_mem_type_from_place(place, &mem_type);
952
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
915953 if (ret)
916
- return ret;
917
- man = &bdev->man[mem_type];
918
- if (!man->has_type || !man->use_type)
919
- continue;
920
-
921
- type_ok = ttm_bo_mt_compatible(man, mem_type, place,
922
- &cur_flags);
923
-
924
- if (!type_ok)
925954 continue;
926955
927956 type_found = true;
928
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
929
- cur_flags);
930
- /*
931
- * Use the access and other non-mapping-related flag bits from
932
- * the memory placement flags to the current flags
933
- */
934
- ttm_flag_masked(&cur_flags, place->flags,
935
- ~TTM_PL_MASK_MEMTYPE);
936
-
937
- if (mem_type == TTM_PL_SYSTEM)
938
- break;
939
-
940
- ret = (*man->func->get_node)(man, bo, place, mem);
957
+ ret = ttm_resource_alloc(bo, place, mem);
958
+ if (ret == -ENOSPC)
959
+ continue;
941960 if (unlikely(ret))
942
- return ret;
961
+ goto error;
943962
944
- if (mem->mm_node) {
945
- ret = ttm_bo_add_move_fence(bo, man, mem);
946
- if (unlikely(ret)) {
947
- (*man->func->put_node)(man, mem);
948
- return ret;
949
- }
950
- break;
963
+ man = ttm_manager_type(bdev, mem->mem_type);
964
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
965
+ if (unlikely(ret)) {
966
+ ttm_resource_free(bo, mem);
967
+ if (ret == -EBUSY)
968
+ continue;
969
+
970
+ goto error;
951971 }
952
- }
953
-
954
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
955
- mem->mem_type = mem_type;
956
- mem->placement = cur_flags;
957972 return 0;
958973 }
959974
960975 for (i = 0; i < placement->num_busy_placement; ++i) {
961976 const struct ttm_place *place = &placement->busy_placement[i];
962977
963
- ret = ttm_mem_type_from_place(place, &mem_type);
978
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
964979 if (ret)
965
- return ret;
966
- man = &bdev->man[mem_type];
967
- if (!man->has_type || !man->use_type)
968
- continue;
969
- if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
970980 continue;
971981
972982 type_found = true;
973
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
974
- cur_flags);
975
- /*
976
- * Use the access and other non-mapping-related flag bits from
977
- * the memory placement flags to the current flags
978
- */
979
- ttm_flag_masked(&cur_flags, place->flags,
980
- ~TTM_PL_MASK_MEMTYPE);
981
-
982
- if (mem_type == TTM_PL_SYSTEM) {
983
- mem->mem_type = mem_type;
984
- mem->placement = cur_flags;
985
- mem->mm_node = NULL;
983
+ ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
984
+ if (likely(!ret))
986985 return 0;
987
- }
988986
989
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
990
- if (ret == 0 && mem->mm_node) {
991
- mem->placement = cur_flags;
992
- return 0;
993
- }
994
- if (ret == -ERESTARTSYS)
995
- has_erestartsys = true;
987
+ if (ret && ret != -EBUSY)
988
+ goto error;
996989 }
997990
991
+ ret = -ENOMEM;
998992 if (!type_found) {
999993 pr_err(TTM_PFX "No compatible memory type found\n");
1000
- return -EINVAL;
994
+ ret = -EINVAL;
1001995 }
1002996
1003
- return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
997
+error:
998
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
999
+ ttm_bo_move_to_lru_tail_unlocked(bo);
1000
+ }
1001
+
1002
+ return ret;
10041003 }
10051004 EXPORT_SYMBOL(ttm_bo_mem_space);
10061005
@@ -1009,15 +1008,17 @@
 			      struct ttm_operation_ctx *ctx)
 {
 	int ret = 0;
-	struct ttm_mem_reg mem;
+	struct ttm_resource mem;
 
-	reservation_object_assert_held(bo->resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
+	mem.bus.offset = 0;
+	mem.bus.addr = NULL;
+	mem.mm_node = NULL;
+
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1026,14 +1027,14 @@
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
 out_unlock:
-	if (ret && mem.mm_node)
-		ttm_bo_mem_put(bo, &mem);
+	if (ret)
+		ttm_resource_free(bo, &mem);
 	return ret;
 }
 
 static bool ttm_bo_places_compat(const struct ttm_place *places,
 				 unsigned num_placement,
-				 struct ttm_mem_reg *mem,
+				 struct ttm_resource *mem,
 				 uint32_t *new_flags)
 {
 	unsigned i;
@@ -1041,13 +1042,13 @@
 	for (i = 0; i < num_placement; i++) {
 		const struct ttm_place *heap = &places[i];
 
-		if (mem->mm_node && (mem->start < heap->fpfn ||
+		if ((mem->start < heap->fpfn ||
 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
 			continue;
 
 		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
-		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
+		    (mem->mem_type == heap->mem_type) &&
 		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
 		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
 			return true;
@@ -1056,7 +1057,7 @@
 }
 
 bool ttm_bo_mem_compat(struct ttm_placement *placement,
-		       struct ttm_mem_reg *mem,
+		       struct ttm_resource *mem,
 		       uint32_t *new_flags)
 {
 	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
@@ -1081,7 +1082,19 @@
10811082 int ret;
10821083 uint32_t new_flags;
10831084
1084
- reservation_object_assert_held(bo->resv);
1085
+ dma_resv_assert_held(bo->base.resv);
1086
+
1087
+ /*
1088
+ * Remove the backing store if no placement is given.
1089
+ */
1090
+ if (!placement->num_placement && !placement->num_busy_placement) {
1091
+ ret = ttm_bo_pipeline_gutting(bo);
1092
+ if (ret)
1093
+ return ret;
1094
+
1095
+ return ttm_tt_create(bo, false);
1096
+ }
1097
+
10851098 /*
10861099 * Check whether we need to move buffer.
10871100 */
@@ -1090,17 +1103,13 @@
 		if (ret)
 			return ret;
 	} else {
-		/*
-		 * Use the access and other non-mapping-related flag bits from
-		 * the compatible memory placement flags to the active flags
-		 */
-		ttm_flag_masked(&bo->mem.placement, new_flags,
-				~TTM_PL_MASK_MEMTYPE);
+		bo->mem.placement &= TTM_PL_MASK_CACHING;
+		bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
 	}
 	/*
 	 * We might need to add a TTM.
 	 */
-	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 		ret = ttm_tt_create(bo, true);
 		if (ret)
 			return ret;
@@ -1118,12 +1127,12 @@
 			 struct ttm_operation_ctx *ctx,
 			 size_t acc_size,
 			 struct sg_table *sg,
-			 struct reservation_object *resv,
+			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	int ret = 0;
 	unsigned long num_pages;
-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 	bool locked;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1149,13 +1158,9 @@
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
-	kref_init(&bo->list_kref);
-	atomic_set(&bo->cpu_writers, 0);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
-	INIT_LIST_HEAD(&bo->io_reserve_lru);
-	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1164,21 +1169,27 @@
11641169 bo->mem.num_pages = bo->num_pages;
11651170 bo->mem.mm_node = NULL;
11661171 bo->mem.page_alignment = page_alignment;
1167
- bo->mem.bus.io_reserved_vm = false;
1168
- bo->mem.bus.io_reserved_count = 0;
1172
+ bo->mem.bus.offset = 0;
1173
+ bo->mem.bus.addr = NULL;
11691174 bo->moving = NULL;
1170
- bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1175
+ bo->mem.placement = TTM_PL_FLAG_CACHED;
11711176 bo->acc_size = acc_size;
11721177 bo->sg = sg;
11731178 if (resv) {
1174
- bo->resv = resv;
1175
- reservation_object_assert_held(bo->resv);
1179
+ bo->base.resv = resv;
1180
+ dma_resv_assert_held(bo->base.resv);
11761181 } else {
1177
- bo->resv = &bo->ttm_resv;
1182
+ bo->base.resv = &bo->base._resv;
11781183 }
1179
- reservation_object_init(&bo->ttm_resv);
1180
- atomic_inc(&bo->bdev->glob->bo_count);
1181
- drm_vma_node_reset(&bo->vma_node);
1184
+ if (!ttm_bo_uses_embedded_gem_object(bo)) {
1185
+ /*
1186
+ * bo.gem is not initialized, so we have to setup the
1187
+ * struct elements we want use regardless.
1188
+ */
1189
+ dma_resv_init(&bo->base._resv);
1190
+ drm_vma_node_reset(&bo->base.vma_node);
1191
+ }
1192
+ atomic_inc(&ttm_bo_glob.bo_count);
11821193
11831194 /*
11841195 * For ttm_bo_type_device buffers, allocate
@@ -1186,14 +1197,14 @@
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
 					 bo->mem.num_pages);
 
 	/* passed reservation objects should already be locked,
 	 * since otherwise lockdep will be angered in radeon.
 	 */
 	if (!resv) {
-		locked = reservation_object_trylock(bo->resv);
+		locked = dma_resv_trylock(bo->base.resv);
 		WARN_ON(!locked);
 	}
 
@@ -1208,11 +1219,7 @@
 		return ret;
 	}
 
-	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bdev->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 
 	return ret;
 }
@@ -1227,7 +1234,7 @@
 		bool interruptible,
 		size_t acc_size,
 		struct sg_table *sg,
-		struct reservation_object *resv,
+		struct dma_resv *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1246,9 +1253,9 @@
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
-		       unsigned long bo_size,
-		       unsigned struct_size)
+static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+			      unsigned long bo_size,
+			      unsigned struct_size)
 {
 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
 	size_t size = 0;
@@ -1258,7 +1265,6 @@
 	size += ttm_round_pot(sizeof(struct ttm_tt));
 	return size;
 }
-EXPORT_SYMBOL(ttm_bo_acc_size);
 
 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 			   unsigned long bo_size,
@@ -1301,143 +1307,23 @@
13011307 }
13021308 EXPORT_SYMBOL(ttm_bo_create);
13031309
1304
-static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1305
- unsigned mem_type)
1306
-{
1307
- struct ttm_operation_ctx ctx = {
1308
- .interruptible = false,
1309
- .no_wait_gpu = false,
1310
- .flags = TTM_OPT_FLAG_FORCE_ALLOC
1311
- };
1312
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1313
- struct ttm_bo_global *glob = bdev->glob;
1314
- struct dma_fence *fence;
1315
- int ret;
1316
- unsigned i;
1317
-
1318
- /*
1319
- * Can't use standard list traversal since we're unlocking.
1320
- */
1321
-
1322
- spin_lock(&glob->lru_lock);
1323
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1324
- while (!list_empty(&man->lru[i])) {
1325
- spin_unlock(&glob->lru_lock);
1326
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
1327
- if (ret)
1328
- return ret;
1329
- spin_lock(&glob->lru_lock);
1330
- }
1331
- }
1332
- spin_unlock(&glob->lru_lock);
1333
-
1334
- spin_lock(&man->move_lock);
1335
- fence = dma_fence_get(man->move);
1336
- spin_unlock(&man->move_lock);
1337
-
1338
- if (fence) {
1339
- ret = dma_fence_wait(fence, false);
1340
- dma_fence_put(fence);
1341
- if (ret)
1342
- return ret;
1343
- }
1344
-
1345
- return 0;
1346
-}
1347
-
1348
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1349
-{
1350
- struct ttm_mem_type_manager *man;
1351
- int ret = -EINVAL;
1352
-
1353
- if (mem_type >= TTM_NUM_MEM_TYPES) {
1354
- pr_err("Illegal memory type %d\n", mem_type);
1355
- return ret;
1356
- }
1357
- man = &bdev->man[mem_type];
1358
-
1359
- if (!man->has_type) {
1360
- pr_err("Trying to take down uninitialized memory manager type %u\n",
1361
- mem_type);
1362
- return ret;
1363
- }
1364
-
1365
- man->use_type = false;
1366
- man->has_type = false;
1367
-
1368
- ret = 0;
1369
- if (mem_type > 0) {
1370
- ret = ttm_bo_force_list_clean(bdev, mem_type);
1371
- if (ret) {
1372
- pr_err("Cleanup eviction failed\n");
1373
- return ret;
1374
- }
1375
-
1376
- ret = (*man->func->takedown)(man);
1377
- }
1378
-
1379
- dma_fence_put(man->move);
1380
- man->move = NULL;
1381
-
1382
- return ret;
1383
-}
1384
-EXPORT_SYMBOL(ttm_bo_clean_mm);
1385
-
13861310 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
13871311 {
1388
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1312
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
13891313
13901314 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
13911315 pr_err("Illegal memory manager memory type %u\n", mem_type);
13921316 return -EINVAL;
13931317 }
13941318
1395
- if (!man->has_type) {
1319
+ if (!man) {
13961320 pr_err("Memory type %u has not been initialized\n", mem_type);
13971321 return 0;
13981322 }
13991323
1400
- return ttm_bo_force_list_clean(bdev, mem_type);
1324
+ return ttm_resource_manager_force_list_clean(bdev, man);
14011325 }
14021326 EXPORT_SYMBOL(ttm_bo_evict_mm);
1403
-
1404
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1405
- unsigned long p_size)
1406
-{
1407
- int ret;
1408
- struct ttm_mem_type_manager *man;
1409
- unsigned i;
1410
-
1411
- BUG_ON(type >= TTM_NUM_MEM_TYPES);
1412
- man = &bdev->man[type];
1413
- BUG_ON(man->has_type);
1414
- man->io_reserve_fastpath = true;
1415
- man->use_io_reserve_lru = false;
1416
- mutex_init(&man->io_reserve_mutex);
1417
- spin_lock_init(&man->move_lock);
1418
- INIT_LIST_HEAD(&man->io_reserve_lru);
1419
-
1420
- ret = bdev->driver->init_mem_type(bdev, type, man);
1421
- if (ret)
1422
- return ret;
1423
- man->bdev = bdev;
1424
-
1425
- if (type != TTM_PL_SYSTEM) {
1426
- ret = (*man->func->init)(man, p_size);
1427
- if (ret)
1428
- return ret;
1429
- }
1430
- man->has_type = true;
1431
- man->use_type = true;
1432
- man->size = p_size;
1433
-
1434
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1435
- INIT_LIST_HEAD(&man->lru[i]);
1436
- man->move = NULL;
1437
-
1438
- return 0;
1439
-}
1440
-EXPORT_SYMBOL(ttm_bo_init_mm);
14411327
14421328 static void ttm_bo_global_kobj_release(struct kobject *kobj)
14431329 {
@@ -1447,32 +1333,42 @@
14471333 __free_page(glob->dummy_read_page);
14481334 }
14491335
1450
-void ttm_bo_global_release(struct drm_global_reference *ref)
1336
+static void ttm_bo_global_release(void)
14511337 {
1452
- struct ttm_bo_global *glob = ref->object;
1338
+ struct ttm_bo_global *glob = &ttm_bo_glob;
1339
+
1340
+ mutex_lock(&ttm_global_mutex);
1341
+ if (--ttm_bo_glob_use_count > 0)
1342
+ goto out;
14531343
14541344 kobject_del(&glob->kobj);
14551345 kobject_put(&glob->kobj);
1346
+ ttm_mem_global_release(&ttm_mem_glob);
1347
+ memset(glob, 0, sizeof(*glob));
1348
+out:
1349
+ mutex_unlock(&ttm_global_mutex);
14561350 }
1457
-EXPORT_SYMBOL(ttm_bo_global_release);
14581351
1459
-int ttm_bo_global_init(struct drm_global_reference *ref)
1352
+static int ttm_bo_global_init(void)
14601353 {
1461
- struct ttm_bo_global_ref *bo_ref =
1462
- container_of(ref, struct ttm_bo_global_ref, ref);
1463
- struct ttm_bo_global *glob = ref->object;
1464
- int ret;
1354
+ struct ttm_bo_global *glob = &ttm_bo_glob;
1355
+ int ret = 0;
14651356 unsigned i;
14661357
1467
- mutex_init(&glob->device_list_mutex);
1358
+ mutex_lock(&ttm_global_mutex);
1359
+ if (++ttm_bo_glob_use_count > 1)
1360
+ goto out;
1361
+
1362
+ ret = ttm_mem_global_init(&ttm_mem_glob);
1363
+ if (ret)
1364
+ goto out;
1365
+
14681366 spin_lock_init(&glob->lru_lock);
1469
- glob->mem_glob = bo_ref->mem_glob;
1470
- glob->mem_glob->bo_glob = glob;
14711367 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
14721368
14731369 if (unlikely(glob->dummy_read_page == NULL)) {
14741370 ret = -ENOMEM;
1475
- goto out_no_drp;
1371
+ goto out;
14761372 }
14771373
14781374 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
....@@ -1484,37 +1380,25 @@
14841380 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
14851381 if (unlikely(ret != 0))
14861382 kobject_put(&glob->kobj);
1487
- return ret;
1488
-out_no_drp:
1489
- kfree(glob);
1383
+out:
1384
+ mutex_unlock(&ttm_global_mutex);
14901385 return ret;
14911386 }
1492
-EXPORT_SYMBOL(ttm_bo_global_init);
1493
-
14941387
14951388 int ttm_bo_device_release(struct ttm_bo_device *bdev)
14961389 {
1390
+ struct ttm_bo_global *glob = &ttm_bo_glob;
14971391 int ret = 0;
1498
- unsigned i = TTM_NUM_MEM_TYPES;
1499
- struct ttm_mem_type_manager *man;
1500
- struct ttm_bo_global *glob = bdev->glob;
1392
+ unsigned i;
1393
+ struct ttm_resource_manager *man;
15011394
1502
- while (i--) {
1503
- man = &bdev->man[i];
1504
- if (man->has_type) {
1505
- man->use_type = false;
1506
- if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1507
- ret = -EBUSY;
1508
- pr_err("DRM memory manager type %d is not clean\n",
1509
- i);
1510
- }
1511
- man->has_type = false;
1512
- }
1513
- }
1395
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1396
+ ttm_resource_manager_set_used(man, false);
1397
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
15141398
1515
- mutex_lock(&glob->device_list_mutex);
1399
+ mutex_lock(&ttm_global_mutex);
15161400 list_del(&bdev->device_list);
1517
- mutex_unlock(&glob->device_list_mutex);
1401
+ mutex_unlock(&ttm_global_mutex);
15181402
15191403 cancel_delayed_work_sync(&bdev->wq);
15201404
@@ -1523,51 +1407,62 @@
15231407
15241408 spin_lock(&glob->lru_lock);
15251409 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1526
- if (list_empty(&bdev->man[0].lru[0]))
1410
+ if (list_empty(&man->lru[0]))
15271411 pr_debug("Swap list %d was clean\n", i);
15281412 spin_unlock(&glob->lru_lock);
15291413
1530
- drm_vma_offset_manager_destroy(&bdev->vma_manager);
1414
+ if (!ret)
1415
+ ttm_bo_global_release();
15311416
15321417 return ret;
15331418 }
15341419 EXPORT_SYMBOL(ttm_bo_device_release);
15351420
1536
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
1537
- struct ttm_bo_global *glob,
1538
- struct ttm_bo_driver *driver,
1539
- struct address_space *mapping,
1540
- uint64_t file_page_offset,
1541
- bool need_dma32)
1421
+static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
15421422 {
1543
- int ret = -EINVAL;
1544
-
1545
- bdev->driver = driver;
1546
-
1547
- memset(bdev->man, 0, sizeof(bdev->man));
1423
+ struct ttm_resource_manager *man = &bdev->sysman;
15481424
15491425 /*
15501426 * Initialize the system memory buffer type.
15511427 * Other types need to be driver / IOCTL initialized.
15521428 */
1553
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1554
- if (unlikely(ret != 0))
1555
- goto out_no_sys;
1429
+ man->use_tt = true;
15561430
1557
- drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
1558
- 0x10000000);
1431
+ ttm_resource_manager_init(man, 0);
1432
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1433
+ ttm_resource_manager_set_used(man, true);
1434
+}
1435
+
1436
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
1437
+ struct ttm_bo_driver *driver,
1438
+ struct address_space *mapping,
1439
+ struct drm_vma_offset_manager *vma_manager,
1440
+ bool need_dma32)
1441
+{
1442
+ struct ttm_bo_global *glob = &ttm_bo_glob;
1443
+ int ret;
1444
+
1445
+ if (WARN_ON(vma_manager == NULL))
1446
+ return -EINVAL;
1447
+
1448
+ ret = ttm_bo_global_init();
1449
+ if (ret)
1450
+ return ret;
1451
+
1452
+ bdev->driver = driver;
1453
+
1454
+ ttm_bo_init_sysman(bdev);
1455
+
1456
+ bdev->vma_manager = vma_manager;
15591457 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
15601458 INIT_LIST_HEAD(&bdev->ddestroy);
15611459 bdev->dev_mapping = mapping;
1562
- bdev->glob = glob;
15631460 bdev->need_dma32 = need_dma32;
1564
- mutex_lock(&glob->device_list_mutex);
1461
+ mutex_lock(&ttm_global_mutex);
15651462 list_add_tail(&bdev->device_list, &glob->device_list);
1566
- mutex_unlock(&glob->device_list_mutex);
1463
+ mutex_unlock(&ttm_global_mutex);
15671464
15681465 return 0;
1569
-out_no_sys:
1570
- return ret;
15711466 }
15721467 EXPORT_SYMBOL(ttm_bo_device_init);
15731468
@@ -1575,42 +1470,13 @@
15751470 * buffer object vm functions.
15761471 */
15771472
1578
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1579
-{
1580
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1581
-
1582
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1583
- if (mem->mem_type == TTM_PL_SYSTEM)
1584
- return false;
1585
-
1586
- if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1587
- return false;
1588
-
1589
- if (mem->placement & TTM_PL_FLAG_CACHED)
1590
- return false;
1591
- }
1592
- return true;
1593
-}
1594
-
1595
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1596
-{
1597
- struct ttm_bo_device *bdev = bo->bdev;
1598
-
1599
- drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1600
- ttm_mem_io_free_vm(bo);
1601
-}
1602
-
16031473 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
16041474 {
16051475 struct ttm_bo_device *bdev = bo->bdev;
1606
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
16071476
1608
- ttm_mem_io_lock(man, false);
1609
- ttm_bo_unmap_virtual_locked(bo);
1610
- ttm_mem_io_unlock(man);
1477
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1478
+ ttm_mem_io_free(bdev, &bo->mem);
16111479 }
1612
-
1613
-
16141480 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
16151481
16161482 int ttm_bo_wait(struct ttm_buffer_object *bo,
@@ -1619,13 +1485,13 @@
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (reservation_object_test_signaled_rcu(bo->resv, true))
+		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
 						      interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
@@ -1633,35 +1499,10 @@
16331499 if (timeout == 0)
16341500 return -EBUSY;
16351501
1636
- reservation_object_add_excl_fence(bo->resv, NULL);
1502
+ dma_resv_add_excl_fence(bo->base.resv, NULL);
16371503 return 0;
16381504 }
16391505 EXPORT_SYMBOL(ttm_bo_wait);
1640
-
1641
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1642
-{
1643
- int ret = 0;
1644
-
1645
- /*
1646
- * Using ttm_bo_reserve makes sure the lru lists are updated.
1647
- */
1648
-
1649
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1650
- if (unlikely(ret != 0))
1651
- return ret;
1652
- ret = ttm_bo_wait(bo, true, no_wait);
1653
- if (likely(ret == 0))
1654
- atomic_inc(&bo->cpu_writers);
1655
- ttm_bo_unreserve(bo);
1656
- return ret;
1657
-}
1658
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1659
-
1660
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1661
-{
1662
- atomic_dec(&bo->cpu_writers);
1663
-}
1664
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
16651506
16661507 /**
16671508 * A buffer object shrink method that tries to swap out the first
@@ -1677,10 +1518,18 @@
16771518 spin_lock(&glob->lru_lock);
16781519 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
16791520 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1680
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
1681
- ret = 0;
1682
- break;
1521
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1522
+ NULL))
1523
+ continue;
1524
+
1525
+ if (!ttm_bo_get_unless_zero(bo)) {
1526
+ if (locked)
1527
+ dma_resv_unlock(bo->base.resv);
1528
+ continue;
16831529 }
1530
+
1531
+ ret = 0;
1532
+ break;
16841533 }
16851534 if (!ret)
16861535 break;
@@ -1691,11 +1540,9 @@
 		return ret;
 	}
 
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
+	if (bo->deleted) {
 		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		ttm_bo_put(bo);
 		return ret;
 	}
 
@@ -1709,11 +1556,11 @@
 	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
 	    bo->ttm->caching_state != tt_cached) {
 		struct ttm_operation_ctx ctx = { false, false };
-		struct ttm_mem_reg evict_mem;
+		struct ttm_resource evict_mem;
 
 		evict_mem = bo->mem;
 		evict_mem.mm_node = NULL;
-		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+		evict_mem.placement = TTM_PL_FLAG_CACHED;
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
@@ -1739,7 +1586,7 @@
 	if (bo->bdev->driver->swap_notify)
 		bo->bdev->driver->swap_notify(bo);
 
-	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+	ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
 out:
 
 	/**
@@ -1748,54 +1595,38 @@
17481595 * already swapped buffer.
17491596 */
17501597 if (locked)
1751
- reservation_object_unlock(bo->resv);
1752
- kref_put(&bo->list_kref, ttm_bo_release_list);
1598
+ dma_resv_unlock(bo->base.resv);
1599
+ ttm_bo_put(bo);
17531600 return ret;
17541601 }
17551602 EXPORT_SYMBOL(ttm_bo_swapout);
17561603
1757
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1604
+void ttm_bo_swapout_all(void)
17581605 {
17591606 struct ttm_operation_ctx ctx = {
17601607 .interruptible = false,
17611608 .no_wait_gpu = false
17621609 };
17631610
1764
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
1765
- ;
1611
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
17661612 }
17671613 EXPORT_SYMBOL(ttm_bo_swapout_all);
17681614
1769
-/**
1770
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1771
- * unreserved
1772
- *
1773
- * @bo: Pointer to buffer
1774
- */
1775
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1615
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
17761616 {
1777
- int ret;
1617
+ if (bo->ttm == NULL)
1618
+ return;
17781619
1779
- /*
1780
- * In the absense of a wait_unlocked API,
1781
- * Use the bo::wu_mutex to avoid triggering livelocks due to
1782
- * concurrent use of this function. Note that this use of
1783
- * bo::wu_mutex can go away if we change locking order to
1784
- * mmap_sem -> bo::reserve.
1785
- */
1786
- ret = mutex_lock_interruptible(&bo->wu_mutex);
1787
- if (unlikely(ret != 0))
1788
- return -ERESTARTSYS;
1789
- if (!ww_mutex_is_locked(&bo->resv->lock))
1790
- goto out_unlock;
1791
- ret = reservation_object_lock_interruptible(bo->resv, NULL);
1792
- if (ret == -EINTR)
1793
- ret = -ERESTARTSYS;
1794
- if (unlikely(ret != 0))
1795
- goto out_unlock;
1796
- reservation_object_unlock(bo->resv);
1620
+ ttm_tt_destroy(bo->bdev, bo->ttm);
1621
+ bo->ttm = NULL;
1622
+}
17971623
1798
-out_unlock:
1799
- mutex_unlock(&bo->wu_mutex);
1800
- return ret;
1624
+int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
1625
+{
1626
+ return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
1627
+}
1628
+
1629
+void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
1630
+{
1631
+ bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
18011632 }