forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
         struct ttm_buffer_object base;
@@ -47,15 +47,15 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-        ttm_bo_mem_put(bo, &bo->mem);
+        ttm_resource_free(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                     struct ttm_operation_ctx *ctx,
-                    struct ttm_mem_reg *new_mem)
+                    struct ttm_resource *new_mem)
 {
         struct ttm_tt *ttm = bo->ttm;
-        struct ttm_mem_reg *old_mem = &bo->mem;
+        struct ttm_resource *old_mem = &bo->mem;
         int ret;
 
         if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -67,10 +67,8 @@
                         return ret;
                 }
 
-                ttm_tt_unbind(ttm);
+                ttm_bo_tt_unbind(bo);
                 ttm_bo_free_old_node(bo);
-                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
-                                TTM_PL_MASK_MEM);
                 old_mem->mem_type = TTM_PL_SYSTEM;
         }
 
@@ -79,154 +77,70 @@
                 return ret;
 
         if (new_mem->mem_type != TTM_PL_SYSTEM) {
-                ret = ttm_tt_bind(ttm, new_mem, ctx);
+
+                ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+                if (unlikely(ret != 0))
+                        return ret;
+
+                ret = ttm_bo_tt_bind(bo, new_mem);
                 if (unlikely(ret != 0))
                         return ret;
         }
 
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
-
+        ttm_bo_assign_mem(bo, new_mem);
         return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                       struct ttm_resource *mem)
 {
-        if (likely(man->io_reserve_fastpath))
+        if (mem->bus.offset || mem->bus.addr)
                 return 0;
 
-        if (interruptible)
-                return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-        mutex_lock(&man->io_reserve_mutex);
-        return 0;
-}
-EXPORT_SYMBOL(ttm_mem_io_lock);
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
-        if (likely(man->io_reserve_fastpath))
-                return;
-
-        mutex_unlock(&man->io_reserve_mutex);
-}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
-        struct ttm_buffer_object *bo;
-
-        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
-                return -EAGAIN;
-
-        bo = list_first_entry(&man->io_reserve_lru,
-                              struct ttm_buffer_object,
-                              io_reserve_lru);
-        list_del_init(&bo->io_reserve_lru);
-        ttm_bo_unmap_virtual_locked(bo);
-
-        return 0;
-}
-
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-                       struct ttm_mem_reg *mem)
-{
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-        int ret = 0;
-
+        mem->bus.is_iomem = false;
         if (!bdev->driver->io_mem_reserve)
                 return 0;
-        if (likely(man->io_reserve_fastpath))
-                return bdev->driver->io_mem_reserve(bdev, mem);
 
-        if (bdev->driver->io_mem_reserve &&
-            mem->bus.io_reserved_count++ == 0) {
-retry:
-                ret = bdev->driver->io_mem_reserve(bdev, mem);
-                if (ret == -EAGAIN) {
-                        ret = ttm_mem_io_evict(man);
-                        if (ret == 0)
-                                goto retry;
-                }
-        }
-        return ret;
+        return bdev->driver->io_mem_reserve(bdev, mem);
 }
-EXPORT_SYMBOL(ttm_mem_io_reserve);
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
-                     struct ttm_mem_reg *mem)
+                     struct ttm_resource *mem)
 {
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-        if (likely(man->io_reserve_fastpath))
+        if (!mem->bus.offset && !mem->bus.addr)
                 return;
 
-        if (bdev->driver->io_mem_reserve &&
-            --mem->bus.io_reserved_count == 0 &&
-            bdev->driver->io_mem_free)
+        if (bdev->driver->io_mem_free)
                 bdev->driver->io_mem_free(bdev, mem);
 
-}
-EXPORT_SYMBOL(ttm_mem_io_free);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-        struct ttm_mem_reg *mem = &bo->mem;
-        int ret;
-
-        if (!mem->bus.io_reserved_vm) {
-                struct ttm_mem_type_manager *man =
-                        &bo->bdev->man[mem->mem_type];
-
-                ret = ttm_mem_io_reserve(bo->bdev, mem);
-                if (unlikely(ret != 0))
-                        return ret;
-                mem->bus.io_reserved_vm = true;
-                if (man->use_io_reserve_lru)
-                        list_add_tail(&bo->io_reserve_lru,
-                                      &man->io_reserve_lru);
-        }
-        return 0;
+        mem->bus.offset = 0;
+        mem->bus.addr = NULL;
 }
 
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+                                struct ttm_resource *mem,
+                                void **virtual)
 {
-        struct ttm_mem_reg *mem = &bo->mem;
-
-        if (mem->bus.io_reserved_vm) {
-                mem->bus.io_reserved_vm = false;
-                list_del_init(&bo->io_reserve_lru);
-                ttm_mem_io_free(bo->bdev, mem);
-        }
-}
-
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-                               void **virtual)
-{
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
         int ret;
         void *addr;
 
         *virtual = NULL;
-        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bdev, mem);
-        ttm_mem_io_unlock(man);
         if (ret || !mem->bus.is_iomem)
                 return ret;
 
         if (mem->bus.addr) {
                 addr = mem->bus.addr;
         } else {
+                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+                        addr = ioremap_wc(mem->bus.offset, bus_size);
                 else
-                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+                        addr = ioremap(mem->bus.offset, bus_size);
                 if (!addr) {
-                        (void) ttm_mem_io_lock(man, false);
                         ttm_mem_io_free(bdev, mem);
-                        ttm_mem_io_unlock(man);
                         return -ENOMEM;
                 }
         }
@@ -234,18 +148,13 @@
         return 0;
 }
 
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-                                void *virtual)
+static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+                                 struct ttm_resource *mem,
+                                 void *virtual)
 {
-        struct ttm_mem_type_manager *man;
-
-        man = &bdev->man[mem->mem_type];
-
         if (virtual && mem->bus.addr == NULL)
                 iounmap(virtual);
-        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(bdev, mem);
-        ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -261,54 +170,6 @@
         return 0;
 }
 
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-                return kmap_atomic(page);
-        else
-                return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
-        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-                kunmap_atomic(addr);
-        else
-                __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                 unsigned long page,
                                 pgprot_t prot)
@@ -320,13 +181,15 @@
                 return -ENOMEM;
 
         src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-        dst = ttm_kmap_atomic_prot(d, prot);
-        if (!dst)
-                return -ENOMEM;
+        /*
+         * Ensure that a highmem page is mapped with the correct
+         * pgprot. For non highmem the mapping is already there.
+         */
+        dst = kmap_local_page_prot(d, prot);
 
         memcpy_fromio(dst, src, PAGE_SIZE);
 
-        ttm_kunmap_atomic_prot(dst, prot);
+        kunmap_local(dst);
 
         return 0;
 }
@@ -342,26 +205,28 @@
                 return -ENOMEM;
 
         dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-        src = ttm_kmap_atomic_prot(s, prot);
-        if (!src)
-                return -ENOMEM;
+        /*
+         * Ensure that a highmem page is mapped with the correct
+         * pgprot. For non highmem the mapping is already there.
+         */
+        src = kmap_local_page_prot(s, prot);
 
         memcpy_toio(dst, src, PAGE_SIZE);
 
-        ttm_kunmap_atomic_prot(src, prot);
+        kunmap_local(src);
 
         return 0;
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx,
-                       struct ttm_mem_reg *new_mem)
+                       struct ttm_resource *new_mem)
 {
         struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
         struct ttm_tt *ttm = bo->ttm;
-        struct ttm_mem_reg *old_mem = &bo->mem;
-        struct ttm_mem_reg old_copy = *old_mem;
+        struct ttm_resource *old_mem = &bo->mem;
+        struct ttm_resource old_copy = *old_mem;
         void *old_iomap;
         void *new_iomap;
         int ret;
@@ -374,10 +239,10 @@
         if (ret)
                 return ret;
 
-        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+        ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
         if (ret)
                 return ret;
-        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+        ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
         if (ret)
                 goto out;
 
@@ -391,7 +256,7 @@
          * Don't move nonexistent data. Clear destination instead.
          */
         if (old_iomap == NULL &&
-            (ttm == NULL || (ttm->state == tt_unpopulated &&
+            (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
                              !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                 goto out2;
@@ -401,7 +266,7 @@
          * TTM might be null for moves within the same region.
          */
         if (ttm) {
-                ret = ttm_tt_populate(ttm, ctx);
+                ret = ttm_tt_populate(bdev, ttm, ctx);
                 if (ret)
                         goto out1;
         }
@@ -436,24 +301,22 @@
         mb();
 out2:
         old_copy = *old_mem;
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
 
-        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-                ttm_tt_destroy(ttm);
-                bo->ttm = NULL;
-        }
+        ttm_bo_assign_mem(bo, new_mem);
+
+        if (!man->use_tt)
+                ttm_bo_tt_destroy(bo);
 
 out1:
-        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+        ttm_resource_iounmap(bdev, old_mem, new_iomap);
 out:
-        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+        ttm_resource_iounmap(bdev, &old_copy, old_iomap);
 
         /*
          * On error, keep the mm node!
         */
         if (!ret)
-                ttm_bo_mem_put(bo, &old_copy);
+                ttm_resource_free(bo, &old_copy);
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -503,23 +366,22 @@
          * TODO: Explicit member copy would probably be better here.
          */
 
-        atomic_inc(&bo->bdev->glob->bo_count);
+        atomic_inc(&ttm_bo_glob.bo_count);
         INIT_LIST_HEAD(&fbo->base.ddestroy);
         INIT_LIST_HEAD(&fbo->base.lru);
         INIT_LIST_HEAD(&fbo->base.swap);
-        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
-        mutex_init(&fbo->base.wu_mutex);
         fbo->base.moving = NULL;
-        drm_vma_node_reset(&fbo->base.vma_node);
-        atomic_set(&fbo->base.cpu_writers, 0);
+        drm_vma_node_reset(&fbo->base.base.vma_node);
 
-        kref_init(&fbo->base.list_kref);
         kref_init(&fbo->base.kref);
         fbo->base.destroy = &ttm_transfered_destroy;
         fbo->base.acc_size = 0;
-        fbo->base.resv = &fbo->base.ttm_resv;
-        reservation_object_init(fbo->base.resv);
-        ret = reservation_object_trylock(fbo->base.resv);
+        if (bo->type != ttm_bo_type_sg)
+                fbo->base.base.resv = &fbo->base.base._resv;
+
+        dma_resv_init(&fbo->base.base._resv);
+        fbo->base.base.dev = NULL;
+        ret = dma_resv_trylock(&fbo->base.base._resv);
         WARN_ON(!ret);
 
         *new_obj = &fbo->base;
@@ -539,13 +401,13 @@
         tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-    defined(__powerpc__)
+    defined(__powerpc__) || defined(__mips__)
         if (caching_flags & TTM_PL_FLAG_WC)
                 tmp = pgprot_writecombine(tmp);
         else
                 tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__sparc__)
         tmp = pgprot_noncached(tmp);
 #endif
         return tmp;
@@ -557,7 +419,7 @@
                           unsigned long size,
                           struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_mem_reg *mem = &bo->mem;
+        struct ttm_resource *mem = &bo->mem;
 
         if (bo->mem.bus.addr) {
                 map->bo_kmap_type = ttm_bo_map_premapped;
@@ -565,11 +427,11 @@
         } else {
                 map->bo_kmap_type = ttm_bo_map_iomap;
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                   size);
                 else
-                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
-                                                       size);
+                        map->virtual = ioremap(bo->mem.bus.offset + offset,
+                                               size);
         }
         return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -579,7 +441,7 @@
                            unsigned long num_pages,
                            struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_mem_reg *mem = &bo->mem;
+        struct ttm_resource *mem = &bo->mem;
         struct ttm_operation_ctx ctx = {
                 .interruptible = false,
                 .no_wait_gpu = false
@@ -590,7 +452,7 @@
 
         BUG_ON(!ttm);
 
-        ret = ttm_tt_populate(ttm, &ctx);
+        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
         if (ret)
                 return ret;
 
@@ -620,8 +482,6 @@
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_mem_type_manager *man =
-                &bo->bdev->man[bo->mem.mem_type];
         unsigned long offset, size;
         int ret;
 
@@ -631,13 +491,8 @@
                 return -EINVAL;
         if (start_page > bo->num_pages)
                 return -EINVAL;
-#if 0
-        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
-                return -EPERM;
-#endif
-        (void) ttm_mem_io_lock(man, false);
+
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-        ttm_mem_io_unlock(man);
         if (ret)
                 return ret;
         if (!bo->mem.bus.is_iomem) {
@@ -652,10 +507,6 @@
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_buffer_object *bo = map->bo;
-        struct ttm_mem_type_manager *man =
-                &bo->bdev->man[bo->mem.mem_type];
-
         if (!map->virtual)
                 return;
         switch (map->bo_kmap_type) {
@@ -673,167 +524,116 @@
         default:
                 BUG();
         }
-        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-        ttm_mem_io_unlock(man);
         map->virtual = NULL;
         map->page = NULL;
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+                                 bool dst_use_tt)
+{
+        int ret;
+        ret = ttm_bo_wait(bo, false, false);
+        if (ret)
+                return ret;
+
+        if (!dst_use_tt)
+                ttm_bo_tt_destroy(bo);
+        ttm_bo_free_old_node(bo);
+        return 0;
+}
+
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+                                struct dma_fence *fence,
+                                bool dst_use_tt)
+{
+        struct ttm_buffer_object *ghost_obj;
+        int ret;
+
+        /**
+         * This should help pipeline ordinary buffer moves.
+         *
+         * Hang old buffer memory on a new buffer object,
+         * and leave it to be released when the GPU
+         * operation has completed.
+         */
+
+        dma_fence_put(bo->moving);
+        bo->moving = dma_fence_get(fence);
+
+        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+        if (ret)
+                return ret;
+
+        dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+
+        /**
+         * If we're not moving to fixed memory, the TTM object
+         * needs to stay alive. Otherwhise hang it on the ghost
+         * bo to be unbound and destroyed.
+         */
+
+        if (dst_use_tt)
+                ghost_obj->ttm = NULL;
+        else
+                bo->ttm = NULL;
+
+        dma_resv_unlock(&ghost_obj->base._resv);
+        ttm_bo_put(ghost_obj);
+        return 0;
+}
+
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+                                       struct dma_fence *fence)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+
+        /**
+         * BO doesn't have a TTM we need to bind/unbind. Just remember
+         * this eviction and free up the allocation
+         */
+        spin_lock(&from->move_lock);
+        if (!from->move || dma_fence_is_later(fence, from->move)) {
+                dma_fence_put(from->move);
+                from->move = dma_fence_get(fence);
+        }
+        spin_unlock(&from->move_lock);
+
+        ttm_bo_free_old_node(bo);
+
+        dma_fence_put(bo->moving);
+        bo->moving = dma_fence_get(fence);
+}
+
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               struct dma_fence *fence,
                               bool evict,
-                              struct ttm_mem_reg *new_mem)
+                              bool pipeline,
+                              struct ttm_resource *new_mem)
 {
         struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
-        struct ttm_mem_reg *old_mem = &bo->mem;
-        int ret;
-        struct ttm_buffer_object *ghost_obj;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+        int ret = 0;
 
-        reservation_object_add_excl_fence(bo->resv, fence);
-        if (evict) {
-                ret = ttm_bo_wait(bo, false, false);
-                if (ret)
-                        return ret;
+        dma_resv_add_excl_fence(bo->base.resv, fence);
+        if (!evict)
+                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+        else if (!from->use_tt && pipeline)
+                ttm_bo_move_pipeline_evict(bo, fence);
+        else
+                ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
-                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-                        ttm_tt_destroy(bo->ttm);
-                        bo->ttm = NULL;
-                }
-                ttm_bo_free_old_node(bo);
-        } else {
-                /**
-                 * This should help pipeline ordinary buffer moves.
-                 *
-                 * Hang old buffer memory on a new buffer object,
-                 * and leave it to be released when the GPU
-                 * operation has completed.
-                 */
+        if (ret)
+                return ret;
 
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
-
-                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-                if (ret)
-                        return ret;
-
-                reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-                /**
-                 * If we're not moving to fixed memory, the TTM object
-                 * needs to stay alive. Otherwhise hang it on the ghost
-                 * bo to be unbound and destroyed.
-                 */
-
-                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
-                        ghost_obj->ttm = NULL;
-                else
-                        bo->ttm = NULL;
-
-                ttm_bo_unreserve(ghost_obj);
-                ttm_bo_put(ghost_obj);
-        }
-
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
+        ttm_bo_assign_mem(bo, new_mem);
 
         return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
-
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-                         struct dma_fence *fence, bool evict,
-                         struct ttm_mem_reg *new_mem)
-{
-        struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_mem_reg *old_mem = &bo->mem;
-
-        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
-        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
-
-        int ret;
-
-        reservation_object_add_excl_fence(bo->resv, fence);
-
-        if (!evict) {
-                struct ttm_buffer_object *ghost_obj;
-
-                /**
-                 * This should help pipeline ordinary buffer moves.
-                 *
-                 * Hang old buffer memory on a new buffer object,
-                 * and leave it to be released when the GPU
-                 * operation has completed.
-                 */
-
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
-
-                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-                if (ret)
-                        return ret;
-
-                reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-                /**
-                 * If we're not moving to fixed memory, the TTM object
-                 * needs to stay alive. Otherwhise hang it on the ghost
-                 * bo to be unbound and destroyed.
-                 */
-
-                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
-                        ghost_obj->ttm = NULL;
-                else
-                        bo->ttm = NULL;
-
-                ttm_bo_unreserve(ghost_obj);
-                ttm_bo_put(ghost_obj);
-
-        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
-
-                /**
-                 * BO doesn't have a TTM we need to bind/unbind. Just remember
-                 * this eviction and free up the allocation
-                 */
-
-                spin_lock(&from->move_lock);
-                if (!from->move || dma_fence_is_later(fence, from->move)) {
-                        dma_fence_put(from->move);
-                        from->move = dma_fence_get(fence);
-                }
-                spin_unlock(&from->move_lock);
-
-                ttm_bo_free_old_node(bo);
-
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
-
-        } else {
-                /**
-                 * Last resort, wait for the move to be completed.
-                 *
-                 * Should never happen in pratice.
-                 */
-
-                ret = ttm_bo_wait(bo, false, false);
-                if (ret)
-                        return ret;
-
-                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
-                        ttm_tt_destroy(bo->ttm);
-                        bo->ttm = NULL;
-                }
-                ttm_bo_free_old_node(bo);
-        }
-
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
-
-        return 0;
-}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
@@ -844,7 +644,7 @@
         if (ret)
                 return ret;
 
-        ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
         /* Last resort, wait for the BO to be idle when we are OOM */
         if (ret)
                 ttm_bo_wait(bo, false, false);
@@ -853,7 +653,7 @@
         bo->mem.mem_type = TTM_PL_SYSTEM;
         bo->ttm = NULL;
 
-        ttm_bo_unreserve(ghost);
+        dma_resv_unlock(&ghost->base._resv);
         ttm_bo_put(ghost);
 
         return 0;
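
Note: after this change, ttm_bo_pipeline_move() is gone and drivers hand their copy fence to ttm_bo_move_accel_cleanup() with the new "pipeline" argument. The sketch below is illustrative only and is not part of this commit; example_bo_move() and example_copy_job() are hypothetical stand-ins for a driver's move callback and its DMA copy submission.

/*
 * Hypothetical driver-side sketch (not from this commit): hand the copy
 * fence to the reworked helper. example_copy_job() stands in for a
 * driver-specific copy submission that returns a signalling dma_fence.
 */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                           struct ttm_operation_ctx *ctx,
                           struct ttm_resource *new_mem)
{
        struct dma_fence *fence = example_copy_job(bo, new_mem);
        int ret;

        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /*
         * pipeline=true lets an eviction out of fixed (non-TT) memory be
         * fenced against the source resource manager via
         * ttm_bo_move_pipeline_evict() instead of waiting synchronously.
         */
        ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
        dma_fence_put(fence);
        return ret;
}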