forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
--- a/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/kernel/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
 	struct ttm_buffer_object base;
@@ -47,15 +47,15 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	ttm_bo_mem_put(bo, &bo->mem);
+	ttm_resource_free(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		    struct ttm_operation_ctx *ctx,
-		    struct ttm_mem_reg *new_mem)
+		    struct ttm_resource *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = &bo->mem;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -67,10 +67,8 @@
 			return ret;
 	}
 
-	ttm_tt_unbind(ttm);
+	ttm_bo_tt_unbind(bo);
 	ttm_bo_free_old_node(bo);
-	ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
-			TTM_PL_MASK_MEM);
 	old_mem->mem_type = TTM_PL_SYSTEM;
 }
 
@@ -79,154 +77,70 @@
 		return ret;
 
 	if (new_mem->mem_type != TTM_PL_SYSTEM) {
-		ret = ttm_tt_bind(ttm, new_mem, ctx);
+
+		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_bo_tt_bind(bo, new_mem);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-
+	ttm_bo_assign_mem(bo, new_mem);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+		       struct ttm_resource *mem)
 {
-	if (likely(man->io_reserve_fastpath))
+	if (mem->bus.offset || mem->bus.addr)
 		return 0;
 
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-EXPORT_SYMBOL(ttm_mem_io_lock);
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
-	if (likely(man->io_reserve_fastpath))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
-		return -EAGAIN;
-
-	bo = list_first_entry(&man->io_reserve_lru,
-			      struct ttm_buffer_object,
-			      io_reserve_lru);
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-
-	return 0;
-}
-
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-			struct ttm_mem_reg *mem)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	int ret = 0;
-
+	mem->bus.is_iomem = false;
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
-	if (likely(man->io_reserve_fastpath))
-		return bdev->driver->io_mem_reserve(bdev, mem);
 
-	if (bdev->driver->io_mem_reserve &&
-	    mem->bus.io_reserved_count++ == 0) {
-retry:
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
-		if (ret == -EAGAIN) {
-			ret = ttm_mem_io_evict(man);
-			if (ret == 0)
-				goto retry;
-		}
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
-EXPORT_SYMBOL(ttm_mem_io_reserve);
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
-		     struct ttm_mem_reg *mem)
+		     struct ttm_resource *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	if (likely(man->io_reserve_fastpath))
+	if (!mem->bus.offset && !mem->bus.addr)
 		return;
 
-	if (bdev->driver->io_mem_reserve &&
-	    --mem->bus.io_reserved_count == 0 &&
-	    bdev->driver->io_mem_free)
+	if (bdev->driver->io_mem_free)
 		bdev->driver->io_mem_free(bdev, mem);
 
-}
-EXPORT_SYMBOL(ttm_mem_io_free);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	int ret;
-
-	if (!mem->bus.io_reserved_vm) {
-		struct ttm_mem_type_manager *man =
-			&bo->bdev->man[mem->mem_type];
-
-		ret = ttm_mem_io_reserve(bo->bdev, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		mem->bus.io_reserved_vm = true;
-		if (man->use_io_reserve_lru)
-			list_add_tail(&bo->io_reserve_lru,
-				      &man->io_reserve_lru);
-	}
-	return 0;
+	mem->bus.offset = 0;
+	mem->bus.addr = NULL;
 }
 
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+			       struct ttm_resource *mem,
+			       void **virtual)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
-
-	if (mem->bus.io_reserved_vm) {
-		mem->bus.io_reserved_vm = false;
-		list_del_init(&bo->io_reserve_lru);
-		ttm_mem_io_free(bo->bdev, mem);
-	}
-}
-
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			void **virtual)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
 	if (mem->bus.addr) {
 		addr = mem->bus.addr;
 	} else {
+		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap_wc(mem->bus.offset, bus_size);
 		else
-			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap(mem->bus.offset, bus_size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
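Note on the hunk above: with the io_reserve fastpath, LRU and refcount gone, ->io_mem_reserve() is called unconditionally and mem->bus.offset now carries an absolute bus address (mem->bus.base and mem->bus.size are gone; TTM derives the mapping size from mem->num_pages). A minimal sketch of what a driver callback looks like against this API; mydrv_device and vram_base are hypothetical names, not part of this patch:

	static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
					struct ttm_resource *mem)
	{
		/* hypothetical driver device embedding the ttm_bo_device */
		struct mydrv_device *mdev = container_of(bdev, struct mydrv_device, bdev);

		switch (mem->mem_type) {
		case TTM_PL_SYSTEM:
		case TTM_PL_TT:
			return 0;	/* system RAM, nothing to map */
		case TTM_PL_VRAM:
			/* absolute bus address: aperture base plus offset inside VRAM */
			mem->bus.offset = (mem->start << PAGE_SHIFT) + mdev->vram_base;
			mem->bus.is_iomem = true;
			return 0;
		default:
			return -EINVAL;
		}
	}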
@@ -234,18 +148,13 @@
 	return 0;
 }
 
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			 void *virtual)
+static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+				struct ttm_resource *mem,
+				void *virtual)
 {
-	struct ttm_mem_type_manager *man;
-
-	man = &bdev->man[mem->mem_type];
-
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -261,54 +170,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		return kmap_atomic(page);
-	else
-		return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		kunmap_atomic(addr);
-	else
-		__ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
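The hunk above drops the exported ttm_kmap_atomic_prot()/ttm_kunmap_atomic_prot() pair; the two hunks that follow switch the in-tree copy helpers to the generic calls. For an out-of-tree user of the removed symbols the mechanical replacement is presumably the same pattern; a hedged sketch, where page, prot and iosrc stand in for the caller's own values:

	void *vaddr = kmap_atomic_prot(page, prot);	/* was ttm_kmap_atomic_prot(page, prot) */
	memcpy_fromio(vaddr, iosrc, PAGE_SIZE);		/* caller-specific copy */
	kunmap_atomic(vaddr);				/* was ttm_kunmap_atomic_prot(vaddr, prot) */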
@@ -320,13 +181,13 @@
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = ttm_kmap_atomic_prot(d, prot);
+	dst = kmap_atomic_prot(d, prot);
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(dst, prot);
+	kunmap_atomic(dst);
 
 	return 0;
 }
@@ -342,26 +203,26 @@
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = ttm_kmap_atomic_prot(s, prot);
+	src = kmap_atomic_prot(s, prot);
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(src, prot);
+	kunmap_atomic(src);
 
 	return 0;
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
-		       struct ttm_mem_reg *new_mem)
+		       struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource old_copy = *old_mem;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -374,10 +235,10 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
 	if (ret)
 		return ret;
-	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
 	if (ret)
 		goto out;
 
@@ -391,7 +252,7 @@
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
 			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
@@ -401,7 +262,7 @@
 	 * TTM might be null for moves within the same region.
 	 */
 	if (ttm) {
-		ret = ttm_tt_populate(ttm, ctx);
+		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
 			goto out1;
 	}
@@ -436,24 +297,22 @@
 	mb();
 out2:
 	old_copy = *old_mem;
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
 
-	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-		ttm_tt_destroy(ttm);
-		bo->ttm = NULL;
-	}
+	ttm_bo_assign_mem(bo, new_mem);
+
+	if (!man->use_tt)
+		ttm_bo_tt_destroy(bo);
 
 out1:
-	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+	ttm_resource_iounmap(bdev, old_mem, new_iomap);
 out:
-	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_resource_iounmap(bdev, &old_copy, old_iomap);
 
 	/*
 	 * On error, keep the mm node!
 	 */
 	if (!ret)
-		ttm_bo_mem_put(bo, &old_copy);
+		ttm_resource_free(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
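Only the resource type changes in the signature of ttm_bo_move_memcpy(); a driver's move callback keeps using it as the CPU fallback. A minimal sketch under that assumption, where mydrv_move_blit() is a hypothetical hardware copy path and not part of this patch:

	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
				 struct ttm_operation_ctx *ctx,
				 struct ttm_resource *new_mem)
	{
		int ret;

		/* try the hardware copy engine first */
		ret = mydrv_move_blit(bo, evict, new_mem);
		if (ret)
			/* fall back to the CPU copy with the new resource type */
			ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		return ret;
	}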
@@ -503,23 +362,22 @@
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	atomic_inc(&bo->bdev->glob->bo_count);
+	atomic_inc(&ttm_bo_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
-	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
-	drm_vma_node_reset(&fbo->base.vma_node);
-	atomic_set(&fbo->base.cpu_writers, 0);
+	drm_vma_node_reset(&fbo->base.base.vma_node);
 
-	kref_init(&fbo->base.list_kref);
 	kref_init(&fbo->base.kref);
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
-	fbo->base.resv = &fbo->base.ttm_resv;
-	reservation_object_init(fbo->base.resv);
-	ret = reservation_object_trylock(fbo->base.resv);
+	if (bo->type != ttm_bo_type_sg)
+		fbo->base.base.resv = &fbo->base.base._resv;
+
+	dma_resv_init(&fbo->base.base._resv);
+	fbo->base.base.dev = NULL;
+	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
 	*new_obj = &fbo->base;
@@ -539,13 +397,13 @@
 		tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-    defined(__powerpc__)
+    defined(__powerpc__) || defined(__mips__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__sparc__)
 	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
@@ -557,7 +415,7 @@
 			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_resource *mem = &bo->mem;
 
 	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
@@ -565,11 +423,11 @@
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
 						  size);
 		else
-			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
-						       size);
+			map->virtual = ioremap(bo->mem.bus.offset + offset,
+					       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -579,7 +437,7 @@
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_resource *mem = &bo->mem;
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false
@@ -590,7 +448,7 @@
 
 	BUG_ON(!ttm);
 
-	ret = ttm_tt_populate(ttm, &ctx);
+	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
 	if (ret)
 		return ret;
 
@@ -620,8 +478,6 @@
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -631,13 +487,8 @@
 		return -EINVAL;
 	if (start_page > bo->num_pages)
 		return -EINVAL;
-#if 0
-	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-#endif
-	(void) ttm_mem_io_lock(man, false);
+
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -652,10 +503,6 @@
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_mem_type_manager *man =
-		&bo->bdev->man[bo->mem.mem_type];
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
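The ttm_bo_kunmap() change continues in the next hunk. Taken together with the ttm_bo_kmap() hunks above, the per-manager io_reserve lock is gone and the helpers call ttm_mem_io_reserve()/ttm_mem_io_free() themselves, so callers keep the unchanged public API. A short usage sketch, assuming bo is an already reserved buffer object:

	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret == 0) {
		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
		/* ... CPU access through ptr; treat it as iomem if is_iomem is set ... */
		ttm_bo_kunmap(&map);
	}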
@@ -673,167 +520,116 @@
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+				 bool dst_use_tt)
+{
+	int ret;
+	ret = ttm_bo_wait(bo, false, false);
+	if (ret)
+		return ret;
+
+	if (!dst_use_tt)
+		ttm_bo_tt_destroy(bo);
+	ttm_bo_free_old_node(bo);
+	return 0;
+}
+
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+				struct dma_fence *fence,
+				bool dst_use_tt)
+{
+	struct ttm_buffer_object *ghost_obj;
+	int ret;
+
+	/**
+	 * This should help pipeline ordinary buffer moves.
+	 *
+	 * Hang old buffer memory on a new buffer object,
+	 * and leave it to be released when the GPU
+	 * operation has completed.
+	 */
+
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
+
+	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+	if (ret)
+		return ret;
+
+	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+
+	/**
+	 * If we're not moving to fixed memory, the TTM object
+	 * needs to stay alive. Otherwhise hang it on the ghost
+	 * bo to be unbound and destroyed.
+	 */
+
+	if (dst_use_tt)
+		ghost_obj->ttm = NULL;
+	else
+		bo->ttm = NULL;
+
+	dma_resv_unlock(&ghost_obj->base._resv);
+	ttm_bo_put(ghost_obj);
+	return 0;
+}
+
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+				       struct dma_fence *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+
+	/**
+	 * BO doesn't have a TTM we need to bind/unbind. Just remember
+	 * this eviction and free up the allocation
+	 */
+	spin_lock(&from->move_lock);
+	if (!from->move || dma_fence_is_later(fence, from->move)) {
+		dma_fence_put(from->move);
+		from->move = dma_fence_get(fence);
+	}
+	spin_unlock(&from->move_lock);
+
+	ttm_bo_free_old_node(bo);
+
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
+}
+
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct dma_fence *fence,
 			      bool evict,
-			      struct ttm_mem_reg *new_mem)
+			      bool pipeline,
+			      struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	int ret;
-	struct ttm_buffer_object *ghost_obj;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	int ret = 0;
 
-	reservation_object_add_excl_fence(bo->resv, fence);
-	if (evict) {
-		ret = ttm_bo_wait(bo, false, false);
-		if (ret)
-			return ret;
+	dma_resv_add_excl_fence(bo->base.resv, fence);
+	if (!evict)
+		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+	else if (!from->use_tt && pipeline)
+		ttm_bo_move_pipeline_evict(bo, fence);
+	else
+		ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
-		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		ttm_bo_free_old_node(bo);
-	} else {
-		/**
-		 * This should help pipeline ordinary buffer moves.
-		 *
-		 * Hang old buffer memory on a new buffer object,
-		 * and leave it to be released when the GPU
-		 * operation has completed.
-		 */
+	if (ret)
+		return ret;
 
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-		if (ret)
-			return ret;
-
-		reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-		/**
-		 * If we're not moving to fixed memory, the TTM object
-		 * needs to stay alive. Otherwhise hang it on the ghost
-		 * bo to be unbound and destroyed.
-		 */
-
-		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
-			ghost_obj->ttm = NULL;
-		else
-			bo->ttm = NULL;
-
-		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_put(ghost_obj);
-	}
-
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
+	ttm_bo_assign_mem(bo, new_mem);
 
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
-
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_mem_reg *new_mem)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
-	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
-
-	int ret;
-
-	reservation_object_add_excl_fence(bo->resv, fence);
-
-	if (!evict) {
-		struct ttm_buffer_object *ghost_obj;
-
-		/**
-		 * This should help pipeline ordinary buffer moves.
-		 *
-		 * Hang old buffer memory on a new buffer object,
-		 * and leave it to be released when the GPU
-		 * operation has completed.
-		 */
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-		if (ret)
-			return ret;
-
-		reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
-		/**
-		 * If we're not moving to fixed memory, the TTM object
-		 * needs to stay alive. Otherwhise hang it on the ghost
-		 * bo to be unbound and destroyed.
-		 */
-
-		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
-			ghost_obj->ttm = NULL;
-		else
-			bo->ttm = NULL;
-
-		ttm_bo_unreserve(ghost_obj);
-		ttm_bo_put(ghost_obj);
-
-	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
-
-		/**
-		 * BO doesn't have a TTM we need to bind/unbind. Just remember
-		 * this eviction and free up the allocation
-		 */
-
-		spin_lock(&from->move_lock);
-		if (!from->move || dma_fence_is_later(fence, from->move)) {
-			dma_fence_put(from->move);
-			from->move = dma_fence_get(fence);
-		}
-		spin_unlock(&from->move_lock);
-
-		ttm_bo_free_old_node(bo);
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-
-	} else {
-		/**
-		 * Last resort, wait for the move to be completed.
-		 *
-		 * Should never happen in pratice.
-		 */
-
-		ret = ttm_bo_wait(bo, false, false);
-		if (ret)
-			return ret;
-
-		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
-			ttm_tt_destroy(bo->ttm);
-			bo->ttm = NULL;
-		}
-		ttm_bo_free_old_node(bo);
-	}
-
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-
-	return 0;
-}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
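With ttm_bo_pipeline_move() removed in the hunk above, its callers switch to ttm_bo_move_accel_cleanup(), which now takes the extra pipeline flag and internally picks the ghost-object, pipelined-eviction or wait path. A hedged sketch of the call site in a driver's accelerated move, assuming fence is the fence of the driver's copy job:

	/* after queueing the GPU copy that signals "fence" */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
					true /* allow pipelined eviction */,
					new_mem);
	if (ret)
		return ret;	/* driver-specific error handling */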
@@ -844,7 +640,7 @@
 	if (ret)
 		return ret;
 
-	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
@@ -853,7 +649,7 @@
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->ttm = NULL;
 
-	ttm_bo_unreserve(ghost);
+	dma_resv_unlock(&ghost->base._resv);
 	ttm_bo_put(ghost);
 
 	return 0;