forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -35,13 +35,13 @@
 #include "nouveau_vmm.h"
 
 #include <nvif/class.h>
+#include <nvif/push206e.h>
 
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;
 
@@ -54,11 +54,7 @@
	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	drm_gem_object_release(gem);
-
-	/* reset filp so nouveau_bo_del_ttm() can test for it */
-	gem->filp = NULL;
-	ttm_bo_unref(&bo);
+	ttm_bo_put(&nvbo->bo);
 
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
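Note: ttm_bo_unref() took a struct ttm_buffer_object ** and cleared the caller's pointer, which is why the old code kept the local 'bo' variable; its replacement ttm_bo_put() takes a plain pointer. A sketch of the two signatures for reference (upstream TTM, quoted from memory):

    /* old: consumes and NULLs the caller's pointer */
    void ttm_bo_unref(struct ttm_buffer_object **p_bo);
    /* new: plain reference drop; destruction runs via bo->destroy */
    void ttm_bo_put(struct ttm_buffer_object *bo);

The explicit drm_gem_object_release()/filp reset disappears because the GEM object is now embedded in the nouveau_bo (nvbo->bo.base) and is released from the BO's destroy path instead.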
@@ -71,10 +67,11 @@
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
+	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;
 
-	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;
 
	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -87,7 +84,7 @@
		goto out;
	}
 
-	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
 out:
@@ -147,25 +144,26 @@
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
+	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;
 
-	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;
 
	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;
 
-	vma = nouveau_vma_find(nvbo, &cli->vmm);
+	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
-				pm_runtime_put_autosuspend(dev);
			}
+			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
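Note: the pattern introduced here and repeated throughout the rest of this change selects the per-client address space: if the client registered an SVM (shared virtual memory) context, buffer objects are mapped through it, otherwise through the ordinary client VMM:

    struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;

Everything that previously hardcoded &cli->vmm (nouveau_vma_new(), nouveau_vma_find(), the NVIF_CLASS_VMM_NV50 checks) now goes through this pointer.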
@@ -178,24 +176,28 @@
 {
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
-	u32 flags = 0;
	int ret;
 
-	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
-		flags |= TTM_PL_FLAG_VRAM;
-	if (domain & NOUVEAU_GEM_DOMAIN_GART)
-		flags |= TTM_PL_FLAG_TT;
-	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
-		flags |= TTM_PL_FLAG_SYSTEM;
+	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
+		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 
-	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
-		flags |= TTM_PL_FLAG_UNCACHED;
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
+				tile_flags);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
 
-	ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
-			     tile_flags, NULL, NULL, pnvbo);
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
+	if (ret) {
+		drm_gem_object_release(&nvbo->bo.base);
+		kfree(nvbo);
+		return ret;
+	}
+
+	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;
-	nvbo = *pnvbo;
 
	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possibly on
@@ -206,15 +208,8 @@
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;
 
-	/* Initialize the embedded gem-object. We return a single gem-reference
-	 * to the caller, instead of a normal nouveau_bo ttm reference. */
-	ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
-	if (ret) {
-		nouveau_bo_ref(NULL, pnvbo);
-		return -ENOMEM;
-	}
-
-	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
+	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
+	*pnvbo = nvbo;
	return 0;
 }
 
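Note: creation is now split so the GEM object embedded in nouveau_bo is initialized between allocation and TTM initialization. A condensed sketch of the new order, error handling trimmed (nouveau_bo_alloc()/nouveau_bo_init() being the split-out halves of the old nouveau_bo_new()):

    nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags);
    ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);    /* sets up bo.base, incl. its resv */
    ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); /* TTM-side init */
    *pnvbo = nvbo;  /* published to the caller only on full success */

The ordering presumably matters because TTM takes its reservation object from the embedded GEM base, so drm_gem_object_init() has to run before nouveau_bo_init().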
@@ -224,6 +219,7 @@
 {
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
 
	if (is_power_of_2(nvbo->valid_domains))
@@ -232,9 +228,9 @@
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-	rep->offset = nvbo->bo.offset;
-	if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-		vma = nouveau_vma_find(nvbo, &cli->vmm);
+	rep->offset = nvbo->offset;
+	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;
 
@@ -242,7 +238,7 @@
	}
 
	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
@@ -270,15 +266,16 @@
	if (ret)
		return ret;
 
-	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
+	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
+				    &req->info.handle);
	if (ret == 0) {
-		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
+		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}
 
	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(&nvbo->gem);
+	drm_gem_object_put(&nvbo->bo.base);
	return ret;
 }
 
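Note: drm_gem_object_put_unlocked() lost its suffix upstream once the struct_mutex-locked put variant went away, so every put in this file converts mechanically:

    void drm_gem_object_put(struct drm_gem_object *obj);  /* was drm_gem_object_put_unlocked() */

together with &nvbo->gem becoming &nvbo->bo.base for the embedded GEM object.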
@@ -290,32 +287,28 @@
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
-	uint32_t pref_flags = 0, valid_flags = 0;
+	uint32_t pref_domains = 0;;
 
	if (!domains)
		return -EINVAL;
 
-	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
-		valid_flags |= TTM_PL_FLAG_VRAM;
-
-	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
-		valid_flags |= TTM_PL_FLAG_TT;
+	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
-		pref_flags |= TTM_PL_FLAG_VRAM;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
-		pref_flags |= TTM_PL_FLAG_TT;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
-		pref_flags |= TTM_PL_FLAG_VRAM;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
	else
-		pref_flags |= TTM_PL_FLAG_TT;
+		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
-	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 
	return 0;
 }
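Note: placement preferences are now carried as NOUVEAU_GEM_DOMAIN_* values all the way down; the translation to TTM placements happens once, inside nouveau_bo_placement_set(), instead of at every caller. Rough correspondence with the flags the old code built by hand:

    /* NOUVEAU_GEM_DOMAIN_VRAM -> TTM_PL_VRAM placement   */
    /* NOUVEAU_GEM_DOMAIN_GART -> TTM_PL_TT placement     */
    /* NOUVEAU_GEM_DOMAIN_CPU  -> TTM_PL_SYSTEM placement */
    nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

(The stray double semicolon in "pref_domains = 0;;" matches the upstream change; it is not a transcription error.)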
@@ -326,7 +319,8 @@
 };
 
 static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
+			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
	struct nouveau_bo *nvbo;
@@ -337,13 +331,11 @@
		b = &pbbo[nvbo->pbbo_index];
 
		if (likely(fence)) {
-			struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-			struct nouveau_vma *vma;
-
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
-			if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-				vma = (void *)(unsigned long)b->user_priv;
+			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+				struct nouveau_vma *vma =
+					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
@@ -358,15 +350,16 @@
		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
-		drm_gem_object_put_unlocked(&nvbo->gem);
+		drm_gem_object_put(&nvbo->bo.base);
	}
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini(struct validate_op *op, struct nouveau_channel *chan,
+	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
-	validate_fini_no_ticket(op, fence, pbbo);
+	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
 }
 
@@ -404,14 +397,14 @@
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
-			drm_gem_object_put_unlocked(gem);
+			drm_gem_object_put(gem);
			continue;
		}
 
		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
-			drm_gem_object_put_unlocked(gem);
+			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}
@@ -421,7 +414,7 @@
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
-			validate_fini_no_ticket(op, NULL, NULL);
+			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
@@ -435,8 +428,8 @@
			}
		}
 
-		if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
-			struct nouveau_vmm *vmm = &cli->vmm;
+		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
@@ -476,26 +469,23 @@
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
-		validate_fini(op, NULL, NULL);
+		validate_fini(op, chan, NULL, NULL);
	return ret;
 
 }
 
 static int
 validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
-	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
-	      uint64_t user_pbbo_ptr)
+	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 {
	struct nouveau_drm *drm = chan->drm;
-	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
-				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;
 
	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
@@ -518,7 +508,7 @@
		}
 
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			if (nvbo->bo.offset == b->presumed.offset &&
+			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
@@ -529,13 +519,9 @@
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
-			b->presumed.offset = nvbo->bo.offset;
+			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
-
-			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
-					 &b->presumed, sizeof(b->presumed)))
-				return -EFAULT;
		}
	}
 
@@ -546,8 +532,8 @@
 nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
-			     uint64_t user_buffers, int nr_buffers,
-			     struct validate_op *op, int *apply_relocs)
+			     int nr_buffers,
+			     struct validate_op *op, bool *apply_relocs)
 {
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;
@@ -564,14 +550,16 @@
		return ret;
	}
 
-	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
-		validate_fini(op, NULL, NULL);
+		validate_fini(op, chan, NULL, NULL);
		return ret;
+	} else if (ret > 0) {
+		*apply_relocs = true;
	}
-	*apply_relocs = ret;
+
	return 0;
 }
 
@@ -604,15 +592,11 @@
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;
-
-	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
-	if (IS_ERR(reloc))
-		return PTR_ERR(reloc);
 
	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
@@ -678,7 +662,6 @@
		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}
 
-	u_free(reloc);
	return ret;
 }
 
@@ -692,11 +675,13 @@
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
-	int i, j, ret = 0, do_reloc = 0;
+	int i, j, ret = 0;
+	bool do_reloc = false, sync = false;
 
	if (unlikely(!abi16))
		return -ENOMEM;
@@ -710,6 +695,10 @@
 
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
+	if (unlikely(atomic_read(&chan->killed)))
+		return nouveau_abi16_put(abi16, -ENODEV);
+
+	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 
	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
@@ -754,7 +743,8 @@
	}
 
	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+revalidate:
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
@@ -764,7 +754,18 @@
 
	/* Apply any relocations that are required */
	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
+		if (!reloc) {
+			validate_fini(&op, chan, NULL, bo);
+			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
+			if (IS_ERR(reloc)) {
+				ret = PTR_ERR(reloc);
+				goto out_prevalid;
+			}
+
+			goto revalidate;
+		}
+
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
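Note: the reloc array lives in user memory, and copying it in can fault; faulting while the buffer list is reserved risks deadlocking against our own reservations. Hence the new slow path: the first time relocs turn out to be needed, drop every reservation, copy the relocs into the kernel, then jump back and validate again. Condensed control flow of the hunk above:

    revalidate:
            ret = nouveau_gem_pushbuf_validate(...);        /* reserves the list */
            ...
            if (do_reloc && !reloc) {
                    validate_fini(&op, chan, NULL, bo);     /* unreserve everything */
                    reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
                    goto revalidate;                        /* may-fault copy done; retry */
            }

This is also why nouveau_gem_pushbuf_reloc_apply() now takes the pre-copied array as a parameter instead of calling u_memcpya() itself.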
@@ -787,7 +788,7 @@
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
-		ret = RING_SPACE(chan, req->nr_push * 2);
+		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
@@ -797,11 +798,11 @@
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
 
-			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
-			OUT_RING(chan, 0);
+			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
+			PUSH_DATA(chan->chan.push, 0);
		}
	} else {
-		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
@@ -831,11 +832,10 @@
						push[i].length - 8) / 4, cmd);
		}
 
-		OUT_RING(chan, 0x20000000 |
-			 (nvbo->bo.offset + push[i].offset));
-		OUT_RING(chan, 0);
+		PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
+		PUSH_DATA(chan->chan.push, 0);
		for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
-			OUT_RING(chan, 0);
+			PUSH_DATA(chan->chan.push, 0);
		}
	}
 
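Note: the legacy DMA macros become the nvif push interface (hence the new <nvif/push206e.h> include at the top of the file): PUSH_WAIT() reserves ring space like RING_SPACE() did, PUSH_DATA() emits a raw dword like OUT_RING(), and PUSH_CALL()/PUSH_JUMP() encode the call/jump commands the old code assembled by hand (the "| 2" call encoding and the 0x20000000 jump bit). A minimal sketch of the pattern, assuming a channel with a valid chan->chan.push:

    ret = PUSH_WAIT(chan->chan.push, 2);            /* make room for two dwords */
    if (ret == 0) {
            PUSH_CALL(chan->chan.push, addr);       /* emit a call to a pushbuf address */
            PUSH_DATA(chan->chan.push, 0);
    }

Note also nvbo->bo.offset becoming nvbo->offset throughout: the GPU offset appears to be cached on the nouveau_bo itself after TTM dropped its generic bo->offset field.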
@@ -846,11 +846,35 @@
		goto out;
	}
 
+	if (sync) {
+		if (!(ret = nouveau_fence_wait(fence, false, false))) {
+			if ((ret = dma_fence_get_status(&fence->base)) == 1)
+				ret = 0;
+		}
+	}
+
 out:
-	validate_fini(&op, fence, bo);
+	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);
 
+	if (do_reloc) {
+		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+			u64_to_user_ptr(req->buffers);
+
+		for (i = 0; i < req->nr_buffers; i++) {
+			if (bo[i].presumed.valid)
+				continue;
+
+			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
+					 sizeof(bo[i].presumed))) {
+				ret = -EFAULT;
+				break;
+			}
+		}
+	}
 out_prevalid:
+	if (!IS_ERR(reloc))
+		u_free(reloc);
	u_free(bo);
	u_free(push);
 
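Note: two more consequences of the slow path land here. First, the presumed-offset writeback that validate_list() used to do while reservations were held moves to the end of the ioctl, after validate_fini(), for the same may-fault reason as the reloc copy. Second, userspace can now request a synchronous submit via NOUVEAU_GEM_PUSHBUF_SYNC, passed in through req->vram_available (which the kernel overwrites on return anyway):

    if (sync) {
            /* block until the pushbuf's fence signals */
            ret = nouveau_fence_wait(fence, false, false);
            ...
    }

dma_fence_get_status() returning 1 means "signalled without error", which the new code maps back to 0 for the ioctl return.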
@@ -888,7 +912,7 @@
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);
 
-	lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
						   no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
@@ -898,7 +922,7 @@
		ret = lret;
 
	nouveau_bo_sync_for_cpu(nvbo);
-	drm_gem_object_put_unlocked(gem);
+	drm_gem_object_put(gem);
 
	return ret;
 }
@@ -917,7 +941,7 @@
	nvbo = nouveau_gem_object(gem);
 
	nouveau_bo_sync_for_device(nvbo);
-	drm_gem_object_put_unlocked(gem);
+	drm_gem_object_put(gem);
	return 0;
 }
 
@@ -934,7 +958,7 @@
		return -ENOENT;
 
	ret = nouveau_gem_info(file_priv, gem, req);
-	drm_gem_object_put_unlocked(gem);
+	drm_gem_object_put(gem);
	return ret;
 }
 