hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -25,14 +25,57 @@
 *
 **************************************************************************/

-#include "vmwgfx_drv.h"
-#include <drm/vmwgfx_drm.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
+
 #include "vmwgfx_resource_priv.h"
 #include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"

 #define VMW_RES_EVICT_ERR_COUNT 10
+
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+ struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+
+ dma_resv_assert_held(res->backup->base.base.resv);
+ res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+ res->func->prio;
+
+ while (*new) {
+ struct vmw_resource *this =
+ container_of(*new, struct vmw_resource, mob_node);
+
+ parent = *new;
+ new = (res->backup_offset < this->backup_offset) ?
+ &((*new)->rb_left) : &((*new)->rb_right);
+ }
+
+ rb_link_node(&res->mob_node, parent, new);
+ rb_insert_color(&res->mob_node, &backup->res_tree);
+
+ vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+
+ dma_resv_assert_held(backup->base.base.resv);
+ if (vmw_resource_mob_attached(res)) {
+ rb_erase(&res->mob_node, &backup->res_tree);
+ RB_CLEAR_NODE(&res->mob_node);
+ vmw_bo_prio_del(backup, res->used_prio);
+ }
+}

 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
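
The two helpers above replace the old per-buffer mob_head list with an rb-tree keyed by backup_offset, so every resource backed by the same MOB can later be visited in offset order (vmw_resources_clean() in the final hunk relies on this). A minimal caller sketch, assuming only the helpers and TTM calls already visible in this patch; the reserve/unreserve framing is illustrative rather than taken from the driver:

    /* Illustrative only: attach/detach require the backing bo reservation. */
    static int example_attach_under_reservation(struct vmw_resource *res)
    {
            struct ttm_buffer_object *bo = &res->backup->base;
            int ret;

            ret = ttm_bo_reserve(bo, true, false, NULL); /* interruptible */
            if (ret)
                    return ret;

            if (!vmw_resource_mob_attached(res))
                    vmw_resource_mob_attach(res); /* insert into backup->res_tree */

            ttm_bo_unreserve(bo);
            return 0;
    }
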
@@ -58,11 +101,11 @@
 struct vmw_private *dev_priv = res->dev_priv;
 struct idr *idr = &dev_priv->res_idr[res->func->res_type];

- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 if (res->id != -1)
 idr_remove(idr, res->id);
 res->id = -1;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 }

 static void vmw_resource_release(struct kref *kref)
@@ -73,24 +116,27 @@
 int id;
 struct idr *idr = &dev_priv->res_idr[res->func->res_type];

- write_lock(&dev_priv->resource_lock);
- res->avail = false;
+ spin_lock(&dev_priv->resource_lock);
 list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 if (res->backup) {
 struct ttm_buffer_object *bo = &res->backup->base;

 ttm_bo_reserve(bo, false, false, NULL);
- if (!list_empty(&res->mob_head) &&
+ if (vmw_resource_mob_attached(res) &&
 res->func->unbind != NULL) {
 struct ttm_validate_buffer val_buf;

 val_buf.bo = bo;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
 res->func->unbind(res, false, &val_buf);
 }
 res->backup_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
 ttm_bo_unreserve(bo);
 vmw_bo_unreference(&res->backup);
 }
@@ -108,10 +154,10 @@
 else
 kfree(res);

- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 if (id != -1)
 idr_remove(idr, id);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 }

 void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +186,13 @@
 BUG_ON(res->id != -1);

 idr_preload(GFP_KERNEL);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);

 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 if (ret >= 0)
 res->id = ret;

- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 idr_preload_end();
 return ret < 0 ? ret : 0;
 }
@@ -170,45 +216,25 @@
 kref_init(&res->kref);
 res->hw_destroy = NULL;
 res->res_free = res_free;
- res->avail = false;
 res->dev_priv = dev_priv;
 res->func = func;
+ RB_CLEAR_NODE(&res->mob_node);
 INIT_LIST_HEAD(&res->lru_head);
- INIT_LIST_HEAD(&res->mob_head);
 INIT_LIST_HEAD(&res->binding_head);
 res->id = -1;
 res->backup = NULL;
 res->backup_offset = 0;
 res->backup_dirty = false;
 res->res_dirty = false;
+ res->coherent = false;
+ res->used_prio = 3;
+ res->dirty = NULL;
 if (delay_id)
 return 0;
 else
 return vmw_resource_alloc_id(res);
 }

-/**
- * vmw_resource_activate
- *
- * @res: Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
-{
- struct vmw_private *dev_priv = res->dev_priv;
-
- write_lock(&dev_priv->resource_lock);
- res->avail = true;
- res->hw_destroy = hw_destroy;
- write_unlock(&dev_priv->resource_lock);
-}


 /**
@@ -243,15 +269,7 @@
 goto out_bad_resource;

 res = converter->base_obj_to_res(base);
-
- read_lock(&dev_priv->resource_lock);
- if (!res->avail || res->res_free != converter->res_free) {
- read_unlock(&dev_priv->resource_lock);
- goto out_bad_resource;
- }
-
 kref_get(&res->kref);
- read_unlock(&dev_priv->resource_lock);

 *p_res = res;
 ret = 0;
@@ -260,6 +278,41 @@
 ttm_base_object_unref(&base);

 return ret;
+}
+
+/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ *
+ * Return: The struct vmw_resource pointer on success, with no extra
+ * reference taken, or an error pointer on failure: -ESRCH if the handle
+ * can't be found, -EINVAL if it is associated with an incorrect resource
+ * type.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter)
+{
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_noref_lookup(tfile, handle);
+ if (!base)
+ return ERR_PTR(-ESRCH);
+
+ if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+ ttm_base_object_noref_release();
+ return ERR_PTR(-EINVAL);
+ }
+
+ return converter->base_obj_to_res(base);
 }

 /**
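
A hedged usage sketch for the new noref lookup, assuming a caller that already has dev_priv, tfile, handle and converter in scope (for example on the command-submission path); the error handling simply mirrors the ERR_PTR convention of the function above:

    struct vmw_resource *res;

    res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
                                                converter);
    if (IS_ERR(res))
            return PTR_ERR(res);

    /*
     * No reference was taken above; the caller is expected to either take
     * its own reference or hand the resource to the validation machinery
     * before doing anything that can block. The exact release convention
     * lives outside this hunk and is assumed here.
     */
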
@@ -340,7 +393,8 @@
 * should be retried once resources have been freed up.
 */
 static int vmw_resource_do_validate(struct vmw_resource *res,
- struct ttm_validate_buffer *val_buf)
+ struct ttm_validate_buffer *val_buf,
+ bool dirtying)
 {
 int ret = 0;
 const struct vmw_res_func *func = res->func;
@@ -352,23 +406,48 @@
 }

 if (func->bind &&
- ((func->needs_backup && list_empty(&res->mob_head) &&
+ ((func->needs_backup && !vmw_resource_mob_attached(res) &&
 val_buf->bo != NULL) ||
 (!func->needs_backup && val_buf->bo != NULL))) {
 ret = func->bind(res, val_buf);
 if (unlikely(ret != 0))
 goto out_bind_failed;
 if (func->needs_backup)
- list_add_tail(&res->mob_head, &res->backup->res_list);
+ vmw_resource_mob_attach(res);
 }

 /*
- * Only do this on write operations, and move to
- * vmw_resource_unreserve if it can be called after
- * backup buffers have been unreserved. Otherwise
- * sort out locking.
+ * Handle the case where the backup mob is marked coherent but
+ * the resource isn't.
 */
- res->res_dirty = true;
+ if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
+ !res->coherent) {
+ if (res->backup->dirty && !res->dirty) {
+ ret = func->dirty_alloc(res);
+ if (ret)
+ return ret;
+ } else if (!res->backup->dirty && res->dirty) {
+ func->dirty_free(res);
+ }
+ }
+
+ /*
+ * Transfer the dirty regions to the resource and update
+ * the resource.
+ */
+ if (res->dirty) {
+ if (dirtying && !res->res_dirty) {
+ pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP
+ (res->backup_offset + res->backup_size,
+ PAGE_SIZE);
+
+ vmw_bo_dirty_unmap(res->backup, start, end);
+ }
+
+ vmw_bo_dirty_transfer_to_res(res);
+ return func->dirty_sync(res);
+ }

 return 0;

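
The dirty-unmap span above is computed in whole pages: the start page is backup_offset rounded down and the end page is backup_offset + backup_size rounded up. A standalone worked example of that arithmetic (userspace C, 4 KiB pages assumed; the kernel's __KERNEL_DIV_ROUND_UP is replaced by a local equivalent):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long backup_offset = 0x3000; /* hypothetical offset */
            unsigned long backup_size = 0x2800;   /* hypothetical size */
            unsigned long start = backup_offset >> PAGE_SHIFT;
            unsigned long end = DIV_ROUND_UP(backup_offset + backup_size,
                                             PAGE_SIZE);

            /* Prints "pages [3, 6)": pages 3, 4 and 5 would be unmapped. */
            printf("pages [%lu, %lu)\n", start, end);
            return 0;
    }
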
@@ -383,6 +462,8 @@
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
+ * @dirty_set: Change dirty status of the resource.
+ * @dirty: When changing dirty status indicates the new status.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
@@ -392,6 +473,8 @@
 * resource lru list, so that it can be evicted if necessary.
 */
 void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
 bool switch_backup,
 struct vmw_buffer_object *new_backup,
 unsigned long new_backup_offset)
@@ -403,29 +486,42 @@

 if (switch_backup && new_backup != res->backup) {
 if (res->backup) {
- lockdep_assert_held(&res->backup->base.resv->lock.base);
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
 vmw_bo_unreference(&res->backup);
 }

 if (new_backup) {
 res->backup = vmw_bo_reference(new_backup);
- lockdep_assert_held(&new_backup->base.resv->lock.base);
- list_add_tail(&res->mob_head, &new_backup->res_list);
+
+ /*
+ * The validation code should already have added a
+ * dirty tracker here.
+ */
+ WARN_ON(res->coherent && !new_backup->dirty);
+
+ vmw_resource_mob_attach(res);
 } else {
 res->backup = NULL;
 }
+ } else if (switch_backup && res->coherent) {
+ vmw_bo_dirty_release(res->backup);
 }
+
 if (switch_backup)
 res->backup_offset = new_backup_offset;
+
+ if (dirty_set)
+ res->res_dirty = dirty;

 if (!res->func->may_evict || res->id == -1 || res->pin_count)
 return;

- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 list_add_tail(&res->lru_head,
 &res->dev_priv->res_lru[res->func->res_type]);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 }

 /**
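
With the two new leading parameters, the dirty transition that vmw_resource_do_validate() used to force unconditionally is now stated by the caller at unreserve time. Hedged call-site sketches (the surrounding reserve/validate code is assumed):

    /* Mark the resource dirty after a GPU operation that wrote to it ... */
    vmw_resource_unreserve(res, true, true, false, NULL, 0);

    /*
     * ... or leave both the dirty state and the backup buffer untouched,
     * as the pin/unpin paths later in this patch do.
     */
    vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
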
@@ -458,14 +554,15 @@
 }

 INIT_LIST_HEAD(&val_list);
- val_buf->bo = ttm_bo_reference(&res->backup->base);
- val_buf->shared = false;
+ ttm_bo_get(&res->backup->base);
+ val_buf->bo = &res->backup->base;
+ val_buf->num_shared = 0;
 list_add_tail(&val_buf->head, &val_list);
 ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 if (unlikely(ret != 0))
 goto out_no_reserve;

- if (res->func->needs_backup && list_empty(&res->mob_head))
+ if (res->func->needs_backup && !vmw_resource_mob_attached(res))
 return 0;

 backup_dirty = res->backup_dirty;
@@ -481,7 +578,8 @@
 out_no_validate:
 ttm_eu_backoff_reservation(ticket, &val_list);
 out_no_reserve:
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
 if (backup_dirty)
 vmw_bo_unreference(&res->backup);

@@ -504,9 +602,9 @@
 struct vmw_private *dev_priv = res->dev_priv;
 int ret;

- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);

 if (res->func->needs_backup && res->backup == NULL &&
 !no_backup) {
@@ -541,7 +639,8 @@
 INIT_LIST_HEAD(&val_list);
 list_add_tail(&val_buf->head, &val_list);
 ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_unref(&val_buf->bo);
+ ttm_bo_put(val_buf->bo);
+ val_buf->bo = NULL;
 }

 /**
@@ -562,17 +661,17 @@
 BUG_ON(!func->may_evict);

 val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
 ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 if (unlikely(ret != 0))
 return ret;

 if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ (!func->needs_backup || vmw_resource_mob_attached(res)))) {
 ret = func->unbind(res, res->res_dirty, &val_buf);
 if (unlikely(ret != 0))
 goto out_no_unbind;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
 }
 ret = func->destroy(res);
 res->backup_dirty = true;
@@ -587,15 +686,20 @@
 /**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
- *
- * @res: The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
+ * @dirtying: Pending GPU operation will dirty the resource
 *
 * On succesful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
 */
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying)
 {
 int ret;
 struct vmw_resource *evict_res;
@@ -608,20 +712,20 @@
 return 0;

 val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
 if (res->backup)
 val_buf.bo = &res->backup->base;
 do {
- ret = vmw_resource_do_validate(res, &val_buf);
+ ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 if (likely(ret != -EBUSY))
 break;

- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 if (list_empty(lru_list) || !res->func->may_evict) {
 DRM_ERROR("Out of device device resources "
 "for %s.\n", res->func->type_name);
 ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 break;
 }

@@ -630,14 +734,14 @@
 lru_head));
 list_del_init(&evict_res->lru_head);

- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);

 /* Trylock backup buffers with a NULL ticket. */
- ret = vmw_resource_do_evict(NULL, evict_res, true);
+ ret = vmw_resource_do_evict(NULL, evict_res, intr);
 if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 if (ret == -ERESTARTSYS ||
 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 vmw_resource_unreference(&evict_res);
@@ -651,7 +755,7 @@
 if (unlikely(ret != 0))
 goto out_no_validate;
 else if (!res->func->needs_backup && res->backup) {
- list_del_init(&res->mob_head);
+ WARN_ON_ONCE(vmw_resource_mob_attached(res));
 vmw_bo_unreference(&res->backup);
 }

@@ -675,22 +779,23 @@
 */
 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 {
-
- struct vmw_resource *res, *next;
 struct ttm_validate_buffer val_buf = {
 .bo = &vbo->base,
- .shared = false
+ .num_shared = 0
 };

- lockdep_assert_held(&vbo->base.resv->lock.base);
- list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
- if (!res->func->unbind)
- continue;
+ dma_resv_assert_held(vbo->base.base.resv);
+ while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
+ struct rb_node *node = vbo->res_tree.rb_node;
+ struct vmw_resource *res =
+ container_of(node, struct vmw_resource, mob_node);

- (void) res->func->unbind(res, true, &val_buf);
+ if (!WARN_ON_ONCE(!res->func->unbind))
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
+
 res->backup_dirty = true;
 res->res_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
 }

 (void) ttm_bo_wait(&vbo->base, false, false);
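
Because vmw_resource_mob_detach() erases the node the loop is standing on, the rewritten loop re-reads the tree root on every pass instead of walking with rb_next(). A minimal sketch of the same drain pattern with hypothetical names, assuming the kernel rbtree API from <linux/rbtree.h>:

    struct example_node {
            struct rb_node node;
    };

    static void example_drain(struct rb_root *tree)
    {
            /* Each pass removes the current root, so the walk always restarts. */
            while (!RB_EMPTY_ROOT(tree)) {
                    struct example_node *e =
                            container_of(tree->rb_node, struct example_node, node);

                    /* ... operate on e while it is still linked ... */
                    rb_erase(&e->node, tree);
                    RB_CLEAR_NODE(&e->node);
            }
    }
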
@@ -722,12 +827,9 @@
 dx_query_ctx = dx_query_mob->dx_query_ctx;
 dev_priv = dx_query_ctx->dev_priv;

- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for "
- "query MOB read back.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL))
 return -ENOMEM;
- }

 cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 cmd->header.size = sizeof(cmd->body);
@@ -753,7 +855,7 @@
 * states from the device.
 */
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
 {
 struct vmw_buffer_object *dx_query_mob;
 struct ttm_bo_device *bdev = bo->bdev;
@@ -819,7 +921,7 @@
 struct ww_acquire_ctx ticket;

 do {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);

 if (list_empty(lru_list))
 goto out_unlock;
@@ -828,14 +930,14 @@
 list_first_entry(lru_list, struct vmw_resource,
 lru_head));
 list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);

 /* Wait lock backup buffers with a ticket. */
 ret = vmw_resource_do_evict(&ticket, evict_res, false);
 if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
 list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 vmw_resource_unreference(&evict_res);
 return;
@@ -846,7 +948,7 @@
 } while (1);

 out_unlock:
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
 }

 /**
@@ -914,7 +1016,7 @@
 /* Do we really need to pin the MOB as well? */
 vmw_bo_pin_reserved(vbo, true);
 }
- ret = vmw_resource_validate(res);
+ ret = vmw_resource_validate(res, interruptible, true);
 if (vbo)
 ttm_bo_unreserve(&vbo->base);
 if (ret)
@@ -923,7 +1025,7 @@
 res->pin_count++;

 out_no_validate:
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
 out_no_reserve:
 mutex_unlock(&dev_priv->cmdbuf_mutex);
 ttm_write_unlock(&dev_priv->reservation_sem);
@@ -959,7 +1061,7 @@
 ttm_bo_unreserve(&vbo->base);
 }

- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

 mutex_unlock(&dev_priv->cmdbuf_mutex);
 ttm_read_unlock(&dev_priv->reservation_sem);
@@ -974,3 +1076,101 @@
 {
 return res->func->res_type;
 }
+
+/**
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
+ * sequential range of touched backing store memory.
+ * @res: The resource.
+ * @start: The first page touched.
+ * @end: The last page touched + 1.
+ */
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end)
+{
+ if (res->dirty)
+ res->func->dirty_range_add(res, start << PAGE_SHIFT,
+ end << PAGE_SHIFT);
+}
+
+/**
+ * vmw_resources_clean - Clean resources intersecting a mob range
+ * @vbo: The mob buffer object
+ * @start: The mob page offset starting the range
+ * @end: The mob page offset ending the range
+ * @num_prefault: Returns how many pages including the first have been
+ * cleaned and are ok to prefault
+ */
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault)
+{
+ struct rb_node *cur = vbo->res_tree.rb_node;
+ struct vmw_resource *found = NULL;
+ unsigned long res_start = start << PAGE_SHIFT;
+ unsigned long res_end = end << PAGE_SHIFT;
+ unsigned long last_cleaned = 0;
+
+ /*
+ * Find the resource with lowest backup_offset that intersects the
+ * range.
+ */
+ while (cur) {
+ struct vmw_resource *cur_res =
+ container_of(cur, struct vmw_resource, mob_node);
+
+ if (cur_res->backup_offset >= res_end) {
+ cur = cur->rb_left;
+ } else if (cur_res->backup_offset + cur_res->backup_size <=
+ res_start) {
+ cur = cur->rb_right;
+ } else {
+ found = cur_res;
+ cur = cur->rb_left;
+ /* Continue to look for resources with lower offsets */
+ }
+ }
+
+ /*
+ * In order of increasing backup_offset, clean dirty resources
+ * intersecting the range.
+ */
+ while (found) {
+ if (found->res_dirty) {
+ int ret;
+
+ if (!found->func->clean)
+ return -EINVAL;
+
+ ret = found->func->clean(found);
+ if (ret)
+ return ret;
+
+ found->res_dirty = false;
+ }
+ last_cleaned = found->backup_offset + found->backup_size;
+ cur = rb_next(&found->mob_node);
+ if (!cur)
+ break;
+
+ found = container_of(cur, struct vmw_resource, mob_node);
+ if (found->backup_offset >= res_end)
+ break;
+ }
+
+ /*
+ * Set number of pages allowed prefaulting and fence the buffer object
+ */
+ *num_prefault = 1;
+ if (last_cleaned > res_start) {
+ struct ttm_buffer_object *bo = &vbo->base;
+
+ *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
+ PAGE_SIZE);
+ vmw_bo_fence_single(bo, NULL);
+ if (bo->moving)
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get
+ (dma_resv_get_excl(bo->base.resv));
+ }
+
+ return 0;
+}
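
vmw_resources_clean() first descends the tree to the lowest-offset resource overlapping [start, end), then walks right in offset order, cleaning dirty resources until one begins past the range; num_prefault then covers the pages from res_start up to the end of the last intersecting resource. A standalone userspace sketch of the same walk over an offset-sorted array (equivalent to the rb-tree traversal for illustration purposes); all names and numbers are hypothetical:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct fake_res {
            unsigned long backup_offset;    /* bytes into the MOB */
            unsigned long backup_size;      /* bytes */
            int dirty;
    };

    int main(void)
    {
            /* Sorted by backup_offset, as the rb-tree walk would yield them. */
            struct fake_res res[] = {
                    { 0x0000, 0x2000, 1 },
                    { 0x2000, 0x3000, 0 },
                    { 0x5000, 0x1000, 1 },
                    { 0x9000, 0x1000, 1 },  /* starts beyond the range below */
            };
            unsigned long res_start = 0x1000, res_end = 0x8000; /* pages [1, 8) */
            unsigned long last_cleaned = 0, num_prefault = 1;

            for (unsigned i = 0; i < sizeof(res) / sizeof(res[0]); i++) {
                    struct fake_res *r = &res[i];

                    if (r->backup_offset + r->backup_size <= res_start)
                            continue;       /* ends before the range */
                    if (r->backup_offset >= res_end)
                            break;          /* starts after the range */
                    r->dirty = 0;           /* "clean" the resource */
                    last_cleaned = r->backup_offset + r->backup_size;
            }

            if (last_cleaned > res_start)
                    num_prefault = DIV_ROUND_UP(last_cleaned - res_start,
                                                PAGE_SIZE);

            /* Prints 5: pages 1..5 (bytes 0x1000..0x6000) are safe to prefault. */
            printf("num_prefault = %lu\n", num_prefault);
            return 0;
    }
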