.. | ..
25 | 25 |  *
26 | 26 |  **************************************************************************/
27 | 27 |
28 | | -#include "vmwgfx_drv.h"
29 | | -#include <drm/vmwgfx_drm.h>
30 | 28 | #include <drm/ttm/ttm_placement.h>
31 | | -#include <drm/drmP.h>
| 29 | +
32 | 30 | #include "vmwgfx_resource_priv.h"
33 | 31 | #include "vmwgfx_binding.h"
| 32 | +#include "vmwgfx_drv.h"
34 | 33 |
35 | 34 | #define VMW_RES_EVICT_ERR_COUNT 10
| 35 | +
| 36 | +/**
| 37 | + * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
| 38 | + * @res: The resource
| 39 | + */
| 40 | +void vmw_resource_mob_attach(struct vmw_resource *res)
| 41 | +{
| 42 | +	struct vmw_buffer_object *backup = res->backup;
| 43 | +	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
| 44 | +
| 45 | +	dma_resv_assert_held(res->backup->base.base.resv);
| 46 | +	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
| 47 | +		res->func->prio;
| 48 | +
| 49 | +	while (*new) {
| 50 | +		struct vmw_resource *this =
| 51 | +			container_of(*new, struct vmw_resource, mob_node);
| 52 | +
| 53 | +		parent = *new;
| 54 | +		new = (res->backup_offset < this->backup_offset) ?
| 55 | +			&((*new)->rb_left) : &((*new)->rb_right);
| 56 | +	}
| 57 | +
| 58 | +	rb_link_node(&res->mob_node, parent, new);
| 59 | +	rb_insert_color(&res->mob_node, &backup->res_tree);
| 60 | +
| 61 | +	vmw_bo_prio_add(backup, res->used_prio);
| 62 | +}
| 63 | +
| 64 | +/**
| 65 | + * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
| 66 | + * @res: The resource
| 67 | + */
| 68 | +void vmw_resource_mob_detach(struct vmw_resource *res)
| 69 | +{
| 70 | +	struct vmw_buffer_object *backup = res->backup;
| 71 | +
| 72 | +	dma_resv_assert_held(backup->base.base.resv);
| 73 | +	if (vmw_resource_mob_attached(res)) {
| 74 | +		rb_erase(&res->mob_node, &backup->res_tree);
| 75 | +		RB_CLEAR_NODE(&res->mob_node);
| 76 | +		vmw_bo_prio_del(backup, res->used_prio);
| 77 | +	}
| 78 | +}
36 | 79 |
37 | 80 | struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
38 | 81 | {
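The two helpers above replace the old mob_head list with an rb-tree keyed on backup_offset. For readers less familiar with the kernel rb-tree insertion pattern they use, here is a minimal standalone sketch of the same insert-by-key walk. It is illustrative only and not part of the patch; struct item and its fields are simplified stand-ins, not vmwgfx types.

#include <linux/rbtree.h>

struct item {
	unsigned long key;	/* stands in for res->backup_offset */
	struct rb_node node;	/* stands in for res->mob_node */
};

/* Insert @new_item into @root ordered by ->key; equal keys go right. */
static void item_insert(struct rb_root *root, struct item *new_item)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct item *this = rb_entry(*new, struct item, node);

		parent = *new;
		new = (new_item->key < this->key) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&new_item->node, parent, new);	/* link at the found leaf */
	rb_insert_color(&new_item->node, root);	/* rebalance/recolor the tree */
}

Removal mirrors vmw_resource_mob_detach() above: rb_erase() followed by RB_CLEAR_NODE(), which is what keeps the "is attached" test cheap (presumably an RB_EMPTY_NODE() check behind vmw_resource_mob_attached()).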
.. | ..
58 | 101 | 	struct vmw_private *dev_priv = res->dev_priv;
59 | 102 | 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
60 | 103 |
61 | | -	write_lock(&dev_priv->resource_lock);
| 104 | +	spin_lock(&dev_priv->resource_lock);
62 | 105 | 	if (res->id != -1)
63 | 106 | 		idr_remove(idr, res->id);
64 | 107 | 	res->id = -1;
65 | | -	write_unlock(&dev_priv->resource_lock);
| 108 | +	spin_unlock(&dev_priv->resource_lock);
66 | 109 | }
67 | 110 |
68 | 111 | static void vmw_resource_release(struct kref *kref)
.. | ..
73 | 116 | 	int id;
74 | 117 | 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
75 | 118 |
76 | | -	write_lock(&dev_priv->resource_lock);
77 | | -	res->avail = false;
| 119 | +	spin_lock(&dev_priv->resource_lock);
78 | 120 | 	list_del_init(&res->lru_head);
79 | | -	write_unlock(&dev_priv->resource_lock);
| 121 | +	spin_unlock(&dev_priv->resource_lock);
80 | 122 | 	if (res->backup) {
81 | 123 | 		struct ttm_buffer_object *bo = &res->backup->base;
82 | 124 |
83 | 125 | 		ttm_bo_reserve(bo, false, false, NULL);
84 | | -		if (!list_empty(&res->mob_head) &&
| 126 | +		if (vmw_resource_mob_attached(res) &&
85 | 127 | 		    res->func->unbind != NULL) {
86 | 128 | 			struct ttm_validate_buffer val_buf;
87 | 129 |
88 | 130 | 			val_buf.bo = bo;
89 | | -			val_buf.shared = false;
| 131 | +			val_buf.num_shared = 0;
90 | 132 | 			res->func->unbind(res, false, &val_buf);
91 | 133 | 		}
92 | 134 | 		res->backup_dirty = false;
93 | | -		list_del_init(&res->mob_head);
| 135 | +		vmw_resource_mob_detach(res);
| 136 | +		if (res->dirty)
| 137 | +			res->func->dirty_free(res);
| 138 | +		if (res->coherent)
| 139 | +			vmw_bo_dirty_release(res->backup);
94 | 140 | 		ttm_bo_unreserve(bo);
95 | 141 | 		vmw_bo_unreference(&res->backup);
96 | 142 | 	}
.. | ..
108 | 154 | 	else
109 | 155 | 		kfree(res);
110 | 156 |
111 | | -	write_lock(&dev_priv->resource_lock);
| 157 | +	spin_lock(&dev_priv->resource_lock);
112 | 158 | 	if (id != -1)
113 | 159 | 		idr_remove(idr, id);
114 | | -	write_unlock(&dev_priv->resource_lock);
| 160 | +	spin_unlock(&dev_priv->resource_lock);
115 | 161 | }
116 | 162 |
117 | 163 | void vmw_resource_unreference(struct vmw_resource **p_res)
.. | ..
140 | 186 | 	BUG_ON(res->id != -1);
141 | 187 |
142 | 188 | 	idr_preload(GFP_KERNEL);
143 | | -	write_lock(&dev_priv->resource_lock);
| 189 | +	spin_lock(&dev_priv->resource_lock);
144 | 190 |
145 | 191 | 	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
146 | 192 | 	if (ret >= 0)
147 | 193 | 		res->id = ret;
148 | 194 |
149 | | -	write_unlock(&dev_priv->resource_lock);
| 195 | +	spin_unlock(&dev_priv->resource_lock);
150 | 196 | 	idr_preload_end();
151 | 197 | 	return ret < 0 ? ret : 0;
152 | 198 | }
.. | ..
170 | 216 | 	kref_init(&res->kref);
171 | 217 | 	res->hw_destroy = NULL;
172 | 218 | 	res->res_free = res_free;
173 | | -	res->avail = false;
174 | 219 | 	res->dev_priv = dev_priv;
175 | 220 | 	res->func = func;
| 221 | +	RB_CLEAR_NODE(&res->mob_node);
176 | 222 | 	INIT_LIST_HEAD(&res->lru_head);
177 | | -	INIT_LIST_HEAD(&res->mob_head);
178 | 223 | 	INIT_LIST_HEAD(&res->binding_head);
179 | 224 | 	res->id = -1;
180 | 225 | 	res->backup = NULL;
181 | 226 | 	res->backup_offset = 0;
182 | 227 | 	res->backup_dirty = false;
183 | 228 | 	res->res_dirty = false;
| 229 | +	res->coherent = false;
| 230 | +	res->used_prio = 3;
| 231 | +	res->dirty = NULL;
184 | 232 | 	if (delay_id)
185 | 233 | 		return 0;
186 | 234 | 	else
187 | 235 | 		return vmw_resource_alloc_id(res);
188 | 236 | }
189 | 237 |
190 | | -/**
191 | | - * vmw_resource_activate
192 | | - *
193 | | - * @res: Pointer to the newly created resource
194 | | - * @hw_destroy: Destroy function. NULL if none.
195 | | - *
196 | | - * Activate a resource after the hardware has been made aware of it.
197 | | - * Set tye destroy function to @destroy. Typically this frees the
198 | | - * resource and destroys the hardware resources associated with it.
199 | | - * Activate basically means that the function vmw_resource_lookup will
200 | | - * find it.
201 | | - */
202 | | -void vmw_resource_activate(struct vmw_resource *res,
203 | | -			   void (*hw_destroy) (struct vmw_resource *))
204 | | -{
205 | | -	struct vmw_private *dev_priv = res->dev_priv;
206 | | -
207 | | -	write_lock(&dev_priv->resource_lock);
208 | | -	res->avail = true;
209 | | -	res->hw_destroy = hw_destroy;
210 | | -	write_unlock(&dev_priv->resource_lock);
211 | | -}
212 | 238 |
213 | 239 | /**
214 | 240 |  * vmw_user_resource_lookup_handle - lookup a struct resource from a
.. | ..
243 | 269 | 		goto out_bad_resource;
244 | 270 |
245 | 271 | 	res = converter->base_obj_to_res(base);
246 | | -
247 | | -	read_lock(&dev_priv->resource_lock);
248 | | -	if (!res->avail || res->res_free != converter->res_free) {
249 | | -		read_unlock(&dev_priv->resource_lock);
250 | | -		goto out_bad_resource;
251 | | -	}
252 | | -
253 | 272 | 	kref_get(&res->kref);
254 | | -	read_unlock(&dev_priv->resource_lock);
255 | 273 |
256 | 274 | 	*p_res = res;
257 | 275 | 	ret = 0;
.. | ..
260 | 278 | 	ttm_base_object_unref(&base);
261 | 279 |
262 | 280 | 	return ret;
| 281 | +}
| 282 | +
| 283 | +/**
| 284 | + * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
| 285 | + * TTM user-space handle without taking a reference, and perform basic
| 286 | + * type checks
| 287 | + *
| 288 | + * @dev_priv: Pointer to a device private struct
| 289 | + * @tfile: Pointer to a struct ttm_object_file identifying the caller
| 290 | + * @handle: The TTM user-space handle
| 291 | + * @converter: Pointer to an object describing the resource type
| 292 | + *
| 293 | + * Return: A pointer to the looked-up (non-refcounted) struct vmw_resource,
| 294 | + * ERR_PTR(-ESRCH) if the handle can't be found, or ERR_PTR(-EINVAL) if it
| 295 | + * is associated with an incorrect resource type.
| 296 | + */
| 297 | +struct vmw_resource *
| 298 | +vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
| 299 | +				       struct ttm_object_file *tfile,
| 300 | +				       uint32_t handle,
| 301 | +				       const struct vmw_user_resource_conv
| 302 | +				       *converter)
| 303 | +{
| 304 | +	struct ttm_base_object *base;
| 305 | +
| 306 | +	base = ttm_base_object_noref_lookup(tfile, handle);
| 307 | +	if (!base)
| 308 | +		return ERR_PTR(-ESRCH);
| 309 | +
| 310 | +	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
| 311 | +		ttm_base_object_noref_release();
| 312 | +		return ERR_PTR(-EINVAL);
| 313 | +	}
| 314 | +
| 315 | +	return converter->base_obj_to_res(base);
263 | 316 | }
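A hypothetical caller sketch (not part of this patch): since the lookup takes no reference, the returned pointer is only safe to use until the lookup is balanced with ttm_base_object_noref_release(), the same call the error path above makes.

	struct vmw_resource *res;

	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
						     converter);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... use res while the handle is known to stay valid ... */

	ttm_base_object_noref_release();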
264 | 317 |
265 | 318 | /**
.. | ..
340 | 393 |  * should be retried once resources have been freed up.
341 | 394 |  */
342 | 395 | static int vmw_resource_do_validate(struct vmw_resource *res,
343 | | -				    struct ttm_validate_buffer *val_buf)
| 396 | +				    struct ttm_validate_buffer *val_buf,
| 397 | +				    bool dirtying)
344 | 398 | {
345 | 399 | 	int ret = 0;
346 | 400 | 	const struct vmw_res_func *func = res->func;
.. | ..
352 | 406 | 	}
353 | 407 |
354 | 408 | 	if (func->bind &&
355 | | -	    ((func->needs_backup && list_empty(&res->mob_head) &&
| 409 | +	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
356 | 410 | 	      val_buf->bo != NULL) ||
357 | 411 | 	     (!func->needs_backup && val_buf->bo != NULL))) {
358 | 412 | 		ret = func->bind(res, val_buf);
359 | 413 | 		if (unlikely(ret != 0))
360 | 414 | 			goto out_bind_failed;
361 | 415 | 		if (func->needs_backup)
362 | | -			list_add_tail(&res->mob_head, &res->backup->res_list);
| 416 | +			vmw_resource_mob_attach(res);
363 | 417 | 	}
364 | 418 |
365 | 419 | 	/*
366 | | -	 * Only do this on write operations, and move to
367 | | -	 * vmw_resource_unreserve if it can be called after
368 | | -	 * backup buffers have been unreserved. Otherwise
369 | | -	 * sort out locking.
| 420 | +	 * Handle the case where the backup mob is marked coherent but
| 421 | +	 * the resource isn't.
370 | 422 | 	 */
371 | | -	res->res_dirty = true;
| 423 | +	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
| 424 | +	    !res->coherent) {
| 425 | +		if (res->backup->dirty && !res->dirty) {
| 426 | +			ret = func->dirty_alloc(res);
| 427 | +			if (ret)
| 428 | +				return ret;
| 429 | +		} else if (!res->backup->dirty && res->dirty) {
| 430 | +			func->dirty_free(res);
| 431 | +		}
| 432 | +	}
| 433 | +
| 434 | +	/*
| 435 | +	 * Transfer the dirty regions to the resource and update
| 436 | +	 * the resource.
| 437 | +	 */
| 438 | +	if (res->dirty) {
| 439 | +		if (dirtying && !res->res_dirty) {
| 440 | +			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
| 441 | +			pgoff_t end = __KERNEL_DIV_ROUND_UP
| 442 | +				(res->backup_offset + res->backup_size,
| 443 | +				 PAGE_SIZE);
| 444 | +
| 445 | +			vmw_bo_dirty_unmap(res->backup, start, end);
| 446 | +		}
| 447 | +
| 448 | +		vmw_bo_dirty_transfer_to_res(res);
| 449 | +		return func->dirty_sync(res);
| 450 | +	}
372 | 451 |
373 | 452 | 	return 0;
374 | 453 |
.. | ..
383 | 462 |  * command submission.
384 | 463 |  *
385 | 464 |  * @res: Pointer to the struct vmw_resource to unreserve.
| 465 | + * @dirty_set: Change dirty status of the resource.
| 466 | + * @dirty: When changing dirty status indicates the new status.
386 | 467 |  * @switch_backup: Backup buffer has been switched.
387 | 468 |  * @new_backup: Pointer to new backup buffer if command submission
388 | 469 |  * switched. May be NULL.
.. | ..
392 | 473 |  * resource lru list, so that it can be evicted if necessary.
393 | 474 |  */
394 | 475 | void vmw_resource_unreserve(struct vmw_resource *res,
| 476 | +			    bool dirty_set,
| 477 | +			    bool dirty,
395 | 478 | 			    bool switch_backup,
396 | 479 | 			    struct vmw_buffer_object *new_backup,
397 | 480 | 			    unsigned long new_backup_offset)
.. | ..
403 | 486 |
404 | 487 | 	if (switch_backup && new_backup != res->backup) {
405 | 488 | 		if (res->backup) {
406 | | -			lockdep_assert_held(&res->backup->base.resv->lock.base);
407 | | -			list_del_init(&res->mob_head);
| 489 | +			vmw_resource_mob_detach(res);
| 490 | +			if (res->coherent)
| 491 | +				vmw_bo_dirty_release(res->backup);
408 | 492 | 			vmw_bo_unreference(&res->backup);
409 | 493 | 		}
410 | 494 |
411 | 495 | 		if (new_backup) {
412 | 496 | 			res->backup = vmw_bo_reference(new_backup);
413 | | -			lockdep_assert_held(&new_backup->base.resv->lock.base);
414 | | -			list_add_tail(&res->mob_head, &new_backup->res_list);
| 497 | +
| 498 | +			/*
| 499 | +			 * The validation code should already have added a
| 500 | +			 * dirty tracker here.
| 501 | +			 */
| 502 | +			WARN_ON(res->coherent && !new_backup->dirty);
| 503 | +
| 504 | +			vmw_resource_mob_attach(res);
415 | 505 | 		} else {
416 | 506 | 			res->backup = NULL;
417 | 507 | 		}
| 508 | +	} else if (switch_backup && res->coherent) {
| 509 | +		vmw_bo_dirty_release(res->backup);
418 | 510 | 	}
| 511 | +
419 | 512 | 	if (switch_backup)
420 | 513 | 		res->backup_offset = new_backup_offset;
| 514 | +
| 515 | +	if (dirty_set)
| 516 | +		res->res_dirty = dirty;
421 | 517 |
422 | 518 | 	if (!res->func->may_evict || res->id == -1 || res->pin_count)
423 | 519 | 		return;
424 | 520 |
425 | | -	write_lock(&dev_priv->resource_lock);
| 521 | +	spin_lock(&dev_priv->resource_lock);
426 | 522 | 	list_add_tail(&res->lru_head,
427 | 523 | 		      &res->dev_priv->res_lru[res->func->res_type]);
428 | | -	write_unlock(&dev_priv->resource_lock);
| 524 | +	spin_unlock(&dev_priv->resource_lock);
429 | 525 | }
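With the two new leading flags, existing callers that neither change the dirty state nor switch the backup simply pass false three times; the pin/unpin call sites later in this patch show the mechanical conversion. A before/after sketch of that unchanged-behavior case:

	/* Before this patch: */
	vmw_resource_unreserve(res, false, NULL, 0UL);

	/*
	 * After: dirty_set = false (res_dirty untouched, so dirty is ignored)
	 * and switch_backup = false, so behavior is unchanged.
	 */
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);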
430 | 526 |
431 | 527 | /**
.. | ..
458 | 554 | 	}
459 | 555 |
460 | 556 | 	INIT_LIST_HEAD(&val_list);
461 | | -	val_buf->bo = ttm_bo_reference(&res->backup->base);
462 | | -	val_buf->shared = false;
| 557 | +	ttm_bo_get(&res->backup->base);
| 558 | +	val_buf->bo = &res->backup->base;
| 559 | +	val_buf->num_shared = 0;
463 | 560 | 	list_add_tail(&val_buf->head, &val_list);
464 | 561 | 	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
465 | 562 | 	if (unlikely(ret != 0))
466 | 563 | 		goto out_no_reserve;
467 | 564 |
468 | | -	if (res->func->needs_backup && list_empty(&res->mob_head))
| 565 | +	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
469 | 566 | 		return 0;
470 | 567 |
471 | 568 | 	backup_dirty = res->backup_dirty;
.. | ..
481 | 578 | out_no_validate:
482 | 579 | 	ttm_eu_backoff_reservation(ticket, &val_list);
483 | 580 | out_no_reserve:
484 | | -	ttm_bo_unref(&val_buf->bo);
| 581 | +	ttm_bo_put(val_buf->bo);
| 582 | +	val_buf->bo = NULL;
485 | 583 | 	if (backup_dirty)
486 | 584 | 		vmw_bo_unreference(&res->backup);
487 | 585 |
.. | ..
504 | 602 | 	struct vmw_private *dev_priv = res->dev_priv;
505 | 603 | 	int ret;
506 | 604 |
507 | | -	write_lock(&dev_priv->resource_lock);
| 605 | +	spin_lock(&dev_priv->resource_lock);
508 | 606 | 	list_del_init(&res->lru_head);
509 | | -	write_unlock(&dev_priv->resource_lock);
| 607 | +	spin_unlock(&dev_priv->resource_lock);
510 | 608 |
511 | 609 | 	if (res->func->needs_backup && res->backup == NULL &&
512 | 610 | 	    !no_backup) {
.. | ..
541 | 639 | 	INIT_LIST_HEAD(&val_list);
542 | 640 | 	list_add_tail(&val_buf->head, &val_list);
543 | 641 | 	ttm_eu_backoff_reservation(ticket, &val_list);
544 | | -	ttm_bo_unref(&val_buf->bo);
| 642 | +	ttm_bo_put(val_buf->bo);
| 643 | +	val_buf->bo = NULL;
545 | 644 | }
546 | 645 |
547 | 646 | /**
.. | ..
562 | 661 | 	BUG_ON(!func->may_evict);
563 | 662 |
564 | 663 | 	val_buf.bo = NULL;
565 | | -	val_buf.shared = false;
| 664 | +	val_buf.num_shared = 0;
566 | 665 | 	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
567 | 666 | 	if (unlikely(ret != 0))
568 | 667 | 		return ret;
569 | 668 |
570 | 669 | 	if (unlikely(func->unbind != NULL &&
571 | | -		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
| 670 | +		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
572 | 671 | 		ret = func->unbind(res, res->res_dirty, &val_buf);
573 | 672 | 		if (unlikely(ret != 0))
574 | 673 | 			goto out_no_unbind;
575 | | -		list_del_init(&res->mob_head);
| 674 | +		vmw_resource_mob_detach(res);
576 | 675 | 	}
577 | 676 | 	ret = func->destroy(res);
578 | 677 | 	res->backup_dirty = true;
.. | ..
587 | 686 | /**
588 | 687 |  * vmw_resource_validate - Make a resource up-to-date and visible
589 | 688 |  * to the device.
590 | | - *
591 | | - * @res: The resource to make visible to the device.
| 689 | + * @res: The resource to make visible to the device.
| 690 | + * @intr: Perform waits interruptible if possible.
| 691 | + * @dirtying: Pending GPU operation will dirty the resource
592 | 692 |  *
593 | 693 |  * On succesful return, any backup DMA buffer pointed to by @res->backup will
594 | 694 |  * be reserved and validated.
595 | 695 |  * On hardware resource shortage, this function will repeatedly evict
596 | 696 |  * resources of the same type until the validation succeeds.
| 697 | + *
| 698 | + * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
| 699 | + * on failure.
597 | 700 |  */
598 | | -int vmw_resource_validate(struct vmw_resource *res)
| 701 | +int vmw_resource_validate(struct vmw_resource *res, bool intr,
| 702 | +			  bool dirtying)
599 | 703 | {
600 | 704 | 	int ret;
601 | 705 | 	struct vmw_resource *evict_res;
.. | ..
608 | 712 | 		return 0;
609 | 713 |
610 | 714 | 	val_buf.bo = NULL;
611 | | -	val_buf.shared = false;
| 715 | +	val_buf.num_shared = 0;
612 | 716 | 	if (res->backup)
613 | 717 | 		val_buf.bo = &res->backup->base;
614 | 718 | 	do {
615 | | -		ret = vmw_resource_do_validate(res, &val_buf);
| 719 | +		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
616 | 720 | 		if (likely(ret != -EBUSY))
617 | 721 | 			break;
618 | 722 |
619 | | -		write_lock(&dev_priv->resource_lock);
| 723 | +		spin_lock(&dev_priv->resource_lock);
620 | 724 | 		if (list_empty(lru_list) || !res->func->may_evict) {
621 | 725 | 			DRM_ERROR("Out of device device resources "
622 | 726 | 				  "for %s.\n", res->func->type_name);
623 | 727 | 			ret = -EBUSY;
624 | | -			write_unlock(&dev_priv->resource_lock);
| 728 | +			spin_unlock(&dev_priv->resource_lock);
625 | 729 | 			break;
626 | 730 | 		}
627 | 731 |
.. | ..
630 | 734 | 				      lru_head));
631 | 735 | 		list_del_init(&evict_res->lru_head);
632 | 736 |
633 | | -		write_unlock(&dev_priv->resource_lock);
| 737 | +		spin_unlock(&dev_priv->resource_lock);
634 | 738 |
635 | 739 | 		/* Trylock backup buffers with a NULL ticket. */
636 | | -		ret = vmw_resource_do_evict(NULL, evict_res, true);
| 740 | +		ret = vmw_resource_do_evict(NULL, evict_res, intr);
637 | 741 | 		if (unlikely(ret != 0)) {
638 | | -			write_lock(&dev_priv->resource_lock);
| 742 | +			spin_lock(&dev_priv->resource_lock);
639 | 743 | 			list_add_tail(&evict_res->lru_head, lru_list);
640 | | -			write_unlock(&dev_priv->resource_lock);
| 744 | +			spin_unlock(&dev_priv->resource_lock);
641 | 745 | 			if (ret == -ERESTARTSYS ||
642 | 746 | 			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
643 | 747 | 				vmw_resource_unreference(&evict_res);
.. | ..
651 | 755 | 	if (unlikely(ret != 0))
652 | 756 | 		goto out_no_validate;
653 | 757 | 	else if (!res->func->needs_backup && res->backup) {
654 | | -		list_del_init(&res->mob_head);
| 758 | +		WARN_ON_ONCE(vmw_resource_mob_attached(res));
655 | 759 | 		vmw_bo_unreference(&res->backup);
656 | 760 | 	}
657 | 761 |
.. | ..
675 | 779 |  */
676 | 780 | void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
677 | 781 | {
678 | | -
679 | | -	struct vmw_resource *res, *next;
680 | 782 | 	struct ttm_validate_buffer val_buf = {
681 | 783 | 		.bo = &vbo->base,
682 | | -		.shared = false
| 784 | +		.num_shared = 0
683 | 785 | 	};
684 | 786 |
685 | | -	lockdep_assert_held(&vbo->base.resv->lock.base);
686 | | -	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
687 | | -		if (!res->func->unbind)
688 | | -			continue;
| 787 | +	dma_resv_assert_held(vbo->base.base.resv);
| 788 | +	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
| 789 | +		struct rb_node *node = vbo->res_tree.rb_node;
| 790 | +		struct vmw_resource *res =
| 791 | +			container_of(node, struct vmw_resource, mob_node);
689 | 792 |
690 | | -		(void) res->func->unbind(res, true, &val_buf);
| 793 | +		if (!WARN_ON_ONCE(!res->func->unbind))
| 794 | +			(void) res->func->unbind(res, res->res_dirty, &val_buf);
| 795 | +
691 | 796 | 		res->backup_dirty = true;
692 | 797 | 		res->res_dirty = false;
693 | | -		list_del_init(&res->mob_head);
| 798 | +		vmw_resource_mob_detach(res);
694 | 799 | 	}
695 | 800 |
696 | 801 | 	(void) ttm_bo_wait(&vbo->base, false, false);
.. | ..
722 | 827 | 	dx_query_ctx = dx_query_mob->dx_query_ctx;
723 | 828 | 	dev_priv = dx_query_ctx->dev_priv;
724 | 829 |
725 | | -	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
726 | | -	if (unlikely(cmd == NULL)) {
727 | | -		DRM_ERROR("Failed reserving FIFO space for "
728 | | -			  "query MOB read back.\n");
| 830 | +	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
| 831 | +	if (unlikely(cmd == NULL))
729 | 832 | 		return -ENOMEM;
730 | | -	}
731 | 833 |
732 | 834 | 	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
733 | 835 | 	cmd->header.size = sizeof(cmd->body);
.. | ..
753 | 855 |  * states from the device.
754 | 856 |  */
755 | 857 | void vmw_query_move_notify(struct ttm_buffer_object *bo,
756 | | -			   struct ttm_mem_reg *mem)
| 858 | +			   struct ttm_resource *mem)
757 | 859 | {
758 | 860 | 	struct vmw_buffer_object *dx_query_mob;
759 | 861 | 	struct ttm_bo_device *bdev = bo->bdev;
.. | ..
819 | 921 | 	struct ww_acquire_ctx ticket;
820 | 922 |
821 | 923 | 	do {
822 | | -		write_lock(&dev_priv->resource_lock);
| 924 | +		spin_lock(&dev_priv->resource_lock);
823 | 925 |
824 | 926 | 		if (list_empty(lru_list))
825 | 927 | 			goto out_unlock;
.. | ..
828 | 930 | 			list_first_entry(lru_list, struct vmw_resource,
829 | 931 | 					 lru_head));
830 | 932 | 		list_del_init(&evict_res->lru_head);
831 | | -		write_unlock(&dev_priv->resource_lock);
| 933 | +		spin_unlock(&dev_priv->resource_lock);
832 | 934 |
833 | 935 | 		/* Wait lock backup buffers with a ticket. */
834 | 936 | 		ret = vmw_resource_do_evict(&ticket, evict_res, false);
835 | 937 | 		if (unlikely(ret != 0)) {
836 | | -			write_lock(&dev_priv->resource_lock);
| 938 | +			spin_lock(&dev_priv->resource_lock);
837 | 939 | 			list_add_tail(&evict_res->lru_head, lru_list);
838 | | -			write_unlock(&dev_priv->resource_lock);
| 940 | +			spin_unlock(&dev_priv->resource_lock);
839 | 941 | 			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
840 | 942 | 				vmw_resource_unreference(&evict_res);
841 | 943 | 				return;
.. | ..
846 | 948 | 	} while (1);
847 | 949 |
848 | 950 | out_unlock:
849 | | -	write_unlock(&dev_priv->resource_lock);
| 951 | +	spin_unlock(&dev_priv->resource_lock);
850 | 952 | }
851 | 953 |
852 | 954 | /**
.. | ..
914 | 1016 | 		/* Do we really need to pin the MOB as well? */
915 | 1017 | 		vmw_bo_pin_reserved(vbo, true);
916 | 1018 | 	}
917 | | -	ret = vmw_resource_validate(res);
| 1019 | +	ret = vmw_resource_validate(res, interruptible, true);
918 | 1020 | 	if (vbo)
919 | 1021 | 		ttm_bo_unreserve(&vbo->base);
920 | 1022 | 	if (ret)
.. | ..
923 | 1025 | 	res->pin_count++;
924 | 1026 |
925 | 1027 | out_no_validate:
926 | | -	vmw_resource_unreserve(res, false, NULL, 0UL);
| 1028 | +	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
927 | 1029 | out_no_reserve:
928 | 1030 | 	mutex_unlock(&dev_priv->cmdbuf_mutex);
929 | 1031 | 	ttm_write_unlock(&dev_priv->reservation_sem);
.. | ..
959 | 1061 | 		ttm_bo_unreserve(&vbo->base);
960 | 1062 | 	}
961 | 1063 |
962 | | -	vmw_resource_unreserve(res, false, NULL, 0UL);
| 1064 | +	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
963 | 1065 |
964 | 1066 | 	mutex_unlock(&dev_priv->cmdbuf_mutex);
965 | 1067 | 	ttm_read_unlock(&dev_priv->reservation_sem);
.. | ..
974 | 1076 | {
975 | 1077 | 	return res->func->res_type;
976 | 1078 | }
| 1079 | +
| 1080 | +/**
| 1081 | + * vmw_resource_dirty_update - Update a resource's dirty tracker with a
| 1082 | + * sequential range of touched backing store memory.
| 1083 | + * @res: The resource.
| 1084 | + * @start: The first page touched.
| 1085 | + * @end: The last page touched + 1.
| 1086 | + */
| 1087 | +void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
| 1088 | +			       pgoff_t end)
| 1089 | +{
| 1090 | +	if (res->dirty)
| 1091 | +		res->func->dirty_range_add(res, start << PAGE_SHIFT,
| 1092 | +					   end << PAGE_SHIFT);
| 1093 | +}
| 1094 | +
| 1095 | +/**
| 1096 | + * vmw_resources_clean - Clean resources intersecting a mob range
| 1097 | + * @vbo: The mob buffer object
| 1098 | + * @start: The mob page offset starting the range
| 1099 | + * @end: The mob page offset ending the range
| 1100 | + * @num_prefault: Returns how many pages including the first have been
| 1101 | + * cleaned and are ok to prefault
| 1102 | + */
| 1103 | +int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
| 1104 | +			pgoff_t end, pgoff_t *num_prefault)
| 1105 | +{
| 1106 | +	struct rb_node *cur = vbo->res_tree.rb_node;
| 1107 | +	struct vmw_resource *found = NULL;
| 1108 | +	unsigned long res_start = start << PAGE_SHIFT;
| 1109 | +	unsigned long res_end = end << PAGE_SHIFT;
| 1110 | +	unsigned long last_cleaned = 0;
| 1111 | +
| 1112 | +	/*
| 1113 | +	 * Find the resource with lowest backup_offset that intersects the
| 1114 | +	 * range.
| 1115 | +	 */
| 1116 | +	while (cur) {
| 1117 | +		struct vmw_resource *cur_res =
| 1118 | +			container_of(cur, struct vmw_resource, mob_node);
| 1119 | +
| 1120 | +		if (cur_res->backup_offset >= res_end) {
| 1121 | +			cur = cur->rb_left;
| 1122 | +		} else if (cur_res->backup_offset + cur_res->backup_size <=
| 1123 | +			   res_start) {
| 1124 | +			cur = cur->rb_right;
| 1125 | +		} else {
| 1126 | +			found = cur_res;
| 1127 | +			cur = cur->rb_left;
| 1128 | +			/* Continue to look for resources with lower offsets */
| 1129 | +		}
| 1130 | +	}
| 1131 | +
| 1132 | +	/*
| 1133 | +	 * In order of increasing backup_offset, clean dirty resources
| 1134 | +	 * intersecting the range.
| 1135 | +	 */
| 1136 | +	while (found) {
| 1137 | +		if (found->res_dirty) {
| 1138 | +			int ret;
| 1139 | +
| 1140 | +			if (!found->func->clean)
| 1141 | +				return -EINVAL;
| 1142 | +
| 1143 | +			ret = found->func->clean(found);
| 1144 | +			if (ret)
| 1145 | +				return ret;
| 1146 | +
| 1147 | +			found->res_dirty = false;
| 1148 | +		}
| 1149 | +		last_cleaned = found->backup_offset + found->backup_size;
| 1150 | +		cur = rb_next(&found->mob_node);
| 1151 | +		if (!cur)
| 1152 | +			break;
| 1153 | +
| 1154 | +		found = container_of(cur, struct vmw_resource, mob_node);
| 1155 | +		if (found->backup_offset >= res_end)
| 1156 | +			break;
| 1157 | +	}
| 1158 | +
| 1159 | +	/*
| 1160 | +	 * Set the number of pages allowed for prefaulting and fence the buffer object
| 1161 | +	 */
| 1162 | +	*num_prefault = 1;
| 1163 | +	if (last_cleaned > res_start) {
| 1164 | +		struct ttm_buffer_object *bo = &vbo->base;
| 1165 | +
| 1166 | +		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
| 1167 | +						      PAGE_SIZE);
| 1168 | +		vmw_bo_fence_single(bo, NULL);
| 1169 | +		if (bo->moving)
| 1170 | +			dma_fence_put(bo->moving);
| 1171 | +		bo->moving = dma_fence_get
| 1172 | +			(dma_resv_get_excl(bo->base.resv));
| 1173 | +	}
| 1174 | +
| 1175 | +	return 0;
| 1176 | +}
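For context, a hypothetical caller sketch of the new vmw_resources_clean() (not part of this patch; the helper name and the assumption that the buffer object is already reserved are illustrative): clean whatever resources back a single page and learn how many pages may then be prefaulted.

static int example_clean_one_page(struct vmw_buffer_object *vbo,
				  pgoff_t page_offset)
{
	pgoff_t num_prefault = 1;
	int ret;

	/* @vbo is assumed reserved by the caller. */
	ret = vmw_resources_clean(vbo, page_offset, page_offset + 1,
				  &num_prefault);
	if (ret)
		return ret;

	/* Up to num_prefault pages starting at page_offset are now clean. */
	return 0;
}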
---|