 .. |  .. |
 33 |  33 |  static const struct ttm_place vram_placement_flags = {
 34 |  34 |          .fpfn = 0,
 35 |  35 |          .lpfn = 0,
 36 |     | -        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    |  36 | +        .mem_type = TTM_PL_VRAM,
    |  37 | +        .flags = TTM_PL_FLAG_CACHED
 37 |  38 |  };
 38 |  39 |
 39 |  40 |  static const struct ttm_place vram_ne_placement_flags = {
 40 |  41 |          .fpfn = 0,
 41 |  42 |          .lpfn = 0,
 42 |     | -        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
    |  43 | +        .mem_type = TTM_PL_VRAM,
    |  44 | +        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 43 |  45 |  };
 44 |  46 |
 45 |  47 |  static const struct ttm_place sys_placement_flags = {
 46 |  48 |          .fpfn = 0,
 47 |  49 |          .lpfn = 0,
 48 |     | -        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
    |  50 | +        .mem_type = TTM_PL_SYSTEM,
    |  51 | +        .flags = TTM_PL_FLAG_CACHED
 49 |  52 |  };
 50 |  53 |
 51 |  54 |  static const struct ttm_place sys_ne_placement_flags = {
 52 |  55 |          .fpfn = 0,
 53 |  56 |          .lpfn = 0,
 54 |     | -        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
    |  57 | +        .mem_type = TTM_PL_SYSTEM,
    |  58 | +        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 55 |  59 |  };
 56 |  60 |
 57 |  61 |  static const struct ttm_place gmr_placement_flags = {
 58 |  62 |          .fpfn = 0,
 59 |  63 |          .lpfn = 0,
 60 |     | -        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    |  64 | +        .mem_type = VMW_PL_GMR,
    |  65 | +        .flags = TTM_PL_FLAG_CACHED
 61 |  66 |  };
 62 |  67 |
 63 |  68 |  static const struct ttm_place gmr_ne_placement_flags = {
 64 |  69 |          .fpfn = 0,
 65 |  70 |          .lpfn = 0,
 66 |     | -        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
    |  71 | +        .mem_type = VMW_PL_GMR,
    |  72 | +        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 67 |  73 |  };
 68 |  74 |
 69 |  75 |  static const struct ttm_place mob_placement_flags = {
 70 |  76 |          .fpfn = 0,
 71 |  77 |          .lpfn = 0,
 72 |     | -        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
    |  78 | +        .mem_type = VMW_PL_MOB,
    |  79 | +        .flags = TTM_PL_FLAG_CACHED
 73 |  80 |  };
 74 |  81 |
 75 |  82 |  static const struct ttm_place mob_ne_placement_flags = {
 76 |  83 |          .fpfn = 0,
 77 |  84 |          .lpfn = 0,
 78 |     | -        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
    |  85 | +        .mem_type = VMW_PL_MOB,
    |  86 | +        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 79 |  87 |  };
 80 |  88 |
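The eight placement variants above change mechanically: the memory domain moves out of the flags word into the new .mem_type field, and .flags keeps only the behavioral bits. As a standalone before/after sketch (names illustrative, not part of the patch; the two initializers target the old and new struct ttm_place layouts respectively):

```c
/* Before: the memory domain was encoded as a flag bit in .flags. */
static const struct ttm_place old_style = {
	.fpfn = 0, .lpfn = 0,	/* no placement range restriction */
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
};

/* After: the domain lives in .mem_type; caching/eviction bits stay in .flags. */
static const struct ttm_place new_style = {
	.fpfn = 0, .lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED,
};
```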
 81 |  89 |  struct ttm_placement vmw_vram_placement = {
 .. |  .. |
 89 |  97 |          {
 90 |  98 |                  .fpfn = 0,
 91 |  99 |                  .lpfn = 0,
 92 |     | -                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    | 100 | +                .mem_type = TTM_PL_VRAM,
    | 101 | +                .flags = TTM_PL_FLAG_CACHED
 93 | 102 |          }, {
 94 | 103 |                  .fpfn = 0,
 95 | 104 |                  .lpfn = 0,
 96 |     | -                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    | 105 | +                .mem_type = VMW_PL_GMR,
    | 106 | +                .flags = TTM_PL_FLAG_CACHED
 97 | 107 |          }
 98 | 108 |  };
 99 | 109 |
 .. |  .. |
101 | 111 |          {
102 | 112 |                  .fpfn = 0,
103 | 113 |                  .lpfn = 0,
104 |     | -                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    | 114 | +                .mem_type = VMW_PL_GMR,
    | 115 | +                .flags = TTM_PL_FLAG_CACHED
105 | 116 |          }, {
106 | 117 |                  .fpfn = 0,
107 | 118 |                  .lpfn = 0,
108 |     | -                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    | 119 | +                .mem_type = TTM_PL_VRAM,
    | 120 | +                .flags = TTM_PL_FLAG_CACHED
109 | 121 |          }
110 | 122 |  };
111 | 123 |
 .. |  .. |
120 | 132 |          {
121 | 133 |                  .fpfn = 0,
122 | 134 |                  .lpfn = 0,
123 |     | -                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
    | 135 | +                .mem_type = TTM_PL_VRAM,
    | 136 | +                .flags = TTM_PL_FLAG_CACHED |
124 | 137 |                           TTM_PL_FLAG_NO_EVICT
125 | 138 |          }, {
126 | 139 |                  .fpfn = 0,
127 | 140 |                  .lpfn = 0,
128 |     | -                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
    | 141 | +                .mem_type = VMW_PL_GMR,
    | 142 | +                .flags = TTM_PL_FLAG_CACHED |
129 | 143 |                           TTM_PL_FLAG_NO_EVICT
130 | 144 |          }
131 | 145 |  };
 .. |  .. |
169 | 183 |          {
170 | 184 |                  .fpfn = 0,
171 | 185 |                  .lpfn = 0,
172 |     | -                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
    | 186 | +                .mem_type = TTM_PL_SYSTEM,
    | 187 | +                .flags = TTM_PL_FLAG_CACHED
173 | 188 |          }, {
174 | 189 |                  .fpfn = 0,
175 | 190 |                  .lpfn = 0,
176 |     | -                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
    | 191 | +                .mem_type = TTM_PL_VRAM,
    | 192 | +                .flags = TTM_PL_FLAG_CACHED
177 | 193 |          }, {
178 | 194 |                  .fpfn = 0,
179 | 195 |                  .lpfn = 0,
180 |     | -                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    | 196 | +                .mem_type = VMW_PL_GMR,
    | 197 | +                .flags = TTM_PL_FLAG_CACHED
181 | 198 |          }, {
182 | 199 |                  .fpfn = 0,
183 | 200 |                  .lpfn = 0,
184 |     | -                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
    | 201 | +                .mem_type = VMW_PL_MOB,
    | 202 | +                .flags = TTM_PL_FLAG_CACHED
185 | 203 |          }
186 | 204 |  };
187 | 205 |
 .. |  .. |
189 | 207 |          {
190 | 208 |                  .fpfn = 0,
191 | 209 |                  .lpfn = 0,
192 |     | -                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
    | 210 | +                .mem_type = TTM_PL_SYSTEM,
    | 211 | +                .flags = TTM_PL_FLAG_CACHED
193 | 212 |          }, {
194 | 213 |                  .fpfn = 0,
195 | 214 |                  .lpfn = 0,
196 |     | -                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
    | 215 | +                .mem_type = VMW_PL_GMR,
    | 216 | +                .flags = TTM_PL_FLAG_CACHED
197 | 217 |          }, {
198 | 218 |                  .fpfn = 0,
199 | 219 |                  .lpfn = 0,
200 |     | -                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
    | 220 | +                .mem_type = VMW_PL_MOB,
    | 221 | +                .flags = TTM_PL_FLAG_CACHED
201 | 222 |          }
202 | 223 |  };
203 | 224 |
 .. |  .. |
246 | 267 |          struct vmw_sg_table vsgt;
247 | 268 |          uint64_t sg_alloc_size;
248 | 269 |          bool mapped;
    | 270 | +        bool bound;
249 | 271 |  };
250 | 272 |
251 | 273 |  const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
 .. |  .. |
266 | 288 |
267 | 289 |  static bool __vmw_piter_sg_next(struct vmw_piter *viter)
268 | 290 |  {
269 |     | -        return __sg_page_iter_next(&viter->iter);
    | 291 | +        bool ret = __vmw_piter_non_sg_next(viter);
    | 292 | +
    | 293 | +        return __sg_page_iter_dma_next(&viter->iter) && ret;
270 | 294 |  }
271 | 295 |
272 | 296 |
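After this hunk, struct page lookups always go through the pages[] array (viter->i) while DMA addresses come from the DMA-aware sg iterator, with both cursors advancing in lock step. A hedged caller-side sketch of the iteration contract, assuming the vmw_piter_start/next/dma_addr/page wrappers from the vmwgfx headers (not shown in this diff):

```c
/* Illustrative only: walk a vmw_sg_table the way the driver's consumers do.
 * The iterator is started one slot before the first page, so the first
 * vmw_piter_next() call advances onto page 0. */
static void example_walk(struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);
		struct page *page = vmw_piter_page(&viter);

		/* ... program addr/page into the device tables here ... */
		(void)addr;
		(void)page;
	}
}
```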
 .. |  .. |
283 | 307 |  {
284 | 308 |          return viter->pages[viter->i];
285 | 309 |  }
286 |     | -
287 |     | -static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
288 |     | -{
289 |     | -        return sg_page_iter_page(&viter->iter);
290 |     | -}
291 |     | -
292 | 310 |
293 | 311 |  /**
294 | 312 |   * Helper functions to return the DMA address of the current page.
 .. |  .. |
330 | 348 |  {
331 | 349 |          viter->i = p_offset - 1;
332 | 350 |          viter->num_pages = vsgt->num_pages;
    | 351 | +        viter->page = &__vmw_piter_non_sg_page;
    | 352 | +        viter->pages = vsgt->pages;
333 | 353 |          switch (vsgt->mode) {
334 | 354 |          case vmw_dma_phys:
335 | 355 |                  viter->next = &__vmw_piter_non_sg_next;
336 | 356 |                  viter->dma_address = &__vmw_piter_phys_addr;
337 |     | -                viter->page = &__vmw_piter_non_sg_page;
338 |     | -                viter->pages = vsgt->pages;
339 | 357 |                  break;
340 | 358 |          case vmw_dma_alloc_coherent:
341 | 359 |                  viter->next = &__vmw_piter_non_sg_next;
342 | 360 |                  viter->dma_address = &__vmw_piter_dma_addr;
343 |     | -                viter->page = &__vmw_piter_non_sg_page;
344 | 361 |                  viter->addrs = vsgt->addrs;
345 |     | -                viter->pages = vsgt->pages;
346 | 362 |                  break;
347 | 363 |          case vmw_dma_map_populate:
348 | 364 |          case vmw_dma_map_bind:
349 | 365 |                  viter->next = &__vmw_piter_sg_next;
350 | 366 |                  viter->dma_address = &__vmw_piter_sg_addr;
351 |     | -                viter->page = &__vmw_piter_sg_page;
352 |     | -                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
    | 367 | +                __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
353 | 368 |                                       vsgt->sgt->orig_nents, p_offset);
354 | 369 |                  break;
355 | 370 |          default:
 .. |  .. |
369 | 384 |  {
370 | 385 |          struct device *dev = vmw_tt->dev_priv->dev->dev;
371 | 386 |
372 |     | -        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
373 |     | -                     DMA_BIDIRECTIONAL);
    | 387 | +        dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
374 | 388 |          vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
375 | 389 |  }
376 | 390 |
 .. |  .. |
390 | 404 |  static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
391 | 405 |  {
392 | 406 |          struct device *dev = vmw_tt->dev_priv->dev->dev;
393 |     | -        int ret;
394 | 407 |
395 |     | -        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
396 |     | -                         DMA_BIDIRECTIONAL);
397 |     | -        if (unlikely(ret == 0))
398 |     | -                return -ENOMEM;
399 |     | -
400 |     | -        vmw_tt->sgt.nents = ret;
401 |     | -
402 |     | -        return 0;
    | 408 | +        return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
403 | 409 |  }
404 | 410 |
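The sgtable helpers subsume the bookkeeping the old code did by hand: dma_map_sgtable() returns 0 or a negative errno and records the mapped entry count in sgt->nents itself, and dma_unmap_sgtable() mirrors it. A minimal sketch of the calling convention (illustrative, outside this driver):

```c
#include <linux/dma-mapping.h>

/* Illustrative only: map and later unmap a pre-built sg_table for a device. */
static int example_map(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* Returns 0 or -errno; on success sgt->nents holds the DMA count. */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* no manual -ENOMEM translation needed */

	/* ... use the table, e.g. with for_each_sgtable_dma_sg() ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}
```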
405 | 411 |  /**
 .. |  .. |
426 | 432 |          int ret = 0;
427 | 433 |          static size_t sgl_size;
428 | 434 |          static size_t sgt_size;
    | 435 | +        struct scatterlist *sg;
429 | 436 |
430 | 437 |          if (vmw_tt->mapped)
431 | 438 |                  return 0;
 .. |  .. |
448 | 455 |          if (unlikely(ret != 0))
449 | 456 |                  return ret;
450 | 457 |
451 |     | -        ret = __sg_alloc_table_from_pages
452 |     | -                (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
453 |     | -                 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
454 |     | -                 dma_get_max_seg_size(dev_priv->dev->dev),
455 |     | -                 GFP_KERNEL);
456 |     | -        if (unlikely(ret != 0))
    | 458 | +        sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
    | 459 | +                        vsgt->num_pages, 0,
    | 460 | +                        (unsigned long) vsgt->num_pages << PAGE_SHIFT,
    | 461 | +                        dma_get_max_seg_size(dev_priv->dev->dev),
    | 462 | +                        NULL, 0, GFP_KERNEL);
    | 463 | +        if (IS_ERR(sg)) {
    | 464 | +                ret = PTR_ERR(sg);
457 | 465 |                  goto out_sg_alloc_fail;
    | 466 | +        }
458 | 467 |
459 |     | -        if (vsgt->num_pages > vmw_tt->sgt.nents) {
    | 468 | +        if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
460 | 469 |                  uint64_t over_alloc =
461 | 470 |                          sgl_size * (vsgt->num_pages -
462 |     | -                                    vmw_tt->sgt.nents);
    | 471 | +                                    vmw_tt->sgt.orig_nents);
463 | 472 |
464 | 473 |                  ttm_mem_global_free(glob, over_alloc);
465 | 474 |                  vmw_tt->sg_alloc_size -= over_alloc;
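Two related details in this hunk: the extended __sg_alloc_table_from_pages() returns the last scatterlist entry (or an ERR_PTR) rather than an int, taking a prv/left_pages pair (NULL/0 when there is no existing table to append to), and the over-allocation check switches to orig_nents because nents is now reserved for the DMA-mapped count. A hedged sketch of the new call shape:

```c
/* Illustrative: build an sg_table from an array of pages, coalescing
 * physically contiguous pages up to the device's max segment size. */
static int example_build_sgt(struct sg_table *sgt, struct page **pages,
			     unsigned int npages, struct device *dev)
{
	struct scatterlist *sg;

	sg = __sg_alloc_table_from_pages(sgt, pages, npages, 0,
					 (unsigned long)npages << PAGE_SHIFT,
					 dma_get_max_seg_size(dev),
					 NULL, 0, GFP_KERNEL); /* nothing to append to */
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	/* sgt->orig_nents is the CPU-side entry count; sgt->nents only
	 * becomes meaningful after dma_map_sgtable() succeeds. */
	return 0;
}
```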
 .. |  .. |
526 | 535 |          vmw_tt->mapped = false;
527 | 536 |  }
528 | 537 |
529 |     | -
530 |     | -/**
531 |     | - * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
532 |     | - *
533 |     | - * @bo: Pointer to a struct ttm_buffer_object
534 |     | - *
535 |     | - * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
536 |     | - * instead of a pointer to a struct vmw_ttm_backend as argument.
537 |     | - * Note that the buffer object must be either pinned or reserved before
538 |     | - * calling this function.
539 |     | - */
540 |     | -int vmw_bo_map_dma(struct ttm_buffer_object *bo)
541 |     | -{
542 |     | -        struct vmw_ttm_tt *vmw_tt =
543 |     | -                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
544 |     | -
545 |     | -        return vmw_ttm_map_dma(vmw_tt);
546 |     | -}
547 |     | -
548 |     | -
549 |     | -/**
550 |     | - * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
551 |     | - *
552 |     | - * @bo: Pointer to a struct ttm_buffer_object
553 |     | - *
554 |     | - * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
555 |     | - * instead of a pointer to a struct vmw_ttm_backend as argument.
556 |     | - */
557 |     | -void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
558 |     | -{
559 |     | -        struct vmw_ttm_tt *vmw_tt =
560 |     | -                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
561 |     | -
562 |     | -        vmw_ttm_unmap_dma(vmw_tt);
563 |     | -}
564 |     | -
565 |     | -
566 | 538 |  /**
567 | 539 |   * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
568 | 540 |   * TTM buffer object
 .. |  .. |
583 | 555 |  }
584 | 556 |
585 | 557 |
586 |     | -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
    | 558 | +static int vmw_ttm_bind(struct ttm_bo_device *bdev,
    | 559 | +                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
587 | 560 |  {
588 | 561 |          struct vmw_ttm_tt *vmw_be =
589 | 562 |                  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
590 |     | -        int ret;
    | 563 | +        int ret = 0;
    | 564 | +
    | 565 | +        if (!bo_mem)
    | 566 | +                return -EINVAL;
    | 567 | +
    | 568 | +        if (vmw_be->bound)
    | 569 | +                return 0;
591 | 570 |
592 | 571 |          ret = vmw_ttm_map_dma(vmw_be);
593 | 572 |          if (unlikely(ret != 0))
 .. |  .. |
598 | 577 |
599 | 578 |          switch (bo_mem->mem_type) {
600 | 579 |          case VMW_PL_GMR:
601 |     | -                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
    | 580 | +                ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
602 | 581 |                                      ttm->num_pages, vmw_be->gmr_id);
    | 582 | +                break;
603 | 583 |          case VMW_PL_MOB:
604 | 584 |                  if (unlikely(vmw_be->mob == NULL)) {
605 | 585 |                          vmw_be->mob =
 .. |  .. |
608 | 588 |                                  return -ENOMEM;
609 | 589 |                  }
610 | 590 |
611 |     | -                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
    | 591 | +                ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
612 | 592 |                                      &vmw_be->vsgt, ttm->num_pages,
613 | 593 |                                      vmw_be->gmr_id);
    | 594 | +                break;
614 | 595 |          default:
615 | 596 |                  BUG();
616 | 597 |          }
617 |     | -        return 0;
    | 598 | +        vmw_be->bound = true;
    | 599 | +        return ret;
618 | 600 |  }
619 | 601 |
620 |     | -static int vmw_ttm_unbind(struct ttm_tt *ttm)
    | 602 | +static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
    | 603 | +                           struct ttm_tt *ttm)
621 | 604 |  {
622 | 605 |          struct vmw_ttm_tt *vmw_be =
623 | 606 |                  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    | 607 | +
    | 608 | +        if (!vmw_be->bound)
    | 609 | +                return;
624 | 610 |
625 | 611 |          switch (vmw_be->mem_type) {
626 | 612 |          case VMW_PL_GMR:
 .. |  .. |
635 | 621 |
636 | 622 |          if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
637 | 623 |                  vmw_ttm_unmap_dma(vmw_be);
638 |     | -
639 |     | -        return 0;
    | 624 | +        vmw_be->bound = false;
640 | 625 |  }
641 | 626 |
642 | 627 |
643 |     | -static void vmw_ttm_destroy(struct ttm_tt *ttm)
    | 628 | +static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
644 | 629 |  {
645 | 630 |          struct vmw_ttm_tt *vmw_be =
646 | 631 |                  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
647 | 632 |
    | 633 | +        vmw_ttm_unbind(bdev, ttm);
    | 634 | +        ttm_tt_destroy_common(bdev, ttm);
648 | 635 |          vmw_ttm_unmap_dma(vmw_be);
649 | 636 |          if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
650 | 637 |                  ttm_dma_tt_fini(&vmw_be->dma_ttm);
 .. |  .. |
658 | 645 |  }
659 | 646 |
660 | 647 |
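With bind, unbind and destroy becoming device-wide driver callbacks, TTM no longer tracks bound state on the driver's behalf, which is what the new vmw_ttm_tt.bound flag is for: both operations are made idempotent. The guard pattern in isolation (struct and helpers are stand-ins, not the driver's real ones):

```c
/* Illustrative skeleton of the idempotent bind/unbind pattern above. */
struct example_tt {
	bool bound;
};

static int example_bind(struct example_tt *tt)
{
	if (tt->bound)
		return 0;	/* a second bind is a no-op */
	/* ... program GMR/MOB tables here ... */
	tt->bound = true;
	return 0;
}

static void example_unbind(struct example_tt *tt)
{
	if (!tt->bound)
		return;		/* unbind without a prior bind is a no-op */
	/* ... tear down GMR/MOB tables here ... */
	tt->bound = false;
}
```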
661 |     | -static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
    | 648 | +static int vmw_ttm_populate(struct ttm_bo_device *bdev,
    | 649 | +                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
662 | 650 |  {
663 | 651 |          struct vmw_ttm_tt *vmw_tt =
664 | 652 |                  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 .. |  .. |
666 | 654 |          struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
667 | 655 |          int ret;
668 | 656 |
669 |     | -        if (ttm->state != tt_unpopulated)
    | 657 | +        if (ttm_tt_is_populated(ttm))
670 | 658 |                  return 0;
671 | 659 |
672 | 660 |          if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 .. |  .. |
686 | 674 |          return ret;
687 | 675 |  }
688 | 676 |
689 |     | -static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
    | 677 | +static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
    | 678 | +                               struct ttm_tt *ttm)
690 | 679 |  {
691 | 680 |          struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
692 | 681 |                                                    dma_ttm.ttm);
 .. |  .. |
710 | 699 |          ttm_pool_unpopulate(ttm);
711 | 700 |  }
712 | 701 |
713 |     | -static struct ttm_backend_func vmw_ttm_func = {
714 |     | -        .bind = vmw_ttm_bind,
715 |     | -        .unbind = vmw_ttm_unbind,
716 |     | -        .destroy = vmw_ttm_destroy,
717 |     | -};
718 |     | -
719 | 702 |  static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
720 | 703 |                                          uint32_t page_flags)
721 | 704 |  {
 .. |  .. |
726 | 709 |          if (!vmw_be)
727 | 710 |                  return NULL;
728 | 711 |
729 |     | -        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
730 | 712 |          vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
731 | 713 |          vmw_be->mob = NULL;
732 | 714 |
 .. |  .. |
743 | 725 |          return NULL;
744 | 726 |  }
745 | 727 |
746 |     | -static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
747 |     | -{
748 |     | -        return 0;
749 |     | -}
750 |     | -
751 |     | -static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
752 |     | -                             struct ttm_mem_type_manager *man)
753 |     | -{
754 |     | -        switch (type) {
755 |     | -        case TTM_PL_SYSTEM:
756 |     | -                /* System memory */
757 |     | -
758 |     | -                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
759 |     | -                man->available_caching = TTM_PL_FLAG_CACHED;
760 |     | -                man->default_caching = TTM_PL_FLAG_CACHED;
761 |     | -                break;
762 |     | -        case TTM_PL_VRAM:
763 |     | -                /* "On-card" video ram */
764 |     | -                man->func = &ttm_bo_manager_func;
765 |     | -                man->gpu_offset = 0;
766 |     | -                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
767 |     | -                man->available_caching = TTM_PL_FLAG_CACHED;
768 |     | -                man->default_caching = TTM_PL_FLAG_CACHED;
769 |     | -                break;
770 |     | -        case VMW_PL_GMR:
771 |     | -        case VMW_PL_MOB:
772 |     | -                /*
773 |     | -                 * "Guest Memory Regions" is an aperture like feature with
774 |     | -                 * one slot per bo. There is an upper limit of the number of
775 |     | -                 * slots as well as the bo size.
776 |     | -                 */
777 |     | -                man->func = &vmw_gmrid_manager_func;
778 |     | -                man->gpu_offset = 0;
779 |     | -                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
780 |     | -                man->available_caching = TTM_PL_FLAG_CACHED;
781 |     | -                man->default_caching = TTM_PL_FLAG_CACHED;
782 |     | -                break;
783 |     | -        default:
784 |     | -                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
785 |     | -                return -EINVAL;
786 |     | -        }
787 |     | -        return 0;
788 |     | -}
789 |     | -
790 | 728 |  static void vmw_evict_flags(struct ttm_buffer_object *bo,
791 | 729 |                              struct ttm_placement *placement)
792 | 730 |  {
 .. |  .. |
801 | 739 |          return vmw_user_bo_verify_access(bo, tfile);
802 | 740 |  }
803 | 741 |
804 |     | -static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
    | 742 | +static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
805 | 743 |  {
806 |     | -        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
807 | 744 |          struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
808 | 745 |
809 |     | -        mem->bus.addr = NULL;
810 |     | -        mem->bus.is_iomem = false;
811 |     | -        mem->bus.offset = 0;
812 |     | -        mem->bus.size = mem->num_pages << PAGE_SHIFT;
813 |     | -        mem->bus.base = 0;
814 |     | -        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
815 |     | -                return -EINVAL;
816 | 746 |          switch (mem->mem_type) {
817 | 747 |          case TTM_PL_SYSTEM:
818 | 748 |          case VMW_PL_GMR:
819 | 749 |          case VMW_PL_MOB:
820 | 750 |                  return 0;
821 | 751 |          case TTM_PL_VRAM:
822 |     | -                mem->bus.offset = mem->start << PAGE_SHIFT;
823 |     | -                mem->bus.base = dev_priv->vram_start;
    | 752 | +                mem->bus.offset = (mem->start << PAGE_SHIFT) +
    | 753 | +                        dev_priv->vram_start;
824 | 754 |                  mem->bus.is_iomem = true;
825 | 755 |                  break;
826 | 756 |          default:
 .. |  .. |
829 | 759 |          return 0;
830 | 760 |  }
831 | 761 |
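struct ttm_resource drops the separate bus.base field, so io_mem_reserve now publishes a single absolute bus address in bus.offset instead of a base/offset pair that TTM added together. A small illustrative helper showing the same arithmetic (names are stand-ins):

```c
/* Illustrative only: where a VRAM-backed resource lands on the bus.
 *
 * Old scheme: bus.base = vram_start, bus.offset = start << PAGE_SHIFT,
 *             and TTM computed base + offset when mapping.
 * New scheme: bus.offset alone carries the absolute address. */
static phys_addr_t example_vram_bus_addr(phys_addr_t vram_start,
					 unsigned long first_page)
{
	return vram_start + ((phys_addr_t)first_page << PAGE_SHIFT);
}
```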
832 |     | -static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
833 |     | -{
834 |     | -}
835 |     | -
836 |     | -static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
837 |     | -{
838 |     | -        return 0;
839 |     | -}
840 |     | -
841 | 762 |  /**
842 | 763 |   * vmw_move_notify - TTM move_notify_callback
843 | 764 |   *
844 | 765 |   * @bo: The TTM buffer object about to move.
845 |     | - * @mem: The struct ttm_mem_reg indicating to what memory
    | 766 | + * @mem: The struct ttm_resource indicating to what memory
846 | 767 |   * region the move is taking place.
847 | 768 |   *
848 | 769 |   * Calls move_notify for all subsystems needing it.
 .. |  .. |
850 | 771 |   */
851 | 772 |  static void vmw_move_notify(struct ttm_buffer_object *bo,
852 | 773 |                              bool evict,
853 |     | -                            struct ttm_mem_reg *mem)
    | 774 | +                            struct ttm_resource *mem)
854 | 775 |  {
855 | 776 |          vmw_bo_move_notify(bo, mem);
856 | 777 |          vmw_query_move_notify(bo, mem);
 .. |  .. |
873 | 794 |          .ttm_tt_create = &vmw_ttm_tt_create,
874 | 795 |          .ttm_tt_populate = &vmw_ttm_populate,
875 | 796 |          .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
876 |     | -        .invalidate_caches = vmw_invalidate_caches,
877 |     | -        .init_mem_type = vmw_init_mem_type,
    | 797 | +        .ttm_tt_bind = &vmw_ttm_bind,
    | 798 | +        .ttm_tt_unbind = &vmw_ttm_unbind,
    | 799 | +        .ttm_tt_destroy = &vmw_ttm_destroy,
878 | 800 |          .eviction_valuable = ttm_bo_eviction_valuable,
879 | 801 |          .evict_flags = vmw_evict_flags,
880 | 802 |          .move = NULL,
881 | 803 |          .verify_access = vmw_verify_access,
882 | 804 |          .move_notify = vmw_move_notify,
883 | 805 |          .swap_notify = vmw_swap_notify,
884 |     | -        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
885 | 806 |          .io_mem_reserve = &vmw_ttm_io_mem_reserve,
886 |     | -        .io_mem_free = &vmw_ttm_io_mem_free,
887 | 807 |  };
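The net effect of the vtable changes: the per-ttm_tt backend_func is gone, bind/unbind/destroy hang off the device-wide ttm_bo_driver, and callbacks that were empty stubs (invalidate_caches, io_mem_free, fault_reserve_notify) are simply left unset. A trimmed sketch of the resulting driver struct (field names as in this diff; the real table has more members than shown):

```c
/* Hedged sketch: what moved where. Omitted callbacks are treated as no-ops. */
static struct ttm_bo_driver example_driver = {
	.ttm_tt_create  = &vmw_ttm_tt_create,
	.ttm_tt_bind    = &vmw_ttm_bind,	/* was vmw_ttm_func.bind */
	.ttm_tt_unbind  = &vmw_ttm_unbind,	/* was vmw_ttm_func.unbind */
	.ttm_tt_destroy = &vmw_ttm_destroy,	/* was vmw_ttm_func.destroy */
	/* ... remaining callbacks as in the table above ... */
};
```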
    | 808 | +
    | 809 | +int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
    | 810 | +                               unsigned long bo_size,
    | 811 | +                               struct ttm_buffer_object **bo_p)
    | 812 | +{
    | 813 | +        struct ttm_operation_ctx ctx = {
    | 814 | +                .interruptible = false,
    | 815 | +                .no_wait_gpu = false
    | 816 | +        };
    | 817 | +        struct ttm_buffer_object *bo;
    | 818 | +        int ret;
    | 819 | +
    | 820 | +        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
    | 821 | +                            ttm_bo_type_device,
    | 822 | +                            &vmw_sys_ne_placement,
    | 823 | +                            0, false, &bo);
    | 824 | +
    | 825 | +        if (unlikely(ret != 0))
    | 826 | +                return ret;
    | 827 | +
    | 828 | +        ret = ttm_bo_reserve(bo, false, true, NULL);
    | 829 | +        BUG_ON(ret != 0);
    | 830 | +        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
    | 831 | +        if (likely(ret == 0)) {
    | 832 | +                struct vmw_ttm_tt *vmw_tt =
    | 833 | +                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
    | 834 | +                ret = vmw_ttm_map_dma(vmw_tt);
    | 835 | +        }
    | 836 | +
    | 837 | +        ttm_bo_unreserve(bo);
    | 838 | +
    | 839 | +        if (likely(ret == 0))
    | 840 | +                *bo_p = bo;
    | 841 | +        return ret;
    | 842 | +}
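vmw_bo_create_and_populate() replaces the removed vmw_bo_map_dma() path for code that needs a kernel-internal, non-evictable buffer whose pages are populated and DMA-mapped up front. A hedged caller sketch (error handling abbreviated):

```c
/* Illustrative caller: allocate a device-visible scratch buffer,
 * use it, then drop the reference. */
static int example_use(struct vmw_private *dev_priv)
{
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
	if (ret)
		return ret;

	/* ... the bo's pages are already populated and DMA-mapped here ... */

	ttm_bo_put(bo);		/* release when done */
	return 0;
}
```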
---|