.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 | 4 | * Author:Mark Yao <mark.yao@rock-chips.com>
4 | | - *
5 | | - * This software is licensed under the terms of the GNU General Public
6 | | - * License version 2, as published by the Free Software Foundation, and
7 | | - * may be copied, distributed, and modified under those terms.
8 | | - *
9 | | - * This program is distributed in the hope that it will be useful,
10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | | - * GNU General Public License for more details.
13 | 5 | */
14 | 6 |
| 7 | +#include <linux/dma-buf-cache.h>
| 8 | +#include <linux/iommu.h>
| 9 | +#include <linux/vmalloc.h>
| 10 | +
15 | 11 | #include <drm/drm.h>
16 | | -#include <drm/drmP.h>
17 | 12 | #include <drm/drm_gem.h>
| 13 | +#include <drm/drm_prime.h>
18 | 14 | #include <drm/drm_vma_manager.h>
19 | 15 |
20 | | -#include <linux/dma-buf.h>
21 | 16 | #include <linux/genalloc.h>
22 | 17 | #include <linux/iommu.h>
23 | 18 | #include <linux/pagemap.h>
24 | 19 | #include <linux/vmalloc.h>
25 | | -#include <linux/swiotlb.h>
| 20 | +#include <linux/rockchip/rockchip_sip.h>
26 | 21 |
27 | 22 | #include "rockchip_drm_drv.h"
28 | 23 | #include "rockchip_drm_gem.h"
| 24 | +
| 25 | +static u32 bank_bit_first = 12;
| 26 | +static u32 bank_bit_mask = 0x7;
29 | 27 |
30 | 28 | struct page_info {
31 | 29 | struct page *page;
.. | ..
38 | 36 | {
39 | 37 | struct drm_device *drm = rk_obj->base.dev;
40 | 38 | struct rockchip_drm_private *private = drm->dev_private;
41 | | - int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_TLB_SHOT_ENTIRE;
| 39 | + int prot = IOMMU_READ | IOMMU_WRITE;
42 | 40 | ssize_t ret;
43 | 41 |
44 | 42 | mutex_lock(&private->mm_lock);
.. | ..
54 | 52 |
55 | 53 | rk_obj->dma_addr = rk_obj->mm.start;
56 | 54 |
57 | | - ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
58 | | - rk_obj->sgt->nents, prot);
| 55 | + ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
| 56 | + prot);
59 | 57 | if (ret < rk_obj->base.size) {
60 | 58 | DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
61 | 59 | ret, rk_obj->base.size);
62 | 60 | ret = -ENOMEM;
63 | 61 | goto err_remove_node;
64 | 62 | }
| 63 | +
| 64 | + iommu_flush_iotlb_all(private->domain);
65 | 65 |
66 | 66 | rk_obj->size = ret;
67 | 67 |
.. | ..
104 | 104 | }
105 | 105 | }
106 | 106 |
107 | | -static struct sg_table *rockchip_gem_pages_to_sg(struct page **pages, unsigned int nr_pages)
| 107 | +void rockchip_gem_get_ddr_info(void)
108 | 108 | {
109 | | - struct sg_table *sg = NULL;
110 | | - int ret;
111 | | -#define SG_SIZE_MAX (IO_TLB_SEGSIZE * (1 << IO_TLB_SHIFT))
| 109 | + struct dram_addrmap_info *ddr_map_info;
112 | 110 |
113 | | - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
114 | | - if (!sg) {
115 | | - ret = -ENOMEM;
116 | | - goto out;
| 111 | + ddr_map_info = sip_smc_get_dram_map();
| 112 | + if (ddr_map_info) {
| 113 | + bank_bit_first = ddr_map_info->bank_bit_first;
| 114 | + bank_bit_mask = ddr_map_info->bank_bit_mask;
117 | 115 | }
118 | | -
119 | | - ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
120 | | - nr_pages << PAGE_SHIFT,
121 | | - SG_SIZE_MAX, GFP_KERNEL);
122 | | - if (ret)
123 | | - goto out;
124 | | -
125 | | - return sg;
126 | | -out:
127 | | - kfree(sg);
128 | | - return ERR_PTR(ret);
129 | | -}
130 | | -
131 | | -static struct page **get_pages(struct drm_gem_object *obj)
132 | | -{
133 | | - if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) {
134 | | - struct drm_device *drm = obj->dev;
135 | | - struct rockchip_drm_private *priv = drm->dev_private;
136 | | - struct dmabuf_page_pool *pool = priv->page_pools;
137 | | -
138 | | - return dmabuf_page_pool_alloc_pages_array(pool,
139 | | - obj->size >>
140 | | - PAGE_SHIFT);
141 | | - }
142 | | -
143 | | - return drm_gem_get_pages(obj);
144 | | -}
145 | | -
146 | | -static void put_pages(struct drm_gem_object *obj, struct page **pages,
147 | | - bool dirty, bool accessed)
148 | | -{
149 | | - if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) {
150 | | - struct drm_device *drm = obj->dev;
151 | | - struct rockchip_drm_private *priv = drm->dev_private;
152 | | - struct dmabuf_page_pool *pool = priv->page_pools;
153 | | -
154 | | - return dmabuf_page_pool_free_pages_array(pool, pages,
155 | | - obj->size >>
156 | | - PAGE_SHIFT);
157 | | - }
158 | | -
159 | | - return drm_gem_put_pages(obj, pages, dirty, accessed);
160 | 116 | }
161 | 117 |
162 | 118 | static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
.. | ..
173 | 129 | struct list_head lists[PG_ROUND];
174 | 130 | dma_addr_t phys;
175 | 131 | int end = 0;
176 | | - unsigned int bit12_14;
| 132 | + unsigned int bit_index;
177 | 133 | unsigned int block_index[PG_ROUND] = {0};
178 | 134 | struct page_info *info;
179 | 135 | unsigned int maximum;
.. | ..
181 | 137 | for (i = 0; i < PG_ROUND; i++)
182 | 138 | INIT_LIST_HEAD(&lists[i]);
183 | 139 |
184 | | - pages = get_pages(&rk_obj->base);
| 140 | + pages = drm_gem_get_pages(&rk_obj->base);
185 | 141 | if (IS_ERR(pages))
186 | 142 | return PTR_ERR(pages);
187 | 143 |
.. | ..
192 | 148 | n_pages = rk_obj->num_pages;
193 | 149 |
194 | 150 | dst_pages = __vmalloc(sizeof(struct page *) * n_pages,
195 | | - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
| 151 | + GFP_KERNEL | __GFP_HIGHMEM);
196 | 152 | if (!dst_pages) {
197 | 153 | ret = -ENOMEM;
198 | 154 | goto err_put_pages;
199 | 155 | }
| 156 | +
| 157 | + DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n",
| 158 | + bank_bit_first, bank_bit_mask);
200 | 159 |
201 | 160 | cur_page = 0;
202 | 161 | remain = n_pages;
.. | ..
209 | 168 | }
210 | 169 |
211 | 170 | chunk_pages = j - cur_page;
212 | | - if (chunk_pages > 7) {
| 171 | + if (chunk_pages >= PG_ROUND) {
213 | 172 | for (i = 0; i < chunk_pages; i++)
214 | 173 | dst_pages[end + i] = pages[cur_page + i];
215 | 174 | end += chunk_pages;
.. | ..
224 | 183 | INIT_LIST_HEAD(&info->list);
225 | 184 | info->page = pages[cur_page + i];
226 | 185 | phys = page_to_phys(info->page);
227 | | - bit12_14 = (phys >> 12) & 0x7;
228 | | - list_add_tail(&info->list, &lists[bit12_14]);
229 | | - block_index[bit12_14]++;
| 186 | + bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
| 187 | + list_add_tail(&info->list, &lists[bit_index]);
| 188 | + block_index[bit_index]++;
230 | 189 | }
231 | 190 | }
232 | 191 |
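The hunk above replaces the hard-coded bits 12..14 with the bank parameters reported by firmware (see the rockchip_gem_get_ddr_info() hunk earlier), so pages from short physically-contiguous runs are grouped by DRAM bank before being interleaved into dst_pages. A standalone sketch of that bucket computation, assuming PG_ROUND is 8 (the old 3-bit field implies eight lists); the sample address is arbitrary:

```c
/* Standalone sketch (not driver code): bucket a physical page address by its
 * DRAM bank bits, as the bit_index computation in the hunk above does.
 * PG_ROUND = 8 is an assumption matching the old bits-12..14 field.
 */
#include <stdint.h>
#include <stdio.h>

#define PG_ROUND 8

static unsigned int bank_bucket(uint64_t phys, unsigned int bit_first,
				unsigned int bit_mask)
{
	/* Shift the bank-select bits down, mask them, fold into PG_ROUND buckets. */
	return (unsigned int)((phys >> bit_first) & bit_mask) % PG_ROUND;
}

int main(void)
{
	/* Defaults from the patch: bank bits start at bit 12, mask 0x7. */
	uint64_t phys = 0x40007000ULL;	/* arbitrary sample address */

	printf("bucket = %u\n", bank_bucket(phys, 12, 0x7));
	return 0;
}
```

Spreading neighbouring allocations across banks this way is presumably meant to reduce DRAM bank conflicts during scanout, and using the firmware-reported bit layout keeps that heuristic correct across different memory controllers.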
.. | ..
254 | 213 |
255 | 214 | DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
256 | 215 | end, n_pages);
257 | | -
258 | | - rk_obj->sgt = rockchip_gem_pages_to_sg(dst_pages, rk_obj->num_pages);
259 | | -
| 216 | + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
| 217 | + dst_pages, rk_obj->num_pages);
260 | 218 | if (IS_ERR(rk_obj->sgt)) {
261 | 219 | ret = PTR_ERR(rk_obj->sgt);
262 | 220 | goto err_put_list;
.. | ..
271 | 229 | * TODO: Replace this by drm_clflush_sg() once it can be implemented
272 | 230 | * without relying on symbols that are not exported.
273 | 231 | */
274 | | - for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
| 232 | + for_each_sgtable_sg(rk_obj->sgt, s, i)
275 | 233 | sg_dma_address(s) = sg_phys(s);
276 | 234 |
277 | | - dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
278 | | - DMA_TO_DEVICE);
| 235 | + dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
279 | 236 |
280 | 237 | kvfree(pages);
281 | 238 |
.. | ..
285 | 242 | rockchip_gem_free_list(lists);
286 | 243 | kvfree(dst_pages);
287 | 244 | err_put_pages:
288 | | - put_pages(&rk_obj->base, rk_obj->pages, false, false);
289 | | - rk_obj->pages = NULL;
| 245 | + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
290 | 246 | return ret;
291 | 247 | }
292 | 248 |
.. | ..
294 | 250 | {
295 | 251 | sg_free_table(rk_obj->sgt);
296 | 252 | kfree(rk_obj->sgt);
297 | | - rk_obj->sgt = NULL;
298 | | - put_pages(&rk_obj->base, rk_obj->pages, true, true);
299 | | - rk_obj->pages = NULL;
| 253 | + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
300 | 254 | }
301 | 255 |
302 | 256 | static inline void *drm_calloc_large(size_t nmemb, size_t size);
.. | ..
346 | 300 | rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
347 | 301 | sizeof(*rk_obj->pages));
348 | 302 | if (!rk_obj->pages) {
| 303 | + ret = -ENOMEM;
349 | 304 | DRM_ERROR("failed to allocate pages.\n");
350 | 305 | goto err_sg_table_free;
351 | 306 | }
.. | ..
383 | 338 | return kcalloc(nmemb, size, GFP_KERNEL);
384 | 339 |
385 | 340 | return __vmalloc(size * nmemb,
386 | | - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
| 341 | + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
387 | 342 | }
388 | 343 |
389 | 344 | static inline void drm_free_large(void *ptr)
.. | ..
428 | 383 | paddr += PAGE_SIZE;
429 | 384 | i++;
430 | 385 | }
431 | | - sgt = rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
| 386 | + sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
432 | 387 | if (IS_ERR(sgt)) {
433 | 388 | ret = PTR_ERR(sgt);
434 | 389 | goto err_free_pages;
.. | ..
459 | 414 | rk_obj->base.size);
460 | 415 | }
461 | 416 |
| 417 | +static inline bool is_vop_enabled(void)
| 418 | +{
| 419 | + return (IS_ENABLED(CONFIG_ROCKCHIP_VOP) || IS_ENABLED(CONFIG_ROCKCHIP_VOP2));
| 420 | +}
| 421 | +
462 | 422 | static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
463 | 423 | bool alloc_kmap)
464 | 424 | {
.. | ..
467 | 427 | struct rockchip_drm_private *private = drm->dev_private;
468 | 428 | int ret = 0;
469 | 429 |
470 | | - if (!private->domain)
| 430 | + if (!private->domain && is_vop_enabled())
471 | 431 | rk_obj->flags |= ROCKCHIP_BO_CONTIG;
472 | 432 |
473 | 433 | if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
.. | ..
507 | 467 | ret = rockchip_gem_iommu_map(rk_obj);
508 | 468 | if (ret < 0)
509 | 469 | goto err_free;
510 | | - } else {
| 470 | + } else if (is_vop_enabled()) {
511 | 471 | WARN_ON(!rk_obj->dma_handle);
512 | 472 | rk_obj->dma_addr = rk_obj->dma_handle;
513 | 473 | }
.. | ..
557 | 517 | }
558 | 518 | }
559 | 519 |
560 | | -/*
561 | | - * __vm_map_pages - maps range of kernel pages into user vma
562 | | - * @vma: user vma to map to
563 | | - * @pages: pointer to array of source kernel pages
564 | | - * @num: number of pages in page array
565 | | - * @offset: user's requested vm_pgoff
566 | | - *
567 | | - * This allows drivers to map range of kernel pages into a user vma.
568 | | - *
569 | | - * Return: 0 on success and error code otherwise.
570 | | - */
571 | | -static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
572 | | - unsigned long num, unsigned long offset)
573 | | -{
574 | | - unsigned long count = vma_pages(vma);
575 | | - unsigned long uaddr = vma->vm_start;
576 | | - int ret, i;
577 | | -
578 | | - /* Fail if the user requested offset is beyond the end of the object */
579 | | - if (offset > num)
580 | | - return -ENXIO;
581 | | -
582 | | - /* Fail if the user requested size exceeds available object size */
583 | | - if (count > num - offset)
584 | | - return -ENXIO;
585 | | -
586 | | - for (i = 0; i < count; i++) {
587 | | - ret = vm_insert_page(vma, uaddr, pages[offset + i]);
588 | | - if (ret < 0)
589 | | - return ret;
590 | | - uaddr += PAGE_SIZE;
591 | | - }
592 | | -
593 | | - return 0;
594 | | -}
595 | | -
596 | 520 | static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
597 | 521 | struct vm_area_struct *vma)
598 | 522 | {
.. | ..
603 | 527 | if (user_count == 0)
604 | 528 | return -ENXIO;
605 | 529 |
606 | | - return __vm_map_pages(vma, rk_obj->pages, count, vma->vm_pgoff);
| 530 | + return vm_map_pages(vma, rk_obj->pages, count);
607 | 531 | }
608 | 532 |
609 | 533 | static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
.. | ..
687 | 611 | }
688 | 612 |
689 | 613 | static struct rockchip_gem_object *
690 | | - rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
| 614 | +rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size,
| 615 | + unsigned int flags)
691 | 616 | {
692 | 617 | struct address_space *mapping;
693 | 618 | struct rockchip_gem_object *rk_obj;
.. | ..
698 | 623 | #else
699 | 624 | gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
700 | 625 | #endif
| 626 | +
| 627 | + if (flags & ROCKCHIP_BO_DMA32)
| 628 | + gfp_mask |= __GFP_DMA32;
| 629 | +
701 | 630 | size = round_up(size, PAGE_SIZE);
702 | 631 |
703 | 632 | rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
.. | ..
721 | 650 | struct rockchip_gem_object *rk_obj;
722 | 651 | int ret;
723 | 652 |
724 | | - rk_obj = rockchip_gem_alloc_object(drm, size);
| 653 | + rk_obj = rockchip_gem_alloc_object(drm, size, flags);
725 | 654 | if (IS_ERR(rk_obj))
726 | 655 | return rk_obj;
727 | 656 | rk_obj->flags = flags;
.. | ..
738 | 667 | }
739 | 668 |
740 | 669 | /*
| 670 | + * rockchip_gem_destroy - destroy gem object
| 671 | + *
| 672 | + * The dma_buf_unmap_attachment and dma_buf_detach will be re-defined if
| 673 | + * CONFIG_DMABUF_CACHE is enabled.
| 674 | + *
| 675 | + * Same as drm_prime_gem_destroy
| 676 | + */
| 677 | +static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
| 678 | +{
| 679 | + struct dma_buf_attachment *attach;
| 680 | + struct dma_buf *dma_buf;
| 681 | +
| 682 | + attach = obj->import_attach;
| 683 | + if (sg)
| 684 | + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
| 685 | + dma_buf = attach->dmabuf;
| 686 | + dma_buf_detach(attach->dmabuf, attach);
| 687 | + /* remove the reference */
| 688 | + dma_buf_put(dma_buf);
| 689 | +}
| 690 | +
| 691 | +/*
741 | 692 | * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
742 | 693 | * callback function
743 | 694 | */
.. | ..
751 | 702 | if (private->domain) {
752 | 703 | rockchip_gem_iommu_unmap(rk_obj);
753 | 704 | } else {
754 | | - dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
755 | | - rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
| 705 | + dma_unmap_sgtable(drm->dev, rk_obj->sgt,
| 706 | + DMA_BIDIRECTIONAL, 0);
756 | 707 | }
757 | 708 | drm_free_large(rk_obj->pages);
758 | | -#ifndef CONFIG_ARCH_ROCKCHIP
759 | | - drm_prime_gem_destroy(obj, rk_obj->sgt);
760 | | -#endif
| 709 | + rockchip_gem_destroy(obj, rk_obj->sgt);
761 | 710 | } else {
762 | 711 | rockchip_gem_free_buf(rk_obj);
763 | 712 | }
.. | ..
797 | 746 | goto err_handle_create;
798 | 747 |
799 | 748 | /* drop reference from allocate - handle holds it now. */
800 | | - drm_gem_object_put_unlocked(obj);
| 749 | + drm_gem_object_put(obj);
801 | 750 |
802 | 751 | return rk_obj;
803 | 752 |
.. | ..
819 | 768 | struct drm_mode_create_dumb *args)
820 | 769 | {
821 | 770 | struct rockchip_gem_object *rk_obj;
822 | | - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
| 771 | + u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
823 | 772 |
824 | 773 | /*
825 | 774 | * align to 64 bytes since Mali requires it.
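The minimum pitch changes from rounding the whole row's bit count up to bytes (DIV_ROUND_UP(width * bpp, 8)) to rounding each pixel up to whole bytes first (width * DIV_ROUND_UP(bpp, 8)); for bpp values that are not a multiple of 8 the new expression is larger. A small standalone comparison with arbitrary sample values:

```c
/* Standalone sketch: old vs new minimum-pitch formulas from the hunk above.
 * The width/bpp values are arbitrary examples, not taken from the driver.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1920, bpp = 12;	/* e.g. a 12-bit packed format */
	unsigned int old_pitch = DIV_ROUND_UP(width * bpp, 8);	/* 2880 */
	unsigned int new_pitch = width * DIV_ROUND_UP(bpp, 8);	/* 3840 */

	printf("old = %u, new = %u\n", old_pitch, new_pitch);
	return 0;
}
```

For byte-aligned formats (bpp a multiple of 8) the two expressions are identical, so only sub-byte formats get a wider, per-pixel-byte-aligned pitch.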
.. | ..
847 | 796 | int ret;
848 | 797 |
849 | 798 | if (rk_obj->pages)
850 | | - return rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
| 799 | + return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
851 | 800 |
852 | 801 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
853 | 802 | if (!sgt)
.. | ..
863 | 812 | }
864 | 813 |
865 | 814 | return sgt;
866 | | -}
867 | | -
868 | | -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
869 | | - int count)
870 | | -{
871 | | - struct scatterlist *s;
872 | | - dma_addr_t expected = sg_dma_address(sgt->sgl);
873 | | - unsigned int i;
874 | | - unsigned long size = 0;
875 | | -
876 | | - for_each_sg(sgt->sgl, s, count, i) {
877 | | - if (sg_dma_address(s) != expected)
878 | | - break;
879 | | - expected = sg_dma_address(s) + sg_dma_len(s);
880 | | - size += sg_dma_len(s);
881 | | - }
882 | | - return size;
883 | 815 | }
884 | 816 |
885 | 817 | static int
.. | ..
898 | 830 | struct sg_table *sg,
899 | 831 | struct rockchip_gem_object *rk_obj)
900 | 832 | {
901 | | - int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
902 | | - DMA_BIDIRECTIONAL);
903 | | - if (!count)
904 | | - return -EINVAL;
| 833 | + int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
| 834 | + if (err)
| 835 | + return err;
905 | 836 |
906 | | - if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
| 837 | + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
907 | 838 | DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
908 | | - dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
909 | | - DMA_BIDIRECTIONAL);
| 839 | + dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
910 | 840 | return -EINVAL;
911 | 841 | }
912 | 842 |
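Two things change in this import path: dma_map_sgtable() returns 0 or a negative errno rather than a mapped-entry count, so the failure check becomes "if (err)", and the driver's own rockchip_sg_get_contiguous_size() (removed above) is replaced by drm_prime_get_contiguous_size(), which performs the same walk. A standalone sketch of that walk over mock segments (struct seg is illustrative, not a kernel type):

```c
/* Standalone sketch of the contiguity check done by
 * drm_prime_get_contiguous_size() and the removed helper above:
 * count DMA-mapped bytes that are contiguous from the first segment.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t dma_addr;
	size_t len;
};

static size_t contiguous_size(const struct seg *segs, size_t nents)
{
	uint64_t expected = segs[0].dma_addr;
	size_t size = 0, i;

	for (i = 0; i < nents; i++) {
		if (segs[i].dma_addr != expected)
			break;		/* first gap ends the contiguous run */
		expected = segs[i].dma_addr + segs[i].len;
		size += segs[i].len;
	}
	return size;
}

int main(void)
{
	/* Two contiguous 4 KiB segments, then a gap: only 8 KiB counts. */
	struct seg segs[] = {
		{ 0x80000000ULL, 4096 },
		{ 0x80001000ULL, 4096 },
		{ 0x80100000ULL, 4096 },
	};

	printf("contiguous = %zu bytes\n", contiguous_size(segs, 3));
	return 0;
}
```

If the contiguous run is shorter than the dma-buf, the table is unmapped again and the import fails with -EINVAL, since this path requires the buffer to occupy a single linear DMA range.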
.. | ..
924 | 854 | struct rockchip_gem_object *rk_obj;
925 | 855 | int ret;
926 | 856 |
927 | | - rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
| 857 | + rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size, 0);
928 | 858 | if (IS_ERR(rk_obj))
929 | 859 | return ERR_CAST(rk_obj);
930 | 860 |
.. | ..
964 | 894 | {
965 | 895 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
966 | 896 |
967 | | - if (rk_obj->pages) {
968 | | - pgprot_t prot;
969 | | -
970 | | - prot = rk_obj->flags & ROCKCHIP_BO_CACHABLE ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
971 | | -
972 | | - return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, prot);
973 | | - }
| 897 | + if (rk_obj->pages)
| 898 | + return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
| 899 | + pgprot_writecombine(PAGE_KERNEL));
974 | 900 |
975 | 901 | if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
976 | 902 | return NULL;
.. | ..
990 | 916 | /* Nothing to do if allocated by DMA mapping API. */
991 | 917 | }
992 | 918 |
993 | | -int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
994 | | - struct drm_device *dev, uint32_t handle,
995 | | - uint64_t *offset)
996 | | -{
997 | | - struct drm_gem_object *obj;
998 | | - int ret = 0;
999 | | -
1000 | | - obj = drm_gem_object_lookup(file_priv, handle);
1001 | | - if (!obj) {
1002 | | - DRM_ERROR("failed to lookup gem object.\n");
1003 | | - return -EINVAL;
1004 | | - }
1005 | | -
1006 | | - ret = drm_gem_create_mmap_offset(obj);
1007 | | - if (ret)
1008 | | - goto out;
1009 | | -
1010 | | - *offset = drm_vma_node_offset_addr(&obj->vma_node);
1011 | | - DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
1012 | | -
1013 | | -out:
1014 | | - drm_gem_object_unreference_unlocked(obj);
1015 | | -
1016 | | - return ret;
1017 | | -}
1018 | | -
1019 | 919 | int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
1020 | 920 | struct drm_file *file_priv)
1021 | 921 | {
.. | ..
1032 | 932 | {
1033 | 933 | struct drm_rockchip_gem_map_off *args = data;
1034 | 934 |
1035 | | - return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
1036 | | - &args->offset);
| 935 | + return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
| 936 | + &args->offset);
1037 | 937 | }
1038 | 938 |
1039 | 939 | int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
.. | ..
1060 | 960 | args->phy_addr = page_to_phys(rk_obj->pages[0]);
1061 | 961 |
1062 | 962 | out:
1063 | | - drm_gem_object_unreference_unlocked(obj);
| 963 | + drm_gem_object_put(obj);
| 964 | +
1064 | 965 | return ret;
1065 | 966 | }
1066 | 967 |
.. | ..
1079 | 980 | }
1080 | 981 |
1081 | 982 | int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
1082 | | - enum dma_data_direction dir)
| 983 | + enum dma_data_direction dir)
1083 | 984 | {
1084 | 985 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
1085 | 986 | struct drm_device *drm = obj->dev;
.. | ..
1089 | 990 |
1090 | 991 | dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
1091 | 992 | rk_obj->sgt->nents, dir);
1092 | | - return 0;
1093 | | -}
1094 | | -
1095 | | -static int rockchip_gem_prime_sgl_sync_range(struct device *dev,
1096 | | - struct scatterlist *sgl, unsigned int nents,
1097 | | - unsigned int offset, unsigned int length,
1098 | | - enum dma_data_direction dir, bool for_cpu)
1099 | | -{
1100 | | - int i;
1101 | | - struct scatterlist *sg;
1102 | | - unsigned int len = 0;
1103 | | - dma_addr_t sg_dma_addr;
1104 | | -
1105 | | - for_each_sg(sgl, sg, nents, i) {
1106 | | - unsigned int sg_offset, sg_left, size = 0;
1107 | | -
1108 | | - len += sg->length;
1109 | | - if (len <= offset)
1110 | | - continue;
1111 | | -
1112 | | - sg_dma_addr = sg_dma_address(sg);
1113 | | - sg_left = len - offset;
1114 | | - sg_offset = sg->length - sg_left;
1115 | | -
1116 | | - size = (length < sg_left) ? length : sg_left;
1117 | | - if (for_cpu)
1118 | | - dma_sync_single_range_for_cpu(dev, sg_dma_addr,
1119 | | - sg_offset, size, dir);
1120 | | - else
1121 | | - dma_sync_single_range_for_device(dev, sg_dma_addr,
1122 | | - sg_offset, size, dir);
1123 | | -
1124 | | - offset += size;
1125 | | - length -= size;
1126 | | -
1127 | | - if (length == 0)
1128 | | - break;
1129 | | - }
1130 | | -
1131 | | - return 0;
1132 | | -}
1133 | | -
1134 | | -int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj,
1135 | | - enum dma_data_direction dir,
1136 | | - unsigned int offset,
1137 | | - unsigned int len)
1138 | | -{
1139 | | - struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
1140 | | - struct drm_device *drm = obj->dev;
1141 | | -
1142 | | - if (!rk_obj->sgt)
1143 | | - return 0;
1144 | | -
1145 | | - rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
1146 | | - rk_obj->sgt->nents,
1147 | | - offset, len, dir, true);
1148 | | -
1149 | | - return 0;
1150 | | -}
1151 | | -
1152 | | -int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj,
1153 | | - enum dma_data_direction dir,
1154 | | - unsigned int offset,
1155 | | - unsigned int len)
1156 | | -{
1157 | | - struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
1158 | | - struct drm_device *drm = obj->dev;
1159 | | -
1160 | | - if (!rk_obj->sgt)
1161 | | - return 0;
1162 | | -
1163 | | - rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
1164 | | - rk_obj->sgt->nents,
1165 | | - offset, len, dir, false);
1166 | | -
1167 | 993 | return 0;
1168 | 994 | }