forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -1,31 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:Mark Yao <mark.yao@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
+#include <linux/dma-buf-cache.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
 #include <drm/drm.h>
-#include <drm/drmP.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
 #include <drm/drm_vma_manager.h>
 
-#include <linux/dma-buf.h>
 #include <linux/genalloc.h>
 #include <linux/iommu.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
-#include <linux/swiotlb.h>
+#include <linux/rockchip/rockchip_sip.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
+
+static u32 bank_bit_first = 12;
+static u32 bank_bit_mask = 0x7;
 
 struct page_info {
 	struct page *page;
@@ -38,7 +36,7 @@
 {
 	struct drm_device *drm = rk_obj->base.dev;
 	struct rockchip_drm_private *private = drm->dev_private;
-	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_TLB_SHOT_ENTIRE;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 	ssize_t ret;
 
 	mutex_lock(&private->mm_lock);
@@ -54,14 +52,16 @@
 
 	rk_obj->dma_addr = rk_obj->mm.start;
 
-	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
-			   rk_obj->sgt->nents, prot);
+	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+				prot);
 	if (ret < rk_obj->base.size) {
 		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
 			  ret, rk_obj->base.size);
 		ret = -ENOMEM;
 		goto err_remove_node;
 	}
+
+	iommu_flush_iotlb_all(private->domain);
 
 	rk_obj->size = ret;
 
@@ -104,59 +104,15 @@
 	}
 }
 
-static struct sg_table *rockchip_gem_pages_to_sg(struct page **pages, unsigned int nr_pages)
+void rockchip_gem_get_ddr_info(void)
 {
-	struct sg_table *sg = NULL;
-	int ret;
-#define SG_SIZE_MAX	(IO_TLB_SEGSIZE * (1 << IO_TLB_SHIFT))
+	struct dram_addrmap_info *ddr_map_info;
 
-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg) {
-		ret = -ENOMEM;
-		goto out;
+	ddr_map_info = sip_smc_get_dram_map();
+	if (ddr_map_info) {
+		bank_bit_first = ddr_map_info->bank_bit_first;
+		bank_bit_mask = ddr_map_info->bank_bit_mask;
 	}
-
-	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					  nr_pages << PAGE_SHIFT,
-					  SG_SIZE_MAX, GFP_KERNEL);
-	if (ret)
-		goto out;
-
-	return sg;
-out:
-	kfree(sg);
-	return ERR_PTR(ret);
-}
-
-static struct page **get_pages(struct drm_gem_object *obj)
-{
-	if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) {
-		struct drm_device *drm = obj->dev;
-		struct rockchip_drm_private *priv = drm->dev_private;
-		struct dmabuf_page_pool *pool = priv->page_pools;
-
-		return dmabuf_page_pool_alloc_pages_array(pool,
-							  obj->size >>
-							  PAGE_SHIFT);
-	}
-
-	return drm_gem_get_pages(obj);
-}
-
-static void put_pages(struct drm_gem_object *obj, struct page **pages,
-		      bool dirty, bool accessed)
-{
-	if (IS_ENABLED(CONFIG_DMABUF_PAGE_POOL)) {
-		struct drm_device *drm = obj->dev;
-		struct rockchip_drm_private *priv = drm->dev_private;
-		struct dmabuf_page_pool *pool = priv->page_pools;
-
-		return dmabuf_page_pool_free_pages_array(pool, pages,
-							 obj->size >>
-							 PAGE_SHIFT);
-	}
-
-	return drm_gem_put_pages(obj, pages, dirty, accessed);
 }
 
 static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
@@ -173,7 +129,7 @@
 	struct list_head lists[PG_ROUND];
 	dma_addr_t phys;
 	int end = 0;
-	unsigned int bit12_14;
+	unsigned int bit_index;
 	unsigned int block_index[PG_ROUND] = {0};
 	struct page_info *info;
 	unsigned int maximum;
@@ -181,7 +137,7 @@
 	for (i = 0; i < PG_ROUND; i++)
 		INIT_LIST_HEAD(&lists[i]);
 
-	pages = get_pages(&rk_obj->base);
+	pages = drm_gem_get_pages(&rk_obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -192,11 +148,14 @@
 	n_pages = rk_obj->num_pages;
 
 	dst_pages = __vmalloc(sizeof(struct page *) * n_pages,
-			      GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+			      GFP_KERNEL | __GFP_HIGHMEM);
 	if (!dst_pages) {
 		ret = -ENOMEM;
 		goto err_put_pages;
 	}
+
+	DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n",
+		      bank_bit_first, bank_bit_mask);
 
 	cur_page = 0;
 	remain = n_pages;
@@ -209,7 +168,7 @@
 		}
 
 		chunk_pages = j - cur_page;
-		if (chunk_pages > 7) {
+		if (chunk_pages >= PG_ROUND) {
 			for (i = 0; i < chunk_pages; i++)
 				dst_pages[end + i] = pages[cur_page + i];
 			end += chunk_pages;
@@ -224,9 +183,9 @@
 			INIT_LIST_HEAD(&info->list);
 			info->page = pages[cur_page + i];
 			phys = page_to_phys(info->page);
-			bit12_14 = (phys >> 12) & 0x7;
-			list_add_tail(&info->list, &lists[bit12_14]);
-			block_index[bit12_14]++;
+			bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
+			list_add_tail(&info->list, &lists[bit_index]);
+			block_index[bit_index]++;
 		}
 	}
 
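The bucketing in the hunk above spreads a buffer's pages across DRAM banks: each page is binned by its bank bits, and the bins are then interleaved. The old code hard-coded bank bits 12-14 ((phys >> 12) & 0x7); the patched code uses bank_bit_first and bank_bit_mask, which rockchip_gem_get_ddr_info() fills in from firmware via sip_smc_get_dram_map(), and folds the result into the fixed number of bins with "% PG_ROUND". A standalone sketch of just the index computation (not driver code; the helper name and example address are illustrative, and it assumes PG_ROUND is 8, consistent with the old "chunk_pages > 7" test):

#include <stdio.h>

#define PG_ROUND 8	/* assumed bin count, matching the old "> 7" test */

/* Bin a physical page address by its DRAM bank bits. */
static unsigned int bank_bucket(unsigned long long phys,
				unsigned int bank_bit_first,
				unsigned int bank_bit_mask)
{
	return ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
}

int main(void)
{
	unsigned long long phys = 0x40007000ULL;	/* example address */

	/* Driver defaults: bank bits 12-14, as in the old hard-coded path. */
	printf("default layout: bucket %u\n", bank_bucket(phys, 12, 0x7));

	/* Hypothetical firmware-reported layout with bank bits 13-15. */
	printf("reported layout: bucket %u\n", bank_bucket(phys, 13, 0x7));
	return 0;
}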
@@ -254,9 +213,8 @@
 
 	DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
 		      end, n_pages);
-
-	rk_obj->sgt = rockchip_gem_pages_to_sg(dst_pages, rk_obj->num_pages);
-
+	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
+					    dst_pages, rk_obj->num_pages);
 	if (IS_ERR(rk_obj->sgt)) {
 		ret = PTR_ERR(rk_obj->sgt);
 		goto err_put_list;
@@ -271,11 +229,10 @@
 	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
 	 * without relying on symbols that are not exported.
 	 */
-	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+	for_each_sgtable_sg(rk_obj->sgt, s, i)
 		sg_dma_address(s) = sg_phys(s);
 
-	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
-			       DMA_TO_DEVICE);
+	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
 
 	kvfree(pages);
 
@@ -285,8 +242,7 @@
 	rockchip_gem_free_list(lists);
 	kvfree(dst_pages);
 err_put_pages:
-	put_pages(&rk_obj->base, rk_obj->pages, false, false);
-	rk_obj->pages = NULL;
+	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
 	return ret;
 }
 
@@ -294,9 +250,7 @@
 {
 	sg_free_table(rk_obj->sgt);
 	kfree(rk_obj->sgt);
-	rk_obj->sgt = NULL;
-	put_pages(&rk_obj->base, rk_obj->pages, true, true);
-	rk_obj->pages = NULL;
+	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
 }
 
 static inline void *drm_calloc_large(size_t nmemb, size_t size);
@@ -346,6 +300,7 @@
 	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
 					 sizeof(*rk_obj->pages));
 	if (!rk_obj->pages) {
+		ret = -ENOMEM;
 		DRM_ERROR("failed to allocate pages.\n");
 		goto err_sg_table_free;
 	}
@@ -383,7 +338,7 @@
 		return kcalloc(nmemb, size, GFP_KERNEL);
 
 	return __vmalloc(size * nmemb,
-			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 }
 
 static inline void drm_free_large(void *ptr)
@@ -428,7 +383,7 @@
 		paddr += PAGE_SIZE;
 		i++;
 	}
-	sgt = rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+	sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
 	if (IS_ERR(sgt)) {
 		ret = PTR_ERR(sgt);
 		goto err_free_pages;
@@ -459,6 +414,11 @@
 		  rk_obj->base.size);
 }
 
+static inline bool is_vop_enabled(void)
+{
+	return (IS_ENABLED(CONFIG_ROCKCHIP_VOP) || IS_ENABLED(CONFIG_ROCKCHIP_VOP2));
+}
+
 static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
 				  bool alloc_kmap)
 {
@@ -467,7 +427,7 @@
 	struct rockchip_drm_private *private = drm->dev_private;
 	int ret = 0;
 
-	if (!private->domain)
+	if (!private->domain && is_vop_enabled())
 		rk_obj->flags |= ROCKCHIP_BO_CONTIG;
 
 	if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
@@ -507,7 +467,7 @@
 		ret = rockchip_gem_iommu_map(rk_obj);
 		if (ret < 0)
 			goto err_free;
-	} else {
+	} else if (is_vop_enabled()) {
 		WARN_ON(!rk_obj->dma_handle);
 		rk_obj->dma_addr = rk_obj->dma_handle;
 	}
@@ -557,42 +517,6 @@
 	}
 }
 
-/*
- * __vm_map_pages - maps range of kernel pages into user vma
- * @vma: user vma to map to
- * @pages: pointer to array of source kernel pages
- * @num: number of pages in page array
- * @offset: user's requested vm_pgoff
- *
- * This allows drivers to map range of kernel pages into a user vma.
- *
- * Return: 0 on success and error code otherwise.
- */
-static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
-			  unsigned long num, unsigned long offset)
-{
-	unsigned long count = vma_pages(vma);
-	unsigned long uaddr = vma->vm_start;
-	int ret, i;
-
-	/* Fail if the user requested offset is beyond the end of the object */
-	if (offset > num)
-		return -ENXIO;
-
-	/* Fail if the user requested size exceeds available object size */
-	if (count > num - offset)
-		return -ENXIO;
-
-	for (i = 0; i < count; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
-		if (ret < 0)
-			return ret;
-		uaddr += PAGE_SIZE;
-	}
-
-	return 0;
-}
-
 static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
 					      struct vm_area_struct *vma)
 {
@@ -603,7 +527,7 @@
 	if (user_count == 0)
 		return -ENXIO;
 
-	return __vm_map_pages(vma, rk_obj->pages, count, vma->vm_pgoff);
+	return vm_map_pages(vma, rk_obj->pages, count);
 }
 
 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
@@ -687,7 +611,8 @@
 }
 
 static struct rockchip_gem_object *
-	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
+rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size,
+			  unsigned int flags)
 {
 	struct address_space *mapping;
 	struct rockchip_gem_object *rk_obj;
@@ -698,6 +623,10 @@
 #else
 	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 #endif
+
+	if (flags & ROCKCHIP_BO_DMA32)
+		gfp_mask |= __GFP_DMA32;
+
 	size = round_up(size, PAGE_SIZE);
 
 	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
@@ -721,7 +650,7 @@
 	struct rockchip_gem_object *rk_obj;
 	int ret;
 
-	rk_obj = rockchip_gem_alloc_object(drm, size);
+	rk_obj = rockchip_gem_alloc_object(drm, size, flags);
 	if (IS_ERR(rk_obj))
 		return rk_obj;
 	rk_obj->flags = flags;
@@ -738,6 +667,28 @@
 }
 
 /*
+ * rockchip_gem_destroy - destroy gem object
+ *
+ * The dma_buf_unmap_attachment and dma_buf_detach will be re-defined if
+ * CONFIG_DMABUF_CACHE is enabled.
+ *
+ * Same as drm_prime_gem_destroy
+ */
+static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+	struct dma_buf_attachment *attach;
+	struct dma_buf *dma_buf;
+
+	attach = obj->import_attach;
+	if (sg)
+		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+	dma_buf = attach->dmabuf;
+	dma_buf_detach(attach->dmabuf, attach);
+	/* remove the reference */
+	dma_buf_put(dma_buf);
+}
+
+/*
  * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
  * callback function
  */
@@ -751,13 +702,11 @@
 		if (private->domain) {
 			rockchip_gem_iommu_unmap(rk_obj);
 		} else {
-			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
-				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
+					  DMA_BIDIRECTIONAL, 0);
 		}
 		drm_free_large(rk_obj->pages);
-#ifndef CONFIG_ARCH_ROCKCHIP
-		drm_prime_gem_destroy(obj, rk_obj->sgt);
-#endif
+		rockchip_gem_destroy(obj, rk_obj->sgt);
 	} else {
 		rockchip_gem_free_buf(rk_obj);
 	}
@@ -797,7 +746,7 @@
 		goto err_handle_create;
 
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_put_unlocked(obj);
+	drm_gem_object_put(obj);
 
 	return rk_obj;
 
@@ -819,7 +768,7 @@
 				 struct drm_mode_create_dumb *args)
 {
 	struct rockchip_gem_object *rk_obj;
-	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 
 	/*
 	 * align to 64 bytes since Mali requires it.
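The pitch change in the hunk above alters the rounding semantics: the old expression rounded the whole row (width * bpp bits) up to bytes once, while the new one first rounds bpp up to whole bytes per pixel and then multiplies by the width. The two agree for byte-aligned formats and diverge only for sub-byte bpp. A standalone sketch with illustrative values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 100, bpp = 12;	/* sub-byte format example */

	/* Old: round the row as a whole, in bits, up to bytes. */
	unsigned int old_pitch = DIV_ROUND_UP(width * bpp, 8);	/* 150 */

	/* New: round each pixel up to whole bytes, then multiply. */
	unsigned int new_pitch = width * DIV_ROUND_UP(bpp, 8);	/* 200 */

	printf("old = %u, new = %u bytes\n", old_pitch, new_pitch);
	return 0;
}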
@@ -847,7 +796,7 @@
 	int ret;
 
 	if (rk_obj->pages)
-		return rockchip_gem_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)
@@ -863,23 +812,6 @@
 	}
 
 	return sgt;
-}
-
-static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
-						     int count)
-{
-	struct scatterlist *s;
-	dma_addr_t expected = sg_dma_address(sgt->sgl);
-	unsigned int i;
-	unsigned long size = 0;
-
-	for_each_sg(sgt->sgl, s, count, i) {
-		if (sg_dma_address(s) != expected)
-			break;
-		expected = sg_dma_address(s) + sg_dma_len(s);
-		size += sg_dma_len(s);
-	}
-	return size;
 }
 
 static int
898830 struct sg_table *sg,
899831 struct rockchip_gem_object *rk_obj)
900832 {
901
- int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
902
- DMA_BIDIRECTIONAL);
903
- if (!count)
904
- return -EINVAL;
833
+ int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
834
+ if (err)
835
+ return err;
905836
906
- if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
837
+ if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
907838 DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
908
- dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
909
- DMA_BIDIRECTIONAL);
839
+ dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
910840 return -EINVAL;
911841 }
912842
....@@ -924,7 +854,7 @@
924854 struct rockchip_gem_object *rk_obj;
925855 int ret;
926856
927
- rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
857
+ rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size, 0);
928858 if (IS_ERR(rk_obj))
929859 return ERR_CAST(rk_obj);
930860
@@ -964,13 +894,9 @@
 {
 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 
-	if (rk_obj->pages) {
-		pgprot_t prot;
-
-		prot = rk_obj->flags & ROCKCHIP_BO_CACHABLE ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
-
-		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, prot);
-	}
+	if (rk_obj->pages)
+		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+			    pgprot_writecombine(PAGE_KERNEL));
 
 	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return NULL;
@@ -990,32 +916,6 @@
 	/* Nothing to do if allocated by DMA mapping API. */
 }
 
-int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
-				 struct drm_device *dev, uint32_t handle,
-				 uint64_t *offset)
-{
-	struct drm_gem_object *obj;
-	int ret = 0;
-
-	obj = drm_gem_object_lookup(file_priv, handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto out;
-
-	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
-
-out:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
@@ -1032,8 +932,8 @@
 {
 	struct drm_rockchip_gem_map_off *args = data;
 
-	return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
-					    &args->offset);
+	return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
+				       &args->offset);
 }
 
 int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
@@ -1060,7 +960,8 @@
 	args->phy_addr = page_to_phys(rk_obj->pages[0]);
 
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put(obj);
+
 	return ret;
 }
 
@@ -1079,7 +980,7 @@
 }
 
 int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
-					enum dma_data_direction dir)
+				      enum dma_data_direction dir)
 {
 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 	struct drm_device *drm = obj->dev;
@@ -1089,80 +990,5 @@
 
 	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
 			       rk_obj->sgt->nents, dir);
-	return 0;
-}
-
-static int rockchip_gem_prime_sgl_sync_range(struct device *dev,
-		struct scatterlist *sgl, unsigned int nents,
-		unsigned int offset, unsigned int length,
-		enum dma_data_direction dir, bool for_cpu)
-{
-	int i;
-	struct scatterlist *sg;
-	unsigned int len = 0;
-	dma_addr_t sg_dma_addr;
-
-	for_each_sg(sgl, sg, nents, i) {
-		unsigned int sg_offset, sg_left, size = 0;
-
-		len += sg->length;
-		if (len <= offset)
-			continue;
-
-		sg_dma_addr = sg_dma_address(sg);
-		sg_left = len - offset;
-		sg_offset = sg->length - sg_left;
-
-		size = (length < sg_left) ? length : sg_left;
-		if (for_cpu)
-			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
-						      sg_offset, size, dir);
-		else
-			dma_sync_single_range_for_device(dev, sg_dma_addr,
-							 sg_offset, size, dir);
-
-		offset += size;
-		length -= size;
-
-		if (length == 0)
-			break;
-	}
-
-	return 0;
-}
-
-int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj,
-						enum dma_data_direction dir,
-						unsigned int offset,
-						unsigned int len)
-{
-	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	struct drm_device *drm = obj->dev;
-
-	if (!rk_obj->sgt)
-		return 0;
-
-	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
-					  rk_obj->sgt->nents,
-					  offset, len, dir, true);
-
-	return 0;
-}
-
-int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj,
-					      enum dma_data_direction dir,
-					      unsigned int offset,
-					      unsigned int len)
-{
-	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	struct drm_device *drm = obj->dev;
-
-	if (!rk_obj->sgt)
-		return 0;
-
-	rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl,
-					  rk_obj->sgt->nents,
-					  offset, len, dir, false);
-
 	return 0;
 }