forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -1,19 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015 MediaTek Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
 #include <linux/dma-buf.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
 
 #include "mtk_drm_drv.h"
 #include "mtk_drm_gem.h"
@@ -122,7 +117,7 @@
 		goto err_handle_create;
 
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_put_unlocked(&mtk_gem->base);
+	drm_gem_object_put(&mtk_gem->base);
 
 	return 0;
 
@@ -144,7 +139,6 @@
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
 			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@@ -175,6 +169,12 @@
 		return ret;
 
 	obj = vma->vm_private_data;
+
+	/*
+	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+	 * whole buffer from the start.
+	 */
+	vma->vm_pgoff = 0;
 
 	return mtk_drm_gem_object_mmap(obj, vma);
 }
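Note: the two hunks above move the vma->vm_pgoff reset out of mtk_drm_gem_object_mmap() and into mtk_drm_gem_mmap(), after drm_gem_mmap() has already used the fake offset carried in vm_pgoff to look up the GEM object. The following is a minimal userspace sketch, not part of this patch, showing where that fake offset comes from and how it reaches the kernel through mmap(); the device path, buffer dimensions, and dumb-buffer support are assumptions, and header locations may vary between distributions.

/*
 * Hypothetical userspace sketch: obtain a GEM handle, ask for its fake mmap
 * offset, and map it. Error handling is kept minimal on purpose.
 */
#define _FILE_OFFSET_BITS 64	/* DRM fake offsets do not fit in 32 bits */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* Allocate a dumb buffer; the kernel returns a GEM handle. */
	struct drm_mode_create_dumb create = { .width = 64, .height = 64, .bpp = 32 };
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Ask for the fake mmap offset associated with that handle. */
	struct drm_mode_map_dumb map = { .handle = create.handle };
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/*
	 * map.offset ends up in vma->vm_pgoff (in page units); drm_gem_mmap()
	 * uses it to find the object, and the driver then resets vm_pgoff to 0
	 * before handing the vma to dma_mmap_attrs().
	 */
	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, map.offset);
	if (ptr == MAP_FAILED)
		return 1;

	memset(ptr, 0, create.size);
	munmap(ptr, create.size);
	close(fd);
	return 0;
}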
@@ -212,32 +212,60 @@
 		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
 	struct mtk_drm_gem_obj *mtk_gem;
-	int ret;
-	struct scatterlist *s;
-	unsigned int i;
-	dma_addr_t expected;
+
+	/* check if the entries in the sg_table are contiguous */
+	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
+		DRM_ERROR("sg_table is not contiguous");
+		return ERR_PTR(-EINVAL);
+	}
 
 	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
-
 	if (IS_ERR(mtk_gem))
 		return ERR_CAST(mtk_gem);
-
-	expected = sg_dma_address(sg->sgl);
-	for_each_sg(sg->sgl, s, sg->nents, i) {
-		if (sg_dma_address(s) != expected) {
-			DRM_ERROR("sg_table is not contiguous");
-			ret = -EINVAL;
-			goto err_gem_free;
-		}
-		expected = sg_dma_address(s) + sg_dma_len(s);
-	}
 
 	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
 	mtk_gem->sg = sg;
 
 	return &mtk_gem->base;
+}
 
-err_gem_free:
-	kfree(mtk_gem);
-	return ERR_PTR(ret);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct sg_table *sgt;
+	unsigned int npages;
+
+	if (mtk_gem->kvaddr)
+		return mtk_gem->kvaddr;
+
+	sgt = mtk_gem_prime_get_sg_table(obj);
+	if (IS_ERR(sgt))
+		return NULL;
+
+	npages = obj->size >> PAGE_SHIFT;
+	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+	if (!mtk_gem->pages)
+		goto out;
+
+	drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+
+	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+			       pgprot_writecombine(PAGE_KERNEL));
+
+out:
+	kfree(sgt);
+
+	return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+	if (!mtk_gem->pages)
+		return;
+
+	vunmap(vaddr);
+	mtk_gem->kvaddr = 0;
+	kfree(mtk_gem->pages);
 }
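Note: the last hunk replaces the hand-rolled scatterlist walk with drm_prime_get_contiguous_size(), which reports how many bytes from the start of the sg_table form a single contiguous DMA range, and rejects the import when that is smaller than the dma-buf size. The standalone model below is hypothetical and for illustration only: the seg struct and contiguous_size() merely stand in for DMA-mapped scatterlist entries and the helper, but the loop follows the same logic as the removed for_each_sg() walk.

/* Userspace model of the contiguity check; compiles and runs on its own. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t dma_addr;	/* stands in for sg_dma_address() */
	size_t len;		/* stands in for sg_dma_len() */
};

/* Bytes from the first segment onward that form one contiguous DMA range. */
static size_t contiguous_size(const struct seg *sg, size_t nents)
{
	uint64_t expected = sg[0].dma_addr;
	size_t size = 0;

	for (size_t i = 0; i < nents; i++) {
		if (sg[i].dma_addr != expected)
			break;		/* gap found: stop counting here */
		expected = sg[i].dma_addr + sg[i].len;
		size += sg[i].len;
	}
	return size;
}

int main(void)
{
	/* Two back-to-back 4 KiB segments, then one with a hole before it. */
	struct seg table[] = {
		{ 0x10000000, 0x1000 },
		{ 0x10001000, 0x1000 },
		{ 0x20000000, 0x1000 },
	};
	size_t buf_size = 3 * 0x1000;

	/* Mirrors: if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) */
	if (contiguous_size(table, 3) < buf_size)
		printf("sg_table is not contiguous\n");
	return 0;
}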