2023-11-06 15ade055295d13f95d49e3d99b09f3bbfb4a43e7
kernel/drivers/video/rockchip/rga3/rga_dma_buf.c
@@ -203,15 +203,14 @@
 					    size_t size, u64 dma_limit,
 					    struct device *dev)
 {
-	struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long shift, iova_len, iova = 0;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
-	dma_addr_t limit;
-#endif
 
 	shift = iova_shift(iovad);
 	iova_len = size >> shift;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
 	/*
 	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
 	 * will come back to bite us badly, so we have to waste a bit of space
@@ -220,6 +219,7 @@
 	 */
 	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
 		iova_len = roundup_pow_of_two(iova_len);
+#endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
 	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
@@ -231,12 +231,13 @@
 	if (domain->geometry.force_aperture)
 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
-	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 111) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+	iova = alloc_iova_fast(iovad, iova_len,
+			       min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn),
+			       true);
 #else
-	limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn);
-
-	iova = alloc_iova_fast(iovad, iova_len, limit, true);
+	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
 #endif
 
 	return (dma_addr_t)iova << shift;
@@ -245,7 +246,7 @@
 static void rga_iommu_dma_free_iova(struct iommu_domain *domain,
 				    dma_addr_t iova, size_t size)
 {
-	struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 
 	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
@@ -284,7 +285,7 @@
 	}
 
 	domain = rga_iommu_get_dma_domain(rga_dev);
-	cookie = domain->iova_cookie;
+	cookie = (void *)domain->iova_cookie;
 	iovad = &cookie->iovad;
 	align_size = iova_align(iovad, size);
 
@@ -329,7 +330,7 @@
 	}
 
 	domain = rga_iommu_get_dma_domain(rga_dev);
-	cookie = domain->iova_cookie;
+	cookie = (void *)domain->iova_cookie;
 	iovad = &cookie->iovad;
 	align_size = iova_align(iovad, size);
 
@@ -393,12 +394,20 @@
 {
 	int ret = 0;
 	void *vaddr;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+	struct iosys_map map;
+#endif
 	struct dma_buf *dma_buf;
 
 	dma_buf = rga_dma_buffer->dma_buf;
 
 	if (!IS_ERR_OR_NULL(dma_buf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+		ret = dma_buf_vmap(dma_buf, &map);
+		vaddr = ret ? NULL : map.vaddr;
+#else
 		vaddr = dma_buf_vmap(dma_buf);
+#endif
 		if (vaddr) {
 			ret = rga_virtual_memory_check(vaddr, img->vir_w,
 				img->vir_h, img->format, img->yrgb_addr);
@@ -406,8 +415,11 @@
 			pr_err("can't vmap the dma buffer!\n");
 			return -EINVAL;
 		}
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+		dma_buf_vunmap(dma_buf, &map);
+#else
 		dma_buf_vunmap(dma_buf, vaddr);
+#endif
 	}
 
 	return ret;
@@ -424,21 +436,21 @@
 	if (dma_buf != NULL) {
 		get_dma_buf(dma_buf);
 	} else {
-		pr_err("dma_buf is Invalid[%p]\n", dma_buf);
+		pr_err("dma_buf is invalid[%p]\n", dma_buf);
 		return -EINVAL;
 	}
 
 	attach = dma_buf_attach(dma_buf, rga_dev);
 	if (IS_ERR(attach)) {
-		pr_err("Failed to attach dma_buf\n");
-		ret = -EINVAL;
+		ret = PTR_ERR(attach);
+		pr_err("Failed to attach dma_buf, ret[%d]\n", ret);
 		goto err_get_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, dir);
 	if (IS_ERR(sgt)) {
-		pr_err("Failed to map src attachment\n");
-		ret = -EINVAL;
+		ret = PTR_ERR(sgt);
+		pr_err("Failed to map attachment, ret[%d]\n", ret);
 		goto err_get_sgt;
 	}
 
@@ -474,22 +486,22 @@
 
 	dma_buf = dma_buf_get(fd);
 	if (IS_ERR(dma_buf)) {
-		pr_err("dma_buf_get fail fd[%d]\n", fd);
-		ret = -EINVAL;
+		ret = PTR_ERR(dma_buf);
+		pr_err("Fail to get dma_buf from fd[%d], ret[%d]\n", fd, ret);
 		return ret;
 	}
 
 	attach = dma_buf_attach(dma_buf, rga_dev);
 	if (IS_ERR(attach)) {
-		pr_err("Failed to attach dma_buf\n");
-		ret = -EINVAL;
+		ret = PTR_ERR(attach);
+		pr_err("Failed to attach dma_buf, ret[%d]\n", ret);
 		goto err_get_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, dir);
 	if (IS_ERR(sgt)) {
-		pr_err("Failed to map src attachment\n");
-		ret = -EINVAL;
+		ret = PTR_ERR(sgt);
+		pr_err("Failed to map attachment, ret[%d]\n", ret);
 		goto err_get_sgt;
 	}
 