| Old | New | Code |
|---|---|---|
| .. | .. | |
| 203 | 203 | size_t size, u64 dma_limit, |
| 204 | 204 | struct device *dev) |
| 205 | 205 | { |
| 206 | | - struct rga_iommu_dma_cookie *cookie = domain->iova_cookie; |
| | 206 | + struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie; |
| 207 | 207 | struct iova_domain *iovad = &cookie->iovad; |
| 208 | 208 | unsigned long shift, iova_len, iova = 0; |
| 209 | | -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) |
| 210 | | - dma_addr_t limit; |
| 211 | | -#endif |
| 212 | 209 | |
| 213 | 210 | shift = iova_shift(iovad); |
| 214 | 211 | iova_len = size >> shift; |
| | 212 | + |
| | 213 | +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) |
| 215 | 214 | /* |
| 216 | 215 | * Freeing non-power-of-two-sized allocations back into the IOVA caches |
| 217 | 216 | * will come back to bite us badly, so we have to waste a bit of space |
| .. | .. | |
| 220 | 219 | */ |
| 221 | 220 | if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) |
| 222 | 221 | iova_len = roundup_pow_of_two(iova_len); |
| | 222 | +#endif |
| 223 | 223 | |
| 224 | 224 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) |
| 225 | 225 | dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); |
| .. | .. | |
| 231 | 231 | if (domain->geometry.force_aperture) |
| 232 | 232 | dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); |
| 233 | 233 | |
| 234 | | -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) |
| 235 | | - iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true); |
| | 234 | +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 111) && \ |
| | 235 | + LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) |
| | 236 | + iova = alloc_iova_fast(iovad, iova_len, |
| | 237 | + min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn), |
| | 238 | + true); |
| 236 | 239 | #else |
| 237 | | - limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn); |
| 238 | | - |
| 239 | | - iova = alloc_iova_fast(iovad, iova_len, limit, true); |
| | 240 | + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true); |
| 240 | 241 | #endif |
| 241 | 242 | |
| 242 | 243 | return (dma_addr_t)iova << shift; |
| .. | .. | |
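Resolved for one common configuration, the hunks above read as a single function. The sketch below shows how the patched `rga_iommu_dma_alloc_iova()` compiles on a kernel in the v5.10 to v6.0 range, where both the power-of-two rounding and the `bus_dma_limit` clamp are active. It is an illustration of the version logic, not a verbatim copy of the driver file; `struct rga_iommu_dma_cookie` is the driver's own type, which (per the diff) contains a `struct iova_domain` member named `iovad`.

```c
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/log2.h>

/*
 * Sketch: the patched allocator as it resolves on a v5.10..v6.0 kernel,
 * with the pre-v6.1 power-of-two rounding still in effect.
 * rga_iommu_dma_cookie is the driver-local cookie type from the diff.
 */
static dma_addr_t rga_iommu_dma_alloc_iova(struct iommu_domain *domain,
					   size_t size, u64 dma_limit,
					   struct device *dev)
{
	struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	/* Keep IOVA-cache-friendly sizes: round small lengths up to a power of two. */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	/* Respect the device limit and the domain aperture before allocating. */
	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);

	return (dma_addr_t)iova << shift;
}
```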
| Old | New | Code |
|---|---|---|
| 245 | 246 | static void rga_iommu_dma_free_iova(struct iommu_domain *domain, |
| 246 | 247 | dma_addr_t iova, size_t size) |
| 247 | 248 | { |
| 248 | | - struct rga_iommu_dma_cookie *cookie = domain->iova_cookie; |
| | 249 | + struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie; |
| 249 | 250 | struct iova_domain *iovad = &cookie->iovad; |
| 250 | 251 | |
| 251 | 252 | free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad)); |
| .. | .. | |
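Allocation and release are meant to stay symmetric: the size handed to the allocator is the same IOVA-aligned size the free path converts back into a PFN count. A rough usage sketch of that pairing follows; the helper name, the `dma_get_mask(dev)` limit, and the surrounding flow are illustrative assumptions rather than code from the driver.

```c
#include <linux/dma-mapping.h>
#include <linux/iova.h>

/* Illustrative pairing only: the helper name and the mask choice are hypothetical. */
static int rga_iova_example(struct iommu_domain *domain, struct device *dev,
			    struct iova_domain *iovad, size_t size)
{
	size_t align_size = iova_align(iovad, size);	/* round up to the IOVA granule */
	dma_addr_t iova = rga_iommu_dma_alloc_iova(domain, align_size,
						   dma_get_mask(dev), dev);
	if (!iova)
		return -ENOMEM;

	/* ... map and use the [iova, iova + align_size) range ... */

	rga_iommu_dma_free_iova(domain, iova, align_size);
	return 0;
}
```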
| Old | New | Code |
|---|---|---|
| 284 | 285 | } |
| 285 | 286 | |
| 286 | 287 | domain = rga_iommu_get_dma_domain(rga_dev); |
| 287 | | - cookie = domain->iova_cookie; |
| | 288 | + cookie = (void *)domain->iova_cookie; |
| 288 | 289 | iovad = &cookie->iovad; |
| 289 | 290 | align_size = iova_align(iovad, size); |
| 290 | 291 | |
| .. | .. | |
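The repeated `(void *)` cast is the actual point of these small hunks. On kernels where `iommu_domain::iova_cookie` is declared as the kernel-private `struct iommu_dma_cookie *` rather than `void *`, assigning it directly to the driver's mirror type `struct rga_iommu_dma_cookie *` triggers an incompatible-pointer-types error, while a detour through `void *` builds against both declarations. A minimal sketch of the idiom; the wrapper function is hypothetical.

```c
#include <linux/iommu.h>

struct rga_iommu_dma_cookie;	/* defined by the driver; layout not shown here */

/*
 * Hedged illustration: a plain assignment from domain->iova_cookie fails to
 * compile once the field is a typed struct iommu_dma_cookie pointer, so the
 * driver bounces through void * to stay buildable on old and new kernels.
 */
static struct rga_iommu_dma_cookie *
rga_get_cookie(struct iommu_domain *domain)
{
	return (void *)domain->iova_cookie;
}
```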
| Old | New | Code |
|---|---|---|
| 329 | 330 | } |
| 330 | 331 | |
| 331 | 332 | domain = rga_iommu_get_dma_domain(rga_dev); |
| 332 | | - cookie = domain->iova_cookie; |
| | 333 | + cookie = (void *)domain->iova_cookie; |
| 333 | 334 | iovad = &cookie->iovad; |
| 334 | 335 | align_size = iova_align(iovad, size); |
| 335 | 336 | |
| .. | .. | |
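Both call sites round the mapping size up to the IOVA granule before allocating. `iova_align()` is the stock helper from `<linux/iova.h>`; a short worked example, with the 4 KiB granule assumed purely for illustration:

```c
#include <linux/iova.h>

/*
 * iova_align() rounds a byte size up to the domain's granule
 * (iovad->granule). With a 4 KiB granule, assumed only for this example,
 * a 6000-byte buffer becomes an 8192-byte IOVA reservation.
 */
static size_t rga_example_align(struct iova_domain *iovad)
{
	size_t size = 6000;

	return iova_align(iovad, size);	/* 8192 when granule == 4096 */
}
```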
| Old | New | Code |
|---|---|---|
| 393 | 394 | { |
| 394 | 395 | int ret = 0; |
| 395 | 396 | void *vaddr; |
| | 397 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) |
| | 398 | + struct iosys_map map; |
| | 399 | +#endif |
| 396 | 400 | struct dma_buf *dma_buf; |
| 397 | 401 | |
| 398 | 402 | dma_buf = rga_dma_buffer->dma_buf; |
| 399 | 403 | |
| 400 | 404 | if (!IS_ERR_OR_NULL(dma_buf)) { |
| | 405 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) |
| | 406 | + ret = dma_buf_vmap(dma_buf, &map); |
| | 407 | + vaddr = ret ? NULL : map.vaddr; |
| | 408 | +#else |
| 401 | 409 | vaddr = dma_buf_vmap(dma_buf); |
| | 410 | +#endif |
| 402 | 411 | if (vaddr) { |
| 403 | 412 | ret = rga_virtual_memory_check(vaddr, img->vir_w, |
| 404 | 413 | img->vir_h, img->format, img->yrgb_addr); |
| .. | .. | |
| 406 | 415 | pr_err("can't vmap the dma buffer!\n"); |
| 407 | 416 | return -EINVAL; |
| 408 | 417 | } |
| 409 | | - |
| | 418 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) |
| | 419 | + dma_buf_vunmap(dma_buf, &map); |
| | 420 | +#else |
| 410 | 421 | dma_buf_vunmap(dma_buf, vaddr); |
| | 422 | +#endif |
| 411 | 423 | } |
| 412 | 424 | |
| 413 | 425 | return ret; |
| .. | .. | |
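The new v6.1+ branches follow the current dma-buf vmap interface, which fills a `struct iosys_map` and returns an errno instead of a raw pointer. A minimal sketch of that pattern in isolation; the consumer of the mapping is a placeholder standing in for `rga_virtual_memory_check()`.

```c
#include <linux/dma-buf.h>
#include <linux/iosys-map.h>

/*
 * Sketch of the iosys_map-based vmap/vunmap pattern used in the 6.1+
 * branches above. The commented-out consume() stands in for whatever
 * touches the mapping; it is not a real kernel API.
 */
static int rga_vmap_example(struct dma_buf *dma_buf)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(dma_buf, &map);	/* 0 on success, negative errno on failure */
	if (ret)
		return ret;

	/* map.vaddr covers system-memory maps; I/O maps expose map.vaddr_iomem instead. */
	/* consume(map.vaddr); */

	dma_buf_vunmap(dma_buf, &map);
	return 0;
}
```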
| Old | New | Code |
|---|---|---|
| 424 | 436 | if (dma_buf != NULL) { |
| 425 | 437 | get_dma_buf(dma_buf); |
| 426 | 438 | } else { |
| 427 | | - pr_err("dma_buf is Invalid[%p]\n", dma_buf); |
| | 439 | + pr_err("dma_buf is invalid[%p]\n", dma_buf); |
| 428 | 440 | return -EINVAL; |
| 429 | 441 | } |
| 430 | 442 | |
| 431 | 443 | attach = dma_buf_attach(dma_buf, rga_dev); |
| 432 | 444 | if (IS_ERR(attach)) { |
| 433 | | - pr_err("Failed to attach dma_buf\n"); |
| 434 | | - ret = -EINVAL; |
| | 445 | + ret = PTR_ERR(attach); |
| | 446 | + pr_err("Failed to attach dma_buf, ret[%d]\n", ret); |
| 435 | 447 | goto err_get_attach; |
| 436 | 448 | } |
| 437 | 449 | |
| 438 | 450 | sgt = dma_buf_map_attachment(attach, dir); |
| 439 | 451 | if (IS_ERR(sgt)) { |
| 440 | | - pr_err("Failed to map src attachment\n"); |
| 441 | | - ret = -EINVAL; |
| | 452 | + ret = PTR_ERR(sgt); |
| | 453 | + pr_err("Failed to map attachment, ret[%d]\n", ret); |
| 442 | 454 | goto err_get_sgt; |
| 443 | 455 | } |
| 444 | 456 | |
| .. | .. | |
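The error paths now forward the encoded errno from `dma_buf_attach()` and `dma_buf_map_attachment()` instead of flattening everything to -EINVAL. A condensed sketch of that idiom, including the unwind the `goto` labels imply; the unwind order shown (unmap, detach, put) is the usual dma-buf teardown and an assumption about what the elided labels do in this driver.

```c
#include <linux/dma-buf.h>
#include <linux/err.h>

/* Sketch of ERR_PTR/PTR_ERR propagation with a conventional dma-buf unwind. */
static int rga_attach_example(struct dma_buf *dma_buf, struct device *rga_dev,
			      enum dma_data_direction dir)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret;

	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, rga_dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);	/* keep the real errno (-ENOMEM, -EBUSY, ...) */
		goto err_put;
	}

	sgt = dma_buf_map_attachment(attach, dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_detach;
	}

	/* ... use sgt ... */
	dma_buf_unmap_attachment(attach, sgt, dir);
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	return 0;

err_detach:
	dma_buf_detach(dma_buf, attach);
err_put:
	dma_buf_put(dma_buf);
	return ret;
}
```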
| Old | New | Code |
|---|---|---|
| 474 | 486 | |
| 475 | 487 | dma_buf = dma_buf_get(fd); |
| 476 | 488 | if (IS_ERR(dma_buf)) { |
| 477 | | - pr_err("dma_buf_get fail fd[%d]\n", fd); |
| 478 | | - ret = -EINVAL; |
| | 489 | + ret = PTR_ERR(dma_buf); |
| | 490 | + pr_err("Fail to get dma_buf from fd[%d], ret[%d]\n", fd, ret); |
| 479 | 491 | return ret; |
| 480 | 492 | } |
| 481 | 493 | |
| 482 | 494 | attach = dma_buf_attach(dma_buf, rga_dev); |
| 483 | 495 | if (IS_ERR(attach)) { |
| 484 | | - pr_err("Failed to attach dma_buf\n"); |
| 485 | | - ret = -EINVAL; |
| | 496 | + ret = PTR_ERR(attach); |
| | 497 | + pr_err("Failed to attach dma_buf, ret[%d]\n", ret); |
| 486 | 498 | goto err_get_attach; |
| 487 | 499 | } |
| 488 | 500 | |
| 489 | 501 | sgt = dma_buf_map_attachment(attach, dir); |
| 490 | 502 | if (IS_ERR(sgt)) { |
| 491 | | - pr_err("Failed to map src attachment\n"); |
| 492 | | - ret = -EINVAL; |
| | 503 | + ret = PTR_ERR(sgt); |
| | 504 | + pr_err("Failed to map attachment, ret[%d]\n", ret); |
| 493 | 505 | goto err_get_sgt; |
| 494 | 506 | } |
| 495 | 507 | |
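The fd-based path applies the same change to `dma_buf_get()`, which returns an ERR_PTR-encoded errno when the fd does not refer to a dma-buf. A minimal sketch; the wrapper function is hypothetical.

```c
#include <linux/dma-buf.h>
#include <linux/err.h>

/* dma_buf_get() takes a reference on success; failures come back as ERR_PTR. */
static int rga_dma_buf_get_example(int fd, struct dma_buf **out)
{
	struct dma_buf *dma_buf = dma_buf_get(fd);

	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);	/* e.g. -EBADF for a stale fd */

	*out = dma_buf;				/* caller releases with dma_buf_put() */
	return 0;
}
```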