@@ -203,7 +203,7 @@
 					   size_t size, u64 dma_limit,
 					   struct device *dev)
 {
-	struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long shift, iova_len, iova = 0;
 
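The `(void *)` cast added in this and the following hunks is needed because `struct iommu_dma_cookie` is private to `drivers/iommu/dma-iommu.c`, so `domain->iova_cookie` has an opaque type that no driver header can name; the driver reads it through a mirror type of its own instead. A minimal sketch of that assumption (the mirror layout below is illustrative, not the upstream definition):

```c
#include <linux/iova.h>

/*
 * Illustrative mirror of the kernel's private iommu_dma_cookie.
 * The driver relies on the iova_domain being the first member of
 * the real structure, which is what makes reading it through a
 * (void *) cast of domain->iova_cookie work. Layout is an assumption.
 */
struct rga_iommu_dma_cookie {
	struct iova_domain iovad;	/* must stay the first member */
};
```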
|---|
@@ -246,7 +246,7 @@
 static void rga_iommu_dma_free_iova(struct iommu_domain *domain,
 				    dma_addr_t iova, size_t size)
 {
-	struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 
 	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
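`alloc_iova_fast()` and `free_iova_fast()` both work in IOVA page-frame units rather than bytes, which is why the free path above shifts `size` down by `iova_shift(iovad)`. A hedged sketch of the matching allocation side (the function name and failure convention are assumptions for illustration):

```c
#include <linux/iova.h>

/*
 * Hypothetical allocator showing the unit conversions that pair with
 * the free_iova_fast() call above: bytes -> page frames going in,
 * page frames -> bus address coming out.
 */
static dma_addr_t example_alloc_iova(struct iova_domain *iovad,
				     size_t size, u64 dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn;

	/* the last argument enables the per-CPU rcache fast path */
	pfn = alloc_iova_fast(iovad, size >> shift, dma_limit >> shift, true);
	if (!pfn)
		return 0;

	return (dma_addr_t)pfn << shift;
}
```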
|---|
@@ -285,7 +285,7 @@
 	}
 
 	domain = rga_iommu_get_dma_domain(rga_dev);
-	cookie = domain->iova_cookie;
+	cookie = (void *)domain->iova_cookie;
 	iovad = &cookie->iovad;
 	align_size = iova_align(iovad, size);
 
|---|
@@ -330,7 +330,7 @@
 	}
 
 	domain = rga_iommu_get_dma_domain(rga_dev);
-	cookie = domain->iova_cookie;
+	cookie = (void *)domain->iova_cookie;
 	iovad = &cookie->iovad;
 	align_size = iova_align(iovad, size);
 
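In both mapping paths, `iova_align()` rounds the requested byte size up to the domain's granule so that a buffer always occupies a whole number of IOVA pages. A small illustration (the 4 KiB granule is an assumption; the granule is fixed when the `iova_domain` is initialized):

```c
#include <linux/iova.h>

/* With a 4 KiB granule, a 4097-byte request must reserve two pages. */
static size_t example_align(struct iova_domain *iovad)
{
	return iova_align(iovad, 4097);	/* 8192 when granule == 4096 */
}
```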
|---|
@@ -394,12 +394,20 @@
 {
 	int ret = 0;
 	void *vaddr;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+	struct iosys_map map;
+#endif
 	struct dma_buf *dma_buf;
 
 	dma_buf = rga_dma_buffer->dma_buf;
 
 	if (!IS_ERR_OR_NULL(dma_buf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+		ret = dma_buf_vmap(dma_buf, &map);
+		vaddr = ret ? NULL : map.vaddr;
+#else
 		vaddr = dma_buf_vmap(dma_buf);
+#endif
 		if (vaddr) {
 			ret = rga_virtual_memory_check(vaddr, img->vir_w,
 						       img->vir_h, img->format, img->yrgb_addr);
@@ -407,8 +415,11 @@
 			pr_err("can't vmap the dma buffer!\n");
 			return -EINVAL;
 		}
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+		dma_buf_vunmap(dma_buf, &map);
+#else
 		dma_buf_vunmap(dma_buf, vaddr);
+#endif
 	}
 
 	return ret;
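The version guard reflects the `dma_buf_vmap()` signature change: in the first branch it fills a caller-provided `struct iosys_map` and returns 0 on success instead of returning the pointer directly, and the matching `dma_buf_vunmap()` takes the same map. A minimal sketch of that contract, with an `is_iomem` check added defensively (the check is this sketch's assumption; the patch itself uses `map.vaddr` unconditionally on success):

```c
#include <linux/dma-buf.h>
#include <linux/iosys-map.h>

/*
 * Wraps the iosys_map-based dma_buf_vmap() contract: 0 on success,
 * CPU pointer in map->vaddr. I/O-memory mappings are rejected here
 * because dereferencing vaddr_iomem as a plain pointer is invalid.
 */
static void *example_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	if (dma_buf_vmap(buf, map))
		return NULL;

	return map->is_iomem ? NULL : map->vaddr;
}
```

Keeping `map` alive until `dma_buf_vunmap(dma_buf, &map)` runs is also why the patch declares it at function scope rather than inside the `if` block.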