| .. | .. |
| 53 | 53 | for (i = 0; i < pageCount; i++) { |
| 54 | 54 | vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT); |
| 55 | 55 | if (!vma) { |
| 56 | | - pr_err("failed to get vma\n"); |
| | 56 | + pr_err("page[%d] failed to get vma\n", i); |
| 57 | 57 | ret = RGA_OUT_OF_RESOURCES; |
| 58 | 58 | break; |
| 59 | 59 | } |
| 60 | 60 | |
| 61 | 61 | pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT); |
| 62 | 62 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { |
| 63 | | - pr_err("failed to get pgd\n"); |
| | 63 | + pr_err("page[%d] failed to get pgd\n", i); |
| 64 | 64 | ret = RGA_OUT_OF_RESOURCES; |
| 65 | 65 | break; |
| 66 | 66 | } |
| .. | .. |
| 71 | 71 | */ |
| 72 | 72 | p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); |
| 73 | 73 | if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) { |
| 74 | | - pr_err("failed to get p4d\n"); |
| | 74 | + pr_err("page[%d] failed to get p4d\n", i); |
| 75 | 75 | ret = RGA_OUT_OF_RESOURCES; |
| 76 | 76 | break; |
| 77 | 77 | } |
| .. | .. |
| 82 | 82 | #endif |
| 83 | 83 | |
| 84 | 84 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) { |
| 85 | | - pr_err("failed to get pud\n"); |
| | 85 | + pr_err("page[%d] failed to get pud\n", i); |
| 86 | 86 | ret = RGA_OUT_OF_RESOURCES; |
| 87 | 87 | break; |
| 88 | 88 | } |
| 89 | 89 | pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT); |
| 90 | 90 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { |
| 91 | | - pr_err("failed to get pmd\n"); |
| | 91 | + pr_err("page[%d] failed to get pmd\n", i); |
| 92 | 92 | ret = RGA_OUT_OF_RESOURCES; |
| 93 | 93 | break; |
| 94 | 94 | } |
| 95 | 95 | pte = pte_offset_map_lock(current_mm, pmd, |
| 96 | 96 | (Memory + i) << PAGE_SHIFT, &ptl); |
| 97 | 97 | if (pte_none(*pte)) { |
| 98 | | - pr_err("failed to get pte\n"); |
| | 98 | + pr_err("page[%d] failed to get pte\n", i); |
| 99 | 99 | pte_unmap_unlock(pte, ptl); |
| 100 | 100 | ret = RGA_OUT_OF_RESOURCES; |
| 101 | 101 | break; |
| .. | .. |
| 105 | 105 | pages[i] = pfn_to_page(pfn); |
| 106 | 106 | pte_unmap_unlock(pte, ptl); |
| 107 | 107 | } |
| | 108 | + |
| | 109 | + if (ret == RGA_OUT_OF_RESOURCES && i > 0)
| | 110 | + pr_err("Only got %d bytes from vma, but the current image requires %d bytes\n", |
| | 111 | + (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE)); |
| 108 | 112 | |
| 109 | 113 | return ret; |
| 110 | 114 | } |
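
The loop above is easier to audit when the walk is read as one lookup. A condensed sketch of what `rga_get_user_pages_from_vma()` does per page, assuming `<linux/mm.h>`/`<linux/pgtable.h>`; the hypothetical helper returns NULL where the driver sets `RGA_OUT_OF_RESOURCES` and logs the failing level:

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

/* Resolve one user address to its backing page, or NULL on any hole. */
static struct page *walk_to_page(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;

	pgd = pgd_offset(mm, addr);		/* level 1 */
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);		/* level 2 (may fold away) */
	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);		/* level 3 */
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);		/* level 4 */
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	/* level 5: take the PTE lock so the entry cannot change under us */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return NULL;
	if (pte_none(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return NULL;
	}
	page = pfn_to_page(pte_pfn(*pte));
	pte_unmap_unlock(pte, ptl);

	return page;
}
```

Unlike `get_user_pages()`, a raw walk like this takes no reference on the page, which is one reason it serves only as the fallback path here.
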
| .. | .. |
| 144 | 148 | put_page(pages[i]); |
| 145 | 149 | |
| 146 | 150 | ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm); |
| 147 | | - if (ret < 0) { |
| 148 | | - pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n", |
| 149 | | - result, pageCount); |
| | 151 | + if (ret < 0 && result > 0) { |
| | 152 | + pr_err("Only got %d bytes from user pages, but the current image requires %d bytes\n", |
| | 153 | + (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE)); |
| 150 | 154 | } |
| 151 | 155 | } |
| 152 | 156 | |
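
For context, the partial-success case being reported: `get_user_pages()` can legitimately return fewer pages than requested, and the fix logs how many bytes were actually obtained rather than the raw counts. A minimal sketch of that pattern, assuming a recent kernel where `get_user_pages()` takes no `vmas` argument and pins from `current->mm`; the helper and its names are illustrative, not the driver's:

```c
#include <linux/mm.h>

static int pin_image_pages(unsigned long start, int nr_pages,
			   bool writing, struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(start, nr_pages,
				writing ? FOLL_WRITE : 0, pages);
	mmap_read_unlock(current->mm);

	if (pinned == nr_pages)
		return 0;

	if (pinned > 0) {
		pr_err("only pinned %ld of %d pages\n", pinned, nr_pages);
		/* drop the partial pin before reporting failure */
		while (pinned--)
			put_page(pages[pinned]);
	}

	return -EFAULT;
}
```
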
| .. | .. |
| 177 | 181 | } |
| 178 | 182 | |
| 179 | 183 | /* get sg form pages. */ |
| 180 | | - ret = sg_alloc_table_from_pages(sgt, virt_addr->pages, |
| | 184 | + /* IOVA requires minimum page alignment, so the sgt cannot have an offset */ |
| | 185 | + ret = sg_alloc_table_from_pages(sgt, |
| | 186 | + virt_addr->pages, |
| 181 | 187 | virt_addr->page_count, |
| 182 | | - 0, virt_addr->size, |
| | 188 | + 0, |
| | 189 | + virt_addr->size, |
| 183 | 190 | GFP_KERNEL); |
| 184 | 191 | if (ret) { |
| 185 | 192 | pr_err("sg_alloc_table_from_pages failed"); |
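
The reason for forcing `offset` to 0 is in the new comment: IOMMU mappings are page-granular, so the table must start on a page boundary and any sub-page offset is applied to the resulting IOVA elsewhere. A minimal sketch of the alloc-then-map sequence under that constraint, using `dma_map_sgtable()` (v5.10+); the wrapper name and parameters are hypothetical:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_pages_for_dma(struct device *dev, struct page **pages,
			     unsigned int page_count, unsigned long size,
			     struct sg_table *sgt)
{
	int ret;

	/* offset = 0: start the table at the page boundary */
	ret = sg_alloc_table_from_pages(sgt, pages, page_count, 0,
					size, GFP_KERNEL);
	if (ret)
		return ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}
```
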
| .. | .. |
| 245 | 252 | if (!size) { |
| 246 | 253 | pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n", |
| 247 | 254 | size, count, (unsigned long)offset); |
| | 255 | + rga_dump_memory_parm(memory_parm); |
| 248 | 256 | return -EFAULT; |
| 249 | 257 | } |
| 250 | 258 | |
| 251 | 259 | /* alloc pages and page_table */ |
| 252 | 260 | order = get_order(count * sizeof(struct page *)); |
| | 261 | + if (order >= MAX_ORDER) { |
| | 262 | + pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n", |
| | 263 | + order, MAX_ORDER); |
| | 264 | + return -ENOMEM; |
| | 265 | + } |
| | 266 | + |
| 253 | 267 | pages = (struct page **)__get_free_pages(GFP_KERNEL, order); |
| 254 | 268 | if (pages == NULL) { |
| 255 | | - pr_err("%s can not alloc pages for pages\n", __func__); |
| | 269 | + pr_err("%s can not alloc pages for viraddr pages\n", __func__); |
| 256 | 270 | return -ENOMEM; |
| 257 | 271 | } |
| 258 | 272 | |
| 259 | 273 | /* get pages from virtual address. */ |
| 260 | 274 | ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm); |
| 261 | 275 | if (ret < 0) { |
| 262 | | - pr_err("failed to get pages"); |
| | 276 | + pr_err("failed to get pages from virtual address: 0x%lx\n", |
| | 277 | + (unsigned long)viraddr); |
| 263 | 278 | ret = -EINVAL; |
| 264 | 279 | goto out_free_pages; |
| 265 | 280 | } else if (ret > 0) { |
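
The new guard exists because `get_order()` happily returns an order the buddy allocator cannot satisfy, and `__get_free_pages()` would then warn and fail. A sketch of the same pattern, with the caveat that on pre-v6.4 kernels (which this comparison assumes) valid orders are `0..MAX_ORDER-1`:

```c
#include <linux/gfp.h>

/* hypothetical helper: allocate an array later freed with
 * free_pages((unsigned long)p, *order) */
static void *alloc_entry_array(size_t entries, size_t entry_size, int *order)
{
	*order = get_order(entries * entry_size);
	if (*order >= MAX_ORDER)
		return NULL;	/* request exceeds the buddy allocator */

	return (void *)__get_free_pages(GFP_KERNEL, *order);
}
```

The caller must free with the same order it allocated with, which is why the order is returned alongside the pointer.
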
| .. | .. |
| 301 | 316 | |
| 302 | 317 | if (scheduler->data->mmu == RGA_MMU && |
| 303 | 318 | !(mm_flag & RGA_MEM_UNDER_4G)) { |
| 304 | | - pr_err("%s unsupported Memory larger than 4G!\n", |
| | 319 | + pr_err("%s unsupported memory larger than 4G!\n", |
| 305 | 320 | rga_get_mmu_type_str(scheduler->data->mmu)); |
| 306 | 321 | return false; |
| 307 | 322 | } |
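
`RGA_MEM_UNDER_4G` matters because the RGA2 MMU's page table holds 32-bit entries (note the `GFP_DMA32` page-table allocations later in this diff), so every backing page must sit below 4 GiB. For a physically contiguous range the check reduces to one line; a hypothetical helper:

```c
#include <linux/kernel.h>

/* true if [start, start + len) sits entirely below 4 GiB; len > 0 */
static bool range_is_under_4g(phys_addr_t start, size_t len)
{
	return upper_32_bits(start + len - 1) == 0;
}
```
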
| .. | .. |
| 358 | 373 | struct rga_job *job) |
| 359 | 374 | { |
| 360 | 375 | int ret; |
| | 376 | + int ex_buffer_size; |
| 361 | 377 | uint32_t mm_flag = 0; |
| 362 | 378 | phys_addr_t phys_addr = 0; |
| 363 | 379 | struct rga_dma_buffer *buffer; |
| .. | .. |
| 369 | 385 | if (scheduler == NULL) { |
| 370 | 386 | pr_err("Invalid scheduler device!\n"); |
| 371 | 387 | return -EINVAL; |
| | 388 | + } |
| | 389 | + |
| | 390 | + if (external_buffer->memory_parm.size) |
| | 391 | + ex_buffer_size = external_buffer->memory_parm.size; |
| | 392 | + else |
| | 393 | + ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width, |
| | 394 | + external_buffer->memory_parm.height, |
| | 395 | + external_buffer->memory_parm.format, |
| | 396 | + NULL, NULL, NULL); |
| | 397 | + if (ex_buffer_size <= 0) { |
| | 398 | + pr_err("failed to calculate buffer size!\n"); |
| | 399 | + rga_dump_memory_parm(&external_buffer->memory_parm); |
| | 400 | + return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size; |
| 372 | 401 | } |
| 373 | 402 | |
| 374 | 403 | /* |
| .. | .. |
| 402 | 431 | pr_err("%s core[%d] map dma buffer error!\n", |
| 403 | 432 | __func__, scheduler->core); |
| 404 | 433 | goto free_buffer; |
| | 434 | + } |
| | 435 | + |
| | 436 | + if (buffer->size < ex_buffer_size) { |
| | 437 | + pr_err("Only got %ld bytes from %s = 0x%lx, but the current image requires %d bytes\n", |
| | 438 | + buffer->size, rga_get_memory_type_str(external_buffer->type), |
| | 439 | + (unsigned long)external_buffer->memory, ex_buffer_size); |
| | 440 | + rga_dump_memory_parm(&external_buffer->memory_parm); |
| | 441 | + ret = -EINVAL; |
| | 442 | + goto unmap_buffer; |
| 405 | 443 | } |
| 406 | 444 | |
| 407 | 445 | buffer->scheduler = scheduler; |
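
This hunk stops trusting the caller's geometry alone: after the external buffer is mapped, the size the exporter actually provided is compared against the `ex_buffer_size` computed earlier. The core of the check as a hypothetical dma-buf-flavoured helper, with `required` standing in for `ex_buffer_size`:

```c
#include <linux/dma-buf.h>

static int check_dmabuf_fits(struct dma_buf *dmabuf, size_t required)
{
	/* dmabuf->size is what the exporter allocated, not what
	 * userspace claimed the image needs */
	if (dmabuf->size < required) {
		pr_err("dma-buf holds %zu bytes, image requires %zu bytes\n",
		       dmabuf->size, required);
		return -EINVAL;
	}

	return 0;
}
```
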
| .. | .. |
| 576 | 614 | if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) |
| 577 | 615 | break; |
| 578 | 616 | |
| 579 | | - pr_err("Current RGA mmu[%d] cannot support virtual address!\n", |
| 580 | | - scheduler->data->mmu); |
| | 617 | + pr_err("Current %s[%d] cannot support virtual address!\n", |
| | 618 | + rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu); |
| 581 | 619 | goto free_dma_buffer; |
| 582 | 620 | } |
| 583 | 621 | |
| .. | .. |
| 589 | 627 | internal_buffer->virt_addr = virt_addr; |
| 590 | 628 | internal_buffer->dma_buffer = buffer; |
| 591 | 629 | internal_buffer->mm_flag = mm_flag; |
| 592 | | - internal_buffer->phys_addr = phys_addr ? phys_addr : 0; |
| | 630 | + internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0; |
| 593 | 631 | |
| 594 | 632 | return 0; |
| 595 | 633 | |
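
The `+ virt_addr->offset` fix addresses physically contiguous user buffers whose virtual address does not start on a page boundary: pinning yields a page-aligned physical base, and the sub-page offset must be added back. The arithmetic, as a hypothetical helper:

```c
#include <linux/io.h>
#include <linux/mm.h>

/* physical address of a user VA backed by `first_page` */
static phys_addr_t user_va_to_phys(struct page *first_page, unsigned long va)
{
	/* page_to_phys() is page-aligned; offset_in_page() restores
	 * the bits below PAGE_SHIFT */
	return page_to_phys(first_page) + offset_in_page(va);
}
```
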
| .. | .. |
| 649 | 687 | internal_buffer->memory_parm.format, |
| 650 | 688 | NULL, NULL, NULL); |
| 651 | 689 | if (buffer_size <= 0) { |
| 652 | | - pr_err("Fault to get phys addr size!\n"); |
| | 690 | + pr_err("Failed to get phys addr size!\n"); |
| | 691 | + rga_dump_memory_parm(&internal_buffer->memory_parm); |
| 653 | 692 | return buffer_size == 0 ? -EINVAL : buffer_size; |
| 654 | 693 | } |
| 655 | 694 | |
| .. | .. |
| 674 | 713 | ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev); |
| 675 | 714 | if (ret < 0) { |
| 676 | 715 | pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core); |
| 677 | | - return ret; |
| | 716 | + goto free_dma_buffer; |
| 678 | 717 | } |
| 679 | 718 | } |
| 680 | 719 | |
| .. | .. |
| 686 | 725 | internal_buffer->dma_buffer = buffer; |
| 687 | 726 | |
| 688 | 727 | return 0; |
| | 728 | + |
| | 729 | +free_dma_buffer: |
| | 730 | + kfree(buffer); |
| | 731 | + |
| | 732 | + return ret; |
| 689 | 733 | } |
| 690 | 734 | |
| 691 | 735 | static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer) |
| .. | .. |
| 738 | 782 | |
| 739 | 783 | ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag); |
| 740 | 784 | if (ret < 0) { |
| 741 | | - pr_err("%s iommu_map virtual address error!\n", __func__); |
| | 785 | + pr_err("%s map virtual address error!\n", __func__); |
| 742 | 786 | return ret; |
| 743 | 787 | } |
| 744 | 788 | |
| .. | .. |
| 751 | 795 | |
| 752 | 796 | ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job); |
| 753 | 797 | if (ret < 0) { |
| 754 | | - pr_err("%s iommu_map physical address error!\n", __func__); |
| | 798 | + pr_err("%s map physical address error!\n", __func__); |
| 755 | 799 | return ret; |
| 756 | 800 | } |
| 757 | 801 | |
| .. | .. |
| 1130 | 1174 | |
| 1131 | 1175 | if (job->flags & RGA_JOB_USE_HANDLE) { |
| 1132 | 1176 | order = get_order(page_count * sizeof(uint32_t *)); |
| | 1177 | + if (order >= MAX_ORDER) { |
| | 1178 | + pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n", |
| | 1179 | + order, MAX_ORDER); |
| | 1180 | + return -ENOMEM; |
| | 1181 | + } |
| | 1182 | + |
| 1133 | 1183 | page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); |
| 1134 | 1184 | if (page_table == NULL) { |
| 1135 | | - pr_err("%s can not alloc pages for pages, order = %d\n", |
| | 1185 | + pr_err("%s can not alloc pages for page_table, order = %d\n", |
| 1136 | 1186 | __func__, order); |
| 1137 | 1187 | return -ENOMEM; |
| 1138 | 1188 | } |
| .. | .. |
| 1189 | 1239 | |
| 1190 | 1240 | if (job->flags & RGA_JOB_USE_HANDLE) { |
| 1191 | 1241 | order = get_order(page_count * sizeof(uint32_t *)); |
| | 1242 | + if (order >= MAX_ORDER) { |
| | 1243 | + pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n", |
| | 1244 | + order, MAX_ORDER); |
| | 1245 | + return -ENOMEM; |
| | 1246 | + } |
| | 1247 | + |
| 1192 | 1248 | page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); |
| 1193 | 1249 | if (page_table == NULL) { |
| 1194 | | - pr_err("%s can not alloc pages for pages, order = %d\n", |
| | 1250 | + pr_err("%s can not alloc pages for page_table, order = %d\n", |
| 1195 | 1251 | __func__, order); |
| 1196 | 1252 | return -ENOMEM; |
| 1197 | 1253 | } |
| .. | .. |
| 1334 | 1390 | uint64_t handle, |
| 1335 | 1391 | uint64_t *channel_addr, |
| 1336 | 1392 | struct rga_internal_buffer **buf, |
| | 1393 | + int require_size, |
| 1337 | 1394 | enum dma_data_direction dir) |
| 1338 | 1395 | { |
| 1339 | 1396 | int ret = 0; |
| .. | .. |
| 1369 | 1426 | return ret; |
| 1370 | 1427 | } |
| 1371 | 1428 | |
| | 1429 | + if (internal_buffer->size < require_size) { |
| | 1430 | + ret = -EINVAL; |
| | 1431 | + pr_err("Only got %ld bytes from handle[%lu], but %d bytes are required\n", |
| | 1432 | + internal_buffer->size, (unsigned long)handle, require_size); |
| | 1433 | + |
| | 1434 | + goto put_internal_buffer; |
| | 1435 | + } |
| | 1436 | + |
| 1372 | 1437 | if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) { |
| 1373 | 1438 | /* |
| 1374 | 1439 | * Some userspace virtual addresses do not have an |
| .. | .. |
| 1378 | 1443 | ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir); |
| 1379 | 1444 | if (ret < 0) { |
| 1380 | 1445 | pr_err("sync sgt for device error!\n"); |
| 1381 | | - return ret; |
| | 1446 | + goto put_internal_buffer; |
| 1382 | 1447 | } |
| 1383 | 1448 | } |
| 1384 | 1449 | |
| 1385 | 1450 | return 0; |
| | 1451 | + |
| | 1452 | +put_internal_buffer: |
| | 1453 | + mutex_lock(&mm->lock); |
| | 1454 | + kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer); |
| | 1455 | + mutex_unlock(&mm->lock); |
| | 1456 | + |
| | 1457 | + return ret; |
| | 1458 | + |
| 1386 | 1459 | } |
| 1387 | 1460 | |
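
The new `put_internal_buffer` label closes a reference leak: both the size check and the DMA sync failure now fire after the lookup has taken a `kref`, so returning without a put would strand the buffer. The ownership rule, sketched with the driver's names and the assumption that the lookup is an `idr_find()` under `mm->lock` (the real `rga_mm_get_buffer()` also syncs caches and returns the mapped address):

```c
/* hypothetical lookup helper; reference ownership is the point */
static int get_checked_buffer(struct rga_mm *mm, uint32_t handle,
			      size_t need, struct rga_internal_buffer **out)
{
	struct rga_internal_buffer *buf;
	int ret;

	mutex_lock(&mm->lock);
	buf = idr_find(&mm->memory_idr, handle);
	if (buf)
		kref_get(&buf->refcount);	/* lookup takes a reference */
	mutex_unlock(&mm->lock);

	if (!buf)
		return -ENOENT;

	if (buf->size < need) {
		ret = -EINVAL;
		goto put;	/* too small: give the reference back */
	}

	*out = buf;		/* reference transferred to the caller */
	return 0;

put:
	mutex_lock(&mm->lock);
	kref_put(&buf->refcount, rga_mm_kref_release_buffer);
	mutex_unlock(&mm->lock);
	return ret;
}
```
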
| 1388 | 1461 | static void rga_mm_put_buffer(struct rga_mm *mm, |
| .. | .. |
| 1397 | 1470 | mutex_lock(&mm->lock); |
| 1398 | 1471 | kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer); |
| 1399 | 1472 | mutex_unlock(&mm->lock); |
| 1400 | | -} |
| 1401 | | - |
| 1402 | | -static int rga_mm_get_channel_handle_info(struct rga_mm *mm, |
| 1403 | | - struct rga_job *job, |
| 1404 | | - struct rga_img_info_t *img, |
| 1405 | | - struct rga_job_buffer *job_buf, |
| 1406 | | - enum dma_data_direction dir) |
| 1407 | | -{ |
| 1408 | | - int ret = 0; |
| 1409 | | - int handle = 0; |
| 1410 | | - |
| 1411 | | - /* using third-address */ |
| 1412 | | - if (img->uv_addr > 0) { |
| 1413 | | - handle = img->yrgb_addr; |
| 1414 | | - if (handle > 0) { |
| 1415 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
| 1416 | | - &job_buf->y_addr, dir); |
| 1417 | | - if (ret < 0) { |
| 1418 | | - pr_err("handle[%d] Can't get src y/rgb address info!\n", handle); |
| 1419 | | - return ret; |
| 1420 | | - } |
| 1421 | | - } |
| 1422 | | - |
| 1423 | | - handle = img->uv_addr; |
| 1424 | | - if (handle > 0) { |
| 1425 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr, |
| 1426 | | - &job_buf->uv_addr, dir); |
| 1427 | | - if (ret < 0) { |
| 1428 | | - pr_err("handle[%d] Can't get src uv address info!\n", handle); |
| 1429 | | - return ret; |
| 1430 | | - } |
| 1431 | | - } |
| 1432 | | - |
| 1433 | | - handle = img->v_addr; |
| 1434 | | - if (handle > 0) { |
| 1435 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr, |
| 1436 | | - &job_buf->v_addr, dir); |
| 1437 | | - if (ret < 0) { |
| 1438 | | - pr_err("handle[%d] Can't get src uv address info!\n", handle); |
| 1439 | | - return ret; |
| 1440 | | - } |
| 1441 | | - } |
| 1442 | | - } else { |
| 1443 | | - handle = img->yrgb_addr; |
| 1444 | | - if (handle > 0) { |
| 1445 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
| 1446 | | - &job_buf->addr, dir); |
| 1447 | | - if (ret < 0) { |
| 1448 | | - pr_err("handle[%d] Can't get src y/rgb address info!\n", handle); |
| 1449 | | - return ret; |
| 1450 | | - } |
| 1451 | | - } |
| 1452 | | - |
| 1453 | | - rga_convert_addr(img, false); |
| 1454 | | - } |
| 1455 | | - |
| 1456 | | - if (job->scheduler->data->mmu == RGA_MMU && |
| 1457 | | - rga_mm_is_need_mmu(job, job_buf->addr)) { |
| 1458 | | - ret = rga_mm_set_mmu_base(job, img, job_buf); |
| 1459 | | - if (ret < 0) { |
| 1460 | | - pr_err("Can't set RGA2 MMU_BASE from handle!\n"); |
| 1461 | | - return ret; |
| 1462 | | - } |
| 1463 | | - } |
| 1464 | | - |
| 1465 | | - return 0; |
| 1466 | 1473 | } |
| 1467 | 1474 | |
| 1468 | 1475 | static void rga_mm_put_channel_handle_info(struct rga_mm *mm, |
| .. | .. |
| 1481 | 1488 | free_pages((unsigned long)job_buf->page_table, job_buf->order); |
| 1482 | 1489 | } |
| 1483 | 1490 | |
| | 1491 | +static int rga_mm_get_channel_handle_info(struct rga_mm *mm, |
| | 1492 | + struct rga_job *job, |
| | 1493 | + struct rga_img_info_t *img, |
| | 1494 | + struct rga_job_buffer *job_buf, |
| | 1495 | + enum dma_data_direction dir) |
| | 1496 | +{ |
| | 1497 | + int ret = 0; |
| | 1498 | + int handle = 0; |
| | 1499 | + int img_size, yrgb_size, uv_size, v_size; |
| | 1500 | + |
| | 1501 | + img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format, |
| | 1502 | + &yrgb_size, &uv_size, &v_size); |
| | 1503 | + if (img_size <= 0) { |
| | 1504 | + pr_err("Image size calculation error! width = %d, height = %d, format = %s\n", |
| | 1505 | + img->vir_w, img->vir_h, rga_get_format_name(img->format)); |
| | 1506 | + return -EINVAL; |
| | 1507 | + } |
| | 1508 | + |
| | 1509 | + /* using third-address */ |
| | 1510 | + if (img->uv_addr > 0) { |
| | 1511 | + handle = img->yrgb_addr; |
| | 1512 | + if (handle > 0) { |
| | 1513 | + ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
| | 1514 | + &job_buf->y_addr, yrgb_size, dir); |
| | 1515 | + if (ret < 0) { |
| | 1516 | + pr_err("handle[%d] Can't get y/rgb address info!\n", handle); |
| | 1517 | + return ret; |
| | 1518 | + } |
| | 1519 | + } |
| | 1520 | + |
| | 1521 | + handle = img->uv_addr; |
| | 1522 | + if (handle > 0) { |
| | 1523 | + ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr, |
| | 1524 | + &job_buf->uv_addr, uv_size, dir); |
| | 1525 | + if (ret < 0) { |
| | 1526 | + pr_err("handle[%d] Can't get uv address info!\n", handle); |
| | 1527 | + return ret; |
| | 1528 | + } |
| | 1529 | + } |
| | 1530 | + |
| | 1531 | + handle = img->v_addr; |
| | 1532 | + if (handle > 0) { |
| | 1533 | + ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr, |
| | 1534 | + &job_buf->v_addr, v_size, dir); |
| | 1535 | + if (ret < 0) { |
| | 1536 | + pr_err("handle[%d] Can't get v address info!\n", handle); |
| | 1537 | + return ret; |
| | 1538 | + } |
| | 1539 | + } |
| | 1540 | + } else { |
| | 1541 | + handle = img->yrgb_addr; |
| | 1542 | + if (handle > 0) { |
| | 1543 | + ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
| | 1544 | + &job_buf->addr, img_size, dir); |
| | 1545 | + if (ret < 0) { |
| | 1546 | + pr_err("handle[%d] Can't get y/rgb address info!\n", handle); |
| | 1547 | + return ret; |
| | 1548 | + } |
| | 1549 | + } |
| | 1550 | + |
| | 1551 | + rga_convert_addr(img, false); |
| | 1552 | + } |
| | 1553 | + |
| | 1554 | + if (job->scheduler->data->mmu == RGA_MMU && |
| | 1555 | + rga_mm_is_need_mmu(job, job_buf->addr)) { |
| | 1556 | + ret = rga_mm_set_mmu_base(job, img, job_buf); |
| | 1557 | + if (ret < 0) { |
| | 1558 | + pr_err("Can't set RGA2 MMU_BASE from handle!\n"); |
| | 1559 | + |
| | 1560 | + rga_mm_put_channel_handle_info(mm, job, job_buf, dir); |
| | 1561 | + return ret; |
| | 1562 | + } |
| | 1563 | + } |
| | 1564 | + |
| | 1565 | + return 0; |
| | 1566 | +} |
| | 1567 | + |
| 1484 | 1568 | static int rga_mm_get_handle_info(struct rga_job *job) |
| 1485 | 1569 | { |
| 1486 | 1570 | int ret = 0; |
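
The point of threading `yrgb_size`/`uv_size`/`v_size` into each `rga_mm_get_buffer()` call is that every plane handle is now validated against its own plane's requirement. A worked example with hypothetical numbers, assuming `rga_image_size_cal()` splits a two-plane 4:2:0 format the usual way:

```c
/*
 * NV12 at a 1280x720 virtual size: the per-plane requirements the
 * handle checks would compare against (illustrative numbers; the
 * driver obtains them from rga_image_size_cal()).
 */
static void nv12_plane_sizes_example(void)
{
	int w = 1280, h = 720;
	int yrgb_size = w * h;		/* 921600 bytes: full-resolution Y plane */
	int uv_size = w * h / 2;	/* 460800 bytes: interleaved CbCr at 4:2:0 */
	int v_size = 0;			/* NV12 has no separate V plane */
	int img_size = yrgb_size + uv_size + v_size;	/* 1382400 bytes */

	/*
	 * A Y handle smaller than yrgb_size or a UV handle smaller than
	 * uv_size is now rejected before the job runs; the single-handle
	 * path checks against img_size instead.
	 */
	(void)img_size;
}
```
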
| .. | .. |
| 1496 | 1580 | &job->src_buffer, |
| 1497 | 1581 | DMA_TO_DEVICE); |
| 1498 | 1582 | if (ret < 0) { |
| 1499 | | - pr_err("Can't get src buffer third info!\n"); |
| | 1583 | + pr_err("Can't get src buffer info from handle!\n"); |
| 1500 | 1584 | return ret; |
| 1501 | 1585 | } |
| 1502 | 1586 | } |
| .. | .. |
| 1506 | 1590 | &job->dst_buffer, |
| 1507 | 1591 | DMA_TO_DEVICE); |
| 1508 | 1592 | if (ret < 0) { |
| 1509 | | - pr_err("Can't get dst buffer third info!\n"); |
| | 1593 | + pr_err("Can't get dst buffer info from handle!\n"); |
| 1510 | 1594 | return ret; |
| 1511 | 1595 | } |
| 1512 | 1596 | } |
| .. | .. |
| 1528 | 1612 | DMA_BIDIRECTIONAL); |
| 1529 | 1613 | } |
| 1530 | 1614 | if (ret < 0) { |
| 1531 | | - pr_err("Can't get pat buffer third info!\n"); |
| | 1615 | + pr_err("Can't get pat buffer info from handle!\n"); |
| 1532 | 1616 | return ret; |
| 1533 | 1617 | } |
| 1534 | 1618 | } |
| .. | .. |
| 1869 | 1953 | uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer, |
| 1870 | 1954 | struct rga_session *session) |
| 1871 | 1955 | { |
| 1872 | | - int ret = 0; |
| | 1956 | + int ret = 0, new_id; |
| 1873 | 1957 | struct rga_mm *mm; |
| 1874 | 1958 | struct rga_internal_buffer *internal_buffer; |
| 1875 | 1959 | |
| .. | .. |
| 1911 | 1995 | * allocation under our spinlock. |
| 1912 | 1996 | */ |
| 1913 | 1997 | idr_preload(GFP_KERNEL); |
| 1914 | | - internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL); |
| | 1998 | + new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT); |
| 1915 | 1999 | idr_preload_end(); |
| | 2000 | + if (new_id < 0) { |
| | 2001 | + pr_err("internal_buffer alloc id failed!\n"); |
| | 2002 | + goto FREE_INTERNAL_BUFFER; |
| | 2003 | + } |
| 1916 | 2004 | |
| | 2005 | + internal_buffer->handle = new_id; |
| 1917 | 2006 | mm->buffer_count++; |
| 1918 | 2007 | |
| 1919 | 2008 | if (DEBUGGER_EN(MM)) { |
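
Finally, the import path switches from `idr_alloc()` to `idr_alloc_cyclic()`: handles now walk forward through the ID space instead of immediately reusing the last freed value, so a stale handle held by userspace fails the lookup rather than silently resolving to a newer buffer. The standard preload pattern, sketched with a plain spinlock standing in for the driver's locking:

```c
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(handle_idr);
static DEFINE_SPINLOCK(handle_lock);

static int alloc_handle(void *object)
{
	int id;

	/* preallocate outside the lock with a sleeping GFP ... */
	idr_preload(GFP_KERNEL);
	spin_lock(&handle_lock);
	/* ... so the locked allocation can use GFP_NOWAIT;
	 * ids start at 1 so that 0 can mean "no handle" */
	id = idr_alloc_cyclic(&handle_idr, object, 1, 0, GFP_NOWAIT);
	spin_unlock(&handle_lock);
	idr_preload_end();

	return id;	/* negative errno on failure */
}
```
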
|---|