| .. | .. |
|---|
| 53 | 53 | for (i = 0; i < pageCount; i++) { |
|---|
| 54 | 54 | vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT); |
|---|
| 55 | 55 | if (!vma) { |
|---|
| 56 | | - pr_err("failed to get vma\n"); |
|---|
| 56 | + pr_err("page[%d] failed to get vma\n", i); |
|---|
| 57 | 57 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 58 | 58 | break; |
|---|
| 59 | 59 | } |
|---|
| 60 | 60 | |
|---|
| 61 | 61 | pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT); |
|---|
| 62 | 62 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { |
|---|
| 63 | | - pr_err("failed to get pgd\n"); |
|---|
| 63 | + pr_err("page[%d] failed to get pgd\n", i); |
|---|
| 64 | 64 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 65 | 65 | break; |
|---|
| 66 | 66 | } |
|---|
| .. | .. |
|---|
| 71 | 71 | */ |
|---|
| 72 | 72 | p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); |
|---|
| 73 | 73 | if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) { |
|---|
| 74 | | - pr_err("failed to get p4d\n"); |
|---|
| 74 | + pr_err("page[%d] failed to get p4d\n", i); |
|---|
| 75 | 75 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 76 | 76 | break; |
|---|
| 77 | 77 | } |
|---|
| .. | .. |
|---|
| 82 | 82 | #endif |
|---|
| 83 | 83 | |
|---|
| 84 | 84 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) { |
|---|
| 85 | | - pr_err("failed to get pud\n"); |
|---|
| 85 | + pr_err("page[%d] failed to get pud\n", i); |
|---|
| 86 | 86 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 87 | 87 | break; |
|---|
| 88 | 88 | } |
|---|
| 89 | 89 | pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT); |
|---|
| 90 | 90 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { |
|---|
| 91 | | - pr_err("failed to get pmd\n"); |
|---|
| 91 | + pr_err("page[%d] failed to get pmd\n", i); |
|---|
| 92 | 92 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 93 | 93 | break; |
|---|
| 94 | 94 | } |
|---|
| 95 | 95 | pte = pte_offset_map_lock(current_mm, pmd, |
|---|
| 96 | 96 | (Memory + i) << PAGE_SHIFT, &ptl); |
|---|
| 97 | 97 | if (pte_none(*pte)) { |
|---|
| 98 | | - pr_err("failed to get pte\n"); |
|---|
| 98 | + pr_err("page[%d] failed to get pte\n", i); |
|---|
| 99 | 99 | pte_unmap_unlock(pte, ptl); |
|---|
| 100 | 100 | ret = RGA_OUT_OF_RESOURCES; |
|---|
| 101 | 101 | break; |
|---|
| .. | .. |
|---|
| 105 | 105 | pages[i] = pfn_to_page(pfn); |
|---|
| 106 | 106 | pte_unmap_unlock(pte, ptl); |
|---|
| 107 | 107 | } |
|---|
| 108 | + |
|---|
| 109 | + if (ret == RGA_OUT_OF_RESOURCES && i > 0) |
|---|
| 110 | + pr_err("Only get buffer %d byte from vma, but current image required %d byte", |
|---|
| 111 | + (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE)); |
|---|
| 108 | 112 | |
|---|
| 109 | 113 | return ret; |
|---|
| 110 | 114 | } |
|---|
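
The hunk above tags each failure in the user-page walk with the page index it stopped at, and reports how many bytes were resolved before the walk broke off. For reference, here is a minimal consolidated sketch of the same pgd → p4d → pud → pmd → pte lookup using standard kernel helpers; `walk_to_pte()` is a hypothetical name for illustration, not part of this driver.

```c
#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Minimal sketch of the five-level page-table walk used above.
 * Returns a mapped and locked pte on success, NULL on any miss,
 * mirroring the error pattern in the driver code.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
			  spinlock_t **ptl)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	/* pte_offset_map_lock() takes the page-table lock for this pmd. */
	return pte_offset_map_lock(mm, pmd, addr, ptl);
}
```

As in the driver code, the caller must balance the lock with `pte_unmap_unlock()` once it has read the PTE.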
| .. | .. |
|---|
| 144 | 148 | put_page(pages[i]); |
|---|
| 145 | 149 | |
|---|
| 146 | 150 | ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm); |
|---|
| 147 | | - if (ret < 0) { |
|---|
| 148 | | - pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n", |
|---|
| 149 | | - result, pageCount); |
|---|
| 151 | + if (ret < 0 && result > 0) { |
|---|
| 152 | + pr_err("Only get buffer %d byte from user pages, but current image required %d byte\n", |
|---|
| 153 | + (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE)); |
|---|
| 150 | 154 | } |
|---|
| 151 | 155 | } |
|---|
| 152 | 156 | |
|---|
| .. | .. |
|---|
| 177 | 181 | } |
|---|
| 178 | 182 | |
|---|
| 179 | 183 | /* get sg from pages. */ |
|---|
| 180 | | - ret = sg_alloc_table_from_pages(sgt, virt_addr->pages, |
|---|
| 184 | + /* iova requires minimum page alignment, so sgt cannot have offset */ |
|---|
| 185 | + ret = sg_alloc_table_from_pages(sgt, |
|---|
| 186 | + virt_addr->pages, |
|---|
| 181 | 187 | virt_addr->page_count, |
|---|
| 182 | | - 0, virt_addr->size, |
|---|
| 188 | + 0, |
|---|
| 189 | + virt_addr->size, |
|---|
| 183 | 190 | GFP_KERNEL); |
|---|
| 184 | 191 | if (ret) { |
|---|
| 185 | 192 | pr_err("sg_alloc_table_from_pages failed"); |
|---|
| .. | .. |
|---|
| 245 | 252 | if (!size) { |
|---|
| 246 | 253 | pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n", |
|---|
| 247 | 254 | size, count, (unsigned long)offset); |
|---|
| 255 | + rga_dump_memory_parm(memory_parm); |
|---|
| 248 | 256 | return -EFAULT; |
|---|
| 249 | 257 | } |
|---|
| 250 | 258 | |
|---|
| 251 | 259 | /* alloc pages and page_table */ |
|---|
| 252 | 260 | order = get_order(count * sizeof(struct page *)); |
|---|
| 261 | + if (order >= MAX_ORDER) { |
|---|
| 262 | + pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n", |
|---|
| 263 | + order, MAX_ORDER); |
|---|
| 264 | + return -ENOMEM; |
|---|
| 265 | + } |
|---|
| 266 | + |
|---|
| 253 | 267 | pages = (struct page **)__get_free_pages(GFP_KERNEL, order); |
|---|
| 254 | 268 | if (pages == NULL) { |
|---|
| 255 | | - pr_err("%s can not alloc pages for pages\n", __func__); |
|---|
| 269 | + pr_err("%s can not alloc pages for viraddr pages\n", __func__); |
|---|
| 256 | 270 | return -ENOMEM; |
|---|
| 257 | 271 | } |
|---|
| 258 | 272 | |
|---|
| 259 | 273 | /* get pages from virtual address. */ |
|---|
| 260 | 274 | ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm); |
|---|
| 261 | 275 | if (ret < 0) { |
|---|
| 262 | | - pr_err("failed to get pages"); |
|---|
| 276 | + pr_err("failed to get pages from virtual adrees: 0x%lx\n", |
|---|
| 277 | + (unsigned long)viraddr); |
|---|
| 263 | 278 | ret = -EINVAL; |
|---|
| 264 | 279 | goto out_free_pages; |
|---|
| 265 | 280 | } else if (ret > 0) { |
|---|
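
The new `order >= MAX_ORDER` check above prevents `__get_free_pages()` from being asked for a block the buddy allocator can never provide. A minimal sketch of the same guard, under the same `order >= MAX_ORDER` convention used by this patch; `alloc_page_array()` and `free_page_array()` are illustrative names, not driver API.

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * get_order() rounds the byte size up to a power-of-two number of pages
 * and returns that order; orders at or above MAX_ORDER are refused here
 * instead of letting __get_free_pages() fail (or warn) later.
 */
static struct page **alloc_page_array(int count, int *out_order)
{
	int order = get_order(count * sizeof(struct page *));

	if (order >= MAX_ORDER)
		return NULL;	/* beyond what the buddy allocator serves */

	*out_order = order;
	return (struct page **)__get_free_pages(GFP_KERNEL, order);
}

static void free_page_array(struct page **pages, int order)
{
	free_pages((unsigned long)pages, order);
}
```

Keeping the order around matters because `free_pages()` must be given the same order used for the allocation, which is why the driver stores it in `job_buf->order` elsewhere in this file.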
| .. | .. |
|---|
| 301 | 316 | |
|---|
| 302 | 317 | if (scheduler->data->mmu == RGA_MMU && |
|---|
| 303 | 318 | !(mm_flag & RGA_MEM_UNDER_4G)) { |
|---|
| 304 | | - pr_err("%s unsupported Memory larger than 4G!\n", |
|---|
| 319 | + pr_err("%s unsupported memory larger than 4G!\n", |
|---|
| 305 | 320 | rga_get_mmu_type_str(scheduler->data->mmu)); |
|---|
| 306 | 321 | return false; |
|---|
| 307 | 322 | } |
|---|
| .. | .. |
|---|
| 358 | 373 | struct rga_job *job) |
|---|
| 359 | 374 | { |
|---|
| 360 | 375 | int ret; |
|---|
| 376 | + int ex_buffer_size; |
|---|
| 361 | 377 | uint32_t mm_flag = 0; |
|---|
| 362 | 378 | phys_addr_t phys_addr = 0; |
|---|
| 363 | 379 | struct rga_dma_buffer *buffer; |
|---|
| .. | .. |
|---|
| 369 | 385 | if (scheduler == NULL) { |
|---|
| 370 | 386 | pr_err("Invalid scheduler device!\n"); |
|---|
| 371 | 387 | return -EINVAL; |
|---|
| 388 | + } |
|---|
| 389 | + |
|---|
| 390 | + if (external_buffer->memory_parm.size) |
|---|
| 391 | + ex_buffer_size = external_buffer->memory_parm.size; |
|---|
| 392 | + else |
|---|
| 393 | + ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width, |
|---|
| 394 | + external_buffer->memory_parm.height, |
|---|
| 395 | + external_buffer->memory_parm.format, |
|---|
| 396 | + NULL, NULL, NULL); |
|---|
| 397 | + if (ex_buffer_size <= 0) { |
|---|
| 398 | + pr_err("failed to calculating buffer size!\n"); |
|---|
| 399 | + rga_dump_memory_parm(&external_buffer->memory_parm); |
|---|
| 400 | + return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size; |
|---|
| 372 | 401 | } |
|---|
| 373 | 402 | |
|---|
| 374 | 403 | /* |
|---|
| .. | .. |
|---|
| 404 | 433 | goto free_buffer; |
|---|
| 405 | 434 | } |
|---|
| 406 | 435 | |
|---|
| 436 | + if (buffer->size < ex_buffer_size) { |
|---|
| 437 | + pr_err("Only get buffer %ld byte from %s = 0x%lx, but current image required %d byte\n", |
|---|
| 438 | + buffer->size, rga_get_memory_type_str(external_buffer->type), |
|---|
| 439 | + (unsigned long)external_buffer->memory, ex_buffer_size); |
|---|
| 440 | + rga_dump_memory_parm(&external_buffer->memory_parm); |
|---|
| 441 | + ret = -EINVAL; |
|---|
| 442 | + goto unmap_buffer; |
|---|
| 443 | + } |
|---|
| 444 | + |
|---|
| 407 | 445 | buffer->scheduler = scheduler; |
|---|
| 408 | 446 | |
|---|
| 409 | 447 | if (rga_mm_check_range_sgt(buffer->sgt)) |
|---|
| .. | .. |
|---|
| 417 | 455 | phys_addr = sg_phys(buffer->sgt->sgl); |
|---|
| 418 | 456 | if (phys_addr == 0) { |
|---|
| 419 | 457 | pr_err("%s get physical address error!", __func__); |
|---|
| 458 | + ret = -EFAULT; |
|---|
| 420 | 459 | goto unmap_buffer; |
|---|
| 421 | 460 | } |
|---|
| 422 | 461 | |
|---|
| .. | .. |
|---|
| 533 | 572 | phys_addr = sg_phys(sgt->sgl); |
|---|
| 534 | 573 | if (phys_addr == 0) { |
|---|
| 535 | 574 | pr_err("%s get physical address error!", __func__); |
|---|
| 575 | + ret = -EFAULT; |
|---|
| 536 | 576 | goto free_sgt; |
|---|
| 537 | 577 | } |
|---|
| 538 | 578 | |
|---|
| 539 | 579 | mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS; |
|---|
| 540 | 580 | } |
|---|
| 581 | + |
|---|
| 582 | + /* |
|---|
| 583 | + * Some userspace virtual addresses do not have an |
|---|
| 584 | + * interface for flushing the cache, so it is mandatory |
|---|
| 585 | + * to flush the cache when the virtual address is used. |
|---|
| 586 | + */ |
|---|
| 587 | + mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE; |
|---|
| 541 | 588 | |
|---|
| 542 | 589 | if (!rga_mm_check_memory_limit(scheduler, mm_flag)) { |
|---|
| 543 | 590 | pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n", |
|---|
| .. | .. |
|---|
| 576 | 623 | if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) |
|---|
| 577 | 624 | break; |
|---|
| 578 | 625 | |
|---|
| 579 | | - pr_err("Current RGA mmu[%d] cannot support virtual address!\n", |
|---|
| 580 | | - scheduler->data->mmu); |
|---|
| 626 | + pr_err("Current %s[%d] cannot support physically discontinuous virtual address!\n", |
|---|
| 627 | + rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu); |
|---|
| 628 | + ret = -EOPNOTSUPP; |
|---|
| 581 | 629 | goto free_dma_buffer; |
|---|
| 582 | 630 | } |
|---|
| 583 | 631 | |
|---|
| .. | .. |
|---|
| 589 | 637 | internal_buffer->virt_addr = virt_addr; |
|---|
| 590 | 638 | internal_buffer->dma_buffer = buffer; |
|---|
| 591 | 639 | internal_buffer->mm_flag = mm_flag; |
|---|
| 592 | | - internal_buffer->phys_addr = phys_addr ? phys_addr : 0; |
|---|
| 640 | + internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0; |
|---|
| 593 | 641 | |
|---|
| 594 | 642 | return 0; |
|---|
| 595 | 643 | |
|---|
| .. | .. |
|---|
| 649 | 697 | internal_buffer->memory_parm.format, |
|---|
| 650 | 698 | NULL, NULL, NULL); |
|---|
| 651 | 699 | if (buffer_size <= 0) { |
|---|
| 652 | | - pr_err("Fault to get phys addr size!\n"); |
|---|
| 700 | + pr_err("Failed to get phys addr size!\n"); |
|---|
| 701 | + rga_dump_memory_parm(&internal_buffer->memory_parm); |
|---|
| 653 | 702 | return buffer_size == 0 ? -EINVAL : buffer_size; |
|---|
| 654 | 703 | } |
|---|
| 655 | 704 | |
|---|
| .. | .. |
|---|
| 674 | 723 | ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev); |
|---|
| 675 | 724 | if (ret < 0) { |
|---|
| 676 | 725 | pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core); |
|---|
| 677 | | - return ret; |
|---|
| 726 | + goto free_dma_buffer; |
|---|
| 678 | 727 | } |
|---|
| 679 | 728 | } |
|---|
| 680 | 729 | |
|---|
| .. | .. |
|---|
| 686 | 735 | internal_buffer->dma_buffer = buffer; |
|---|
| 687 | 736 | |
|---|
| 688 | 737 | return 0; |
|---|
| 738 | + |
|---|
| 739 | +free_dma_buffer: |
|---|
| 740 | + kfree(buffer); |
|---|
| 741 | + |
|---|
| 742 | + return ret; |
|---|
| 689 | 743 | } |
|---|
| 690 | 744 | |
|---|
| 691 | 745 | static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer) |
|---|
| .. | .. |
|---|
| 738 | 792 | |
|---|
| 739 | 793 | ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag); |
|---|
| 740 | 794 | if (ret < 0) { |
|---|
| 741 | | - pr_err("%s iommu_map virtual address error!\n", __func__); |
|---|
| 795 | + pr_err("%s map virtual address error!\n", __func__); |
|---|
| 742 | 796 | return ret; |
|---|
| 743 | 797 | } |
|---|
| 744 | 798 | |
|---|
| .. | .. |
|---|
| 751 | 805 | |
|---|
| 752 | 806 | ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job); |
|---|
| 753 | 807 | if (ret < 0) { |
|---|
| 754 | | - pr_err("%s iommu_map physical address error!\n", __func__); |
|---|
| 808 | + pr_err("%s map physical address error!\n", __func__); |
|---|
| 755 | 809 | return ret; |
|---|
| 756 | 810 | } |
|---|
| 757 | 811 | |
|---|
| .. | .. |
|---|
| 789 | 843 | return 0; |
|---|
| 790 | 844 | } |
|---|
| 791 | 845 | |
|---|
| 846 | +static void rga_mm_buffer_destroy(struct rga_internal_buffer *buffer) |
|---|
| 847 | +{ |
|---|
| 848 | + rga_mm_kref_release_buffer(&buffer->refcount); |
|---|
| 849 | +} |
|---|
| 850 | + |
|---|
| 792 | 851 | static struct rga_internal_buffer * |
|---|
| 793 | 852 | rga_mm_lookup_external(struct rga_mm *mm_session, |
|---|
| 794 | | - struct rga_external_buffer *external_buffer) |
|---|
| 853 | + struct rga_external_buffer *external_buffer, |
|---|
| 854 | + struct mm_struct *current_mm) |
|---|
| 795 | 855 | { |
|---|
| 796 | 856 | int id; |
|---|
| 797 | 857 | struct dma_buf *dma_buf = NULL; |
|---|
| .. | .. |
|---|
| 824 | 884 | continue; |
|---|
| 825 | 885 | |
|---|
| 826 | 886 | if (temp_buffer->virt_addr->addr == external_buffer->memory) { |
|---|
| 827 | | - output_buffer = temp_buffer; |
|---|
| 828 | | - break; |
|---|
| 887 | + if (temp_buffer->current_mm == current_mm) { |
|---|
| 888 | + output_buffer = temp_buffer; |
|---|
| 889 | + break; |
|---|
| 890 | + } |
|---|
| 891 | + |
|---|
| 892 | + continue; |
|---|
| 829 | 893 | } |
|---|
| 830 | 894 | } |
|---|
| 831 | 895 | |
|---|
| .. | .. |
|---|
| 1130 | 1194 | |
|---|
| 1131 | 1195 | if (job->flags & RGA_JOB_USE_HANDLE) { |
|---|
| 1132 | 1196 | order = get_order(page_count * sizeof(uint32_t *)); |
|---|
| 1197 | + if (order >= MAX_ORDER) { |
|---|
| 1198 | + pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n", |
|---|
| 1199 | + order, MAX_ORDER); |
|---|
| 1200 | + return -ENOMEM; |
|---|
| 1201 | + } |
|---|
| 1202 | + |
|---|
| 1133 | 1203 | page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); |
|---|
| 1134 | 1204 | if (page_table == NULL) { |
|---|
| 1135 | | - pr_err("%s can not alloc pages for pages, order = %d\n", |
|---|
| 1205 | + pr_err("%s can not alloc pages for page_table, order = %d\n", |
|---|
| 1136 | 1206 | __func__, order); |
|---|
| 1137 | 1207 | return -ENOMEM; |
|---|
| 1138 | 1208 | } |
|---|
| .. | .. |
|---|
| 1189 | 1259 | |
|---|
| 1190 | 1260 | if (job->flags & RGA_JOB_USE_HANDLE) { |
|---|
| 1191 | 1261 | order = get_order(page_count * sizeof(uint32_t *)); |
|---|
| 1262 | + if (order >= MAX_ORDER) { |
|---|
| 1263 | + pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n", |
|---|
| 1264 | + order, MAX_ORDER); |
|---|
| 1265 | + return -ENOMEM; |
|---|
| 1266 | + } |
|---|
| 1267 | + |
|---|
| 1192 | 1268 | page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); |
|---|
| 1193 | 1269 | if (page_table == NULL) { |
|---|
| 1194 | | - pr_err("%s can not alloc pages for pages, order = %d\n", |
|---|
| 1270 | + pr_err("%s can not alloc pages for page_table, order = %d\n", |
|---|
| 1195 | 1271 | __func__, order); |
|---|
| 1196 | 1272 | return -ENOMEM; |
|---|
| 1197 | 1273 | } |
|---|
| .. | .. |
|---|
| 1239 | 1315 | struct sg_table *sgt; |
|---|
| 1240 | 1316 | struct rga_scheduler_t *scheduler; |
|---|
| 1241 | 1317 | |
|---|
| 1242 | | - sgt = rga_mm_lookup_sgt(buffer); |
|---|
| 1243 | | - if (sgt == NULL) { |
|---|
| 1244 | | - pr_err("%s(%d), failed to get sgt, core = 0x%x\n", |
|---|
| 1245 | | - __func__, __LINE__, job->core); |
|---|
| 1246 | | - return -EINVAL; |
|---|
| 1247 | | - } |
|---|
| 1248 | | - |
|---|
| 1249 | 1318 | scheduler = buffer->dma_buffer->scheduler; |
|---|
| 1250 | 1319 | if (scheduler == NULL) { |
|---|
| 1251 | 1320 | pr_err("%s(%d), failed to get scheduler, core = 0x%x\n", |
|---|
| .. | .. |
|---|
| 1253 | 1322 | return -EFAULT; |
|---|
| 1254 | 1323 | } |
|---|
| 1255 | 1324 | |
|---|
| 1256 | | - dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); |
|---|
| 1325 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS && |
|---|
| 1326 | + scheduler->data->mmu != RGA_IOMMU) { |
|---|
| 1327 | + dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir); |
|---|
| 1328 | + } else { |
|---|
| 1329 | + sgt = rga_mm_lookup_sgt(buffer); |
|---|
| 1330 | + if (sgt == NULL) { |
|---|
| 1331 | + pr_err("%s(%d), failed to get sgt, core = 0x%x\n", |
|---|
| 1332 | + __func__, __LINE__, job->core); |
|---|
| 1333 | + return -EINVAL; |
|---|
| 1334 | + } |
|---|
| 1335 | + |
|---|
| 1336 | + dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); |
|---|
| 1337 | + } |
|---|
| 1257 | 1338 | |
|---|
| 1258 | 1339 | return 0; |
|---|
| 1259 | 1340 | } |
|---|
| .. | .. |
|---|
| 1265 | 1346 | struct sg_table *sgt; |
|---|
| 1266 | 1347 | struct rga_scheduler_t *scheduler; |
|---|
| 1267 | 1348 | |
|---|
| 1268 | | - sgt = rga_mm_lookup_sgt(buffer); |
|---|
| 1269 | | - if (sgt == NULL) { |
|---|
| 1270 | | - pr_err("%s(%d), failed to get sgt, core = 0x%x\n", |
|---|
| 1271 | | - __func__, __LINE__, job->core); |
|---|
| 1272 | | - return -EINVAL; |
|---|
| 1273 | | - } |
|---|
| 1274 | | - |
|---|
| 1275 | 1349 | scheduler = buffer->dma_buffer->scheduler; |
|---|
| 1276 | 1350 | if (scheduler == NULL) { |
|---|
| 1277 | 1351 | pr_err("%s(%d), failed to get scheduler, core = 0x%x\n", |
|---|
| .. | .. |
|---|
| 1279 | 1353 | return -EFAULT; |
|---|
| 1280 | 1354 | } |
|---|
| 1281 | 1355 | |
|---|
| 1282 | | - dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); |
|---|
| 1356 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS && |
|---|
| 1357 | + scheduler->data->mmu != RGA_IOMMU) { |
|---|
| 1358 | + dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir); |
|---|
| 1359 | + } else { |
|---|
| 1360 | + sgt = rga_mm_lookup_sgt(buffer); |
|---|
| 1361 | + if (sgt == NULL) { |
|---|
| 1362 | + pr_err("%s(%d), failed to get sgt, core = 0x%x\n", |
|---|
| 1363 | + __func__, __LINE__, job->core); |
|---|
| 1364 | + return -EINVAL; |
|---|
| 1365 | + } |
|---|
| 1366 | + |
|---|
| 1367 | + dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); |
|---|
| 1368 | + } |
|---|
| 1283 | 1369 | |
|---|
| 1284 | 1370 | return 0; |
|---|
| 1285 | 1371 | } |
|---|
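
The two hunks above switch to `dma_sync_single_for_device()`/`dma_sync_single_for_cpu()` when the buffer is physically contiguous and the scheduler core is not behind an IOMMU, and keep the per-segment scatter-gather sync otherwise. A minimal sketch of that choice for the device direction; `sync_for_device()` is an illustrative helper, not driver API, and the `contiguous` flag stands in for the driver's `(mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) && mmu != RGA_IOMMU` test.

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * A physically contiguous buffer mapped without an IOMMU can be synced
 * as one range; anything else must be synced segment by segment.
 */
static void sync_for_device(struct device *dev, bool contiguous,
			    dma_addr_t dma_addr, size_t size,
			    struct sg_table *sgt,
			    enum dma_data_direction dir)
{
	if (contiguous)
		dma_sync_single_for_device(dev, dma_addr, size, dir);
	else
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
```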
| .. | .. |
|---|
| 1334 | 1420 | uint64_t handle, |
|---|
| 1335 | 1421 | uint64_t *channel_addr, |
|---|
| 1336 | 1422 | struct rga_internal_buffer **buf, |
|---|
| 1423 | + int require_size, |
|---|
| 1337 | 1424 | enum dma_data_direction dir) |
|---|
| 1338 | 1425 | { |
|---|
| 1339 | 1426 | int ret = 0; |
|---|
| .. | .. |
|---|
| 1369 | 1456 | return ret; |
|---|
| 1370 | 1457 | } |
|---|
| 1371 | 1458 | |
|---|
| 1372 | | - if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) { |
|---|
| 1459 | + if (internal_buffer->size < require_size) { |
|---|
| 1460 | + ret = -EINVAL; |
|---|
| 1461 | + pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n", |
|---|
| 1462 | + internal_buffer->size, (unsigned long)handle, require_size); |
|---|
| 1463 | + |
|---|
| 1464 | + goto put_internal_buffer; |
|---|
| 1465 | + } |
|---|
| 1466 | + |
|---|
| 1467 | + if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) { |
|---|
| 1373 | 1468 | /* |
|---|
| 1374 | 1469 | * Some userspace virtual addresses do not have an |
|---|
| 1375 | 1470 | * interface for flushing the cache, so it is mandatory |
|---|
| .. | .. |
|---|
| 1378 | 1473 | ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir); |
|---|
| 1379 | 1474 | if (ret < 0) { |
|---|
| 1380 | 1475 | pr_err("sync sgt for device error!\n"); |
|---|
| 1381 | | - return ret; |
|---|
| 1476 | + goto put_internal_buffer; |
|---|
| 1382 | 1477 | } |
|---|
| 1383 | 1478 | } |
|---|
| 1384 | 1479 | |
|---|
| 1385 | 1480 | return 0; |
|---|
| 1481 | + |
|---|
| 1482 | +put_internal_buffer: |
|---|
| 1483 | + mutex_lock(&mm->lock); |
|---|
| 1484 | + kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer); |
|---|
| 1485 | + mutex_unlock(&mm->lock); |
|---|
| 1486 | + |
|---|
| 1487 | + return ret; |
|---|
| 1488 | + |
|---|
| 1386 | 1489 | } |
|---|
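
The new `require_size` check above has to undo the reference it already holds before failing, which is why the error path jumps to `put_internal_buffer` and drops the kref under `mm->lock`. A minimal sketch of that acquire/validate/release shape, using an illustrative kref-counted type rather than the driver's own buffer structures.

```c
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct tracked_buf {
	struct kref refcount;
	size_t size;
};

static void tracked_buf_release(struct kref *kref)
{
	kfree(container_of(kref, struct tracked_buf, refcount));
}

/* Take a reference, validate the size, drop the reference on failure. */
static int tracked_buf_get_checked(struct tracked_buf *buf, size_t need)
{
	kref_get(&buf->refcount);

	if (buf->size < need) {
		kref_put(&buf->refcount, tracked_buf_release);
		return -EINVAL;
	}

	return 0;	/* caller now owns one reference */
}
```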
| 1387 | 1490 | |
|---|
| 1388 | 1491 | static void rga_mm_put_buffer(struct rga_mm *mm, |
|---|
| .. | .. |
|---|
| 1390 | 1493 | struct rga_internal_buffer *internal_buffer, |
|---|
| 1391 | 1494 | enum dma_data_direction dir) |
|---|
| 1392 | 1495 | { |
|---|
| 1393 | | - if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE) |
|---|
| 1496 | + if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE) |
|---|
| 1394 | 1497 | if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir)) |
|---|
| 1395 | 1498 | pr_err("sync sgt for cpu error!\n"); |
|---|
| 1499 | + |
|---|
| 1500 | + if (DEBUGGER_EN(MM)) { |
|---|
| 1501 | + pr_info("handle[%d] put info:\n", (int)internal_buffer->handle); |
|---|
| 1502 | + rga_mm_dump_buffer(internal_buffer); |
|---|
| 1503 | + } |
|---|
| 1396 | 1504 | |
|---|
| 1397 | 1505 | mutex_lock(&mm->lock); |
|---|
| 1398 | 1506 | kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer); |
|---|
| 1399 | 1507 | mutex_unlock(&mm->lock); |
|---|
| 1400 | | -} |
|---|
| 1401 | | - |
|---|
| 1402 | | -static int rga_mm_get_channel_handle_info(struct rga_mm *mm, |
|---|
| 1403 | | - struct rga_job *job, |
|---|
| 1404 | | - struct rga_img_info_t *img, |
|---|
| 1405 | | - struct rga_job_buffer *job_buf, |
|---|
| 1406 | | - enum dma_data_direction dir) |
|---|
| 1407 | | -{ |
|---|
| 1408 | | - int ret = 0; |
|---|
| 1409 | | - int handle = 0; |
|---|
| 1410 | | - |
|---|
| 1411 | | - /* using third-address */ |
|---|
| 1412 | | - if (img->uv_addr > 0) { |
|---|
| 1413 | | - handle = img->yrgb_addr; |
|---|
| 1414 | | - if (handle > 0) { |
|---|
| 1415 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
|---|
| 1416 | | - &job_buf->y_addr, dir); |
|---|
| 1417 | | - if (ret < 0) { |
|---|
| 1418 | | - pr_err("handle[%d] Can't get src y/rgb address info!\n", handle); |
|---|
| 1419 | | - return ret; |
|---|
| 1420 | | - } |
|---|
| 1421 | | - } |
|---|
| 1422 | | - |
|---|
| 1423 | | - handle = img->uv_addr; |
|---|
| 1424 | | - if (handle > 0) { |
|---|
| 1425 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr, |
|---|
| 1426 | | - &job_buf->uv_addr, dir); |
|---|
| 1427 | | - if (ret < 0) { |
|---|
| 1428 | | - pr_err("handle[%d] Can't get src uv address info!\n", handle); |
|---|
| 1429 | | - return ret; |
|---|
| 1430 | | - } |
|---|
| 1431 | | - } |
|---|
| 1432 | | - |
|---|
| 1433 | | - handle = img->v_addr; |
|---|
| 1434 | | - if (handle > 0) { |
|---|
| 1435 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr, |
|---|
| 1436 | | - &job_buf->v_addr, dir); |
|---|
| 1437 | | - if (ret < 0) { |
|---|
| 1438 | | - pr_err("handle[%d] Can't get src uv address info!\n", handle); |
|---|
| 1439 | | - return ret; |
|---|
| 1440 | | - } |
|---|
| 1441 | | - } |
|---|
| 1442 | | - } else { |
|---|
| 1443 | | - handle = img->yrgb_addr; |
|---|
| 1444 | | - if (handle > 0) { |
|---|
| 1445 | | - ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
|---|
| 1446 | | - &job_buf->addr, dir); |
|---|
| 1447 | | - if (ret < 0) { |
|---|
| 1448 | | - pr_err("handle[%d] Can't get src y/rgb address info!\n", handle); |
|---|
| 1449 | | - return ret; |
|---|
| 1450 | | - } |
|---|
| 1451 | | - } |
|---|
| 1452 | | - |
|---|
| 1453 | | - rga_convert_addr(img, false); |
|---|
| 1454 | | - } |
|---|
| 1455 | | - |
|---|
| 1456 | | - if (job->scheduler->data->mmu == RGA_MMU && |
|---|
| 1457 | | - rga_mm_is_need_mmu(job, job_buf->addr)) { |
|---|
| 1458 | | - ret = rga_mm_set_mmu_base(job, img, job_buf); |
|---|
| 1459 | | - if (ret < 0) { |
|---|
| 1460 | | - pr_err("Can't set RGA2 MMU_BASE from handle!\n"); |
|---|
| 1461 | | - return ret; |
|---|
| 1462 | | - } |
|---|
| 1463 | | - } |
|---|
| 1464 | | - |
|---|
| 1465 | | - return 0; |
|---|
| 1466 | 1508 | } |
|---|
| 1467 | 1509 | |
|---|
| 1468 | 1510 | static void rga_mm_put_channel_handle_info(struct rga_mm *mm, |
|---|
| .. | .. |
|---|
| 1481 | 1523 | free_pages((unsigned long)job_buf->page_table, job_buf->order); |
|---|
| 1482 | 1524 | } |
|---|
| 1483 | 1525 | |
|---|
| 1526 | +static int rga_mm_get_channel_handle_info(struct rga_mm *mm, |
|---|
| 1527 | + struct rga_job *job, |
|---|
| 1528 | + struct rga_img_info_t *img, |
|---|
| 1529 | + struct rga_job_buffer *job_buf, |
|---|
| 1530 | + enum dma_data_direction dir) |
|---|
| 1531 | +{ |
|---|
| 1532 | + int ret = 0; |
|---|
| 1533 | + int handle = 0; |
|---|
| 1534 | + int img_size, yrgb_size, uv_size, v_size; |
|---|
| 1535 | + |
|---|
| 1536 | + img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format, |
|---|
| 1537 | + &yrgb_size, &uv_size, &v_size); |
|---|
| 1538 | + if (img_size <= 0) { |
|---|
| 1539 | + pr_err("Image size cal error! width = %d, height = %d, format = %s\n", |
|---|
| 1540 | + img->vir_w, img->vir_h, rga_get_format_name(img->format)); |
|---|
| 1541 | + return -EINVAL; |
|---|
| 1542 | + } |
|---|
| 1543 | + |
|---|
| 1544 | + /* using third-address */ |
|---|
| 1545 | + if (img->uv_addr > 0) { |
|---|
| 1546 | + handle = img->yrgb_addr; |
|---|
| 1547 | + if (handle > 0) { |
|---|
| 1548 | + ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
|---|
| 1549 | + &job_buf->y_addr, yrgb_size, dir); |
|---|
| 1550 | + if (ret < 0) { |
|---|
| 1551 | + pr_err("handle[%d] Can't get y/rgb address info!\n", handle); |
|---|
| 1552 | + return ret; |
|---|
| 1553 | + } |
|---|
| 1554 | + } |
|---|
| 1555 | + |
|---|
| 1556 | + handle = img->uv_addr; |
|---|
| 1557 | + if (handle > 0) { |
|---|
| 1558 | + ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr, |
|---|
| 1559 | + &job_buf->uv_addr, uv_size, dir); |
|---|
| 1560 | + if (ret < 0) { |
|---|
| 1561 | + pr_err("handle[%d] Can't get uv address info!\n", handle); |
|---|
| 1562 | + return ret; |
|---|
| 1563 | + } |
|---|
| 1564 | + } |
|---|
| 1565 | + |
|---|
| 1566 | + handle = img->v_addr; |
|---|
| 1567 | + if (handle > 0) { |
|---|
| 1568 | + ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr, |
|---|
| 1569 | + &job_buf->v_addr, v_size, dir); |
|---|
| 1570 | + if (ret < 0) { |
|---|
| 1571 | + pr_err("handle[%d] Can't get uv address info!\n", handle); |
|---|
| 1572 | + return ret; |
|---|
| 1573 | + } |
|---|
| 1574 | + } |
|---|
| 1575 | + } else { |
|---|
| 1576 | + handle = img->yrgb_addr; |
|---|
| 1577 | + if (handle > 0) { |
|---|
| 1578 | + ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr, |
|---|
| 1579 | + &job_buf->addr, img_size, dir); |
|---|
| 1580 | + if (ret < 0) { |
|---|
| 1581 | + pr_err("handle[%d] Can't get y/rgb address info!\n", handle); |
|---|
| 1582 | + return ret; |
|---|
| 1583 | + } |
|---|
| 1584 | + } |
|---|
| 1585 | + |
|---|
| 1586 | + rga_convert_addr(img, false); |
|---|
| 1587 | + } |
|---|
| 1588 | + |
|---|
| 1589 | + if (job->scheduler->data->mmu == RGA_MMU && |
|---|
| 1590 | + rga_mm_is_need_mmu(job, job_buf->addr)) { |
|---|
| 1591 | + ret = rga_mm_set_mmu_base(job, img, job_buf); |
|---|
| 1592 | + if (ret < 0) { |
|---|
| 1593 | + pr_err("Can't set RGA2 MMU_BASE from handle!\n"); |
|---|
| 1594 | + |
|---|
| 1595 | + rga_mm_put_channel_handle_info(mm, job, job_buf, dir); |
|---|
| 1596 | + return ret; |
|---|
| 1597 | + } |
|---|
| 1598 | + } |
|---|
| 1599 | + |
|---|
| 1600 | + return 0; |
|---|
| 1601 | +} |
|---|
| 1602 | + |
|---|
| 1484 | 1603 | static int rga_mm_get_handle_info(struct rga_job *job) |
|---|
| 1485 | 1604 | { |
|---|
| 1486 | 1605 | int ret = 0; |
|---|
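
The reworked helper above now asks `rga_image_size_cal()` for the whole-image size plus per-plane sizes, so that each handle can be validated against the plane it actually backs (see the per-channel `require_size` arguments passed to `rga_mm_get_buffer()`). As a rough illustration only, here is the per-plane arithmetic for a semi-planar NV12 layout; this is an assumption for the example, `rga_image_size_cal()` remains the authority in the driver.

```c
/*
 * Illustrative per-plane sizing for semi-planar NV12; not driver API.
 */
static int nv12_plane_sizes(int w, int h, int *yrgb, int *uv)
{
	*yrgb = w * h;		/* Y plane: one byte per pixel            */
	*uv   = w * h / 2;	/* interleaved CbCr, 2x2 chroma subsampled */
	return *yrgb + *uv;	/* whole-image size in bytes (1.5 * w * h) */
}
```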
| .. | .. |
|---|
| 1491 | 1610 | req = &job->rga_command_base; |
|---|
| 1492 | 1611 | mm = rga_drvdata->mm; |
|---|
| 1493 | 1612 | |
|---|
| 1613 | + switch (req->render_mode) { |
|---|
| 1614 | + case BITBLT_MODE: |
|---|
| 1615 | + case COLOR_PALETTE_MODE: |
|---|
| 1616 | + if (unlikely(req->src.yrgb_addr <= 0)) { |
|---|
| 1617 | + pr_err("render_mode[0x%x] src0 channel handle[%ld] must is valid!", |
|---|
| 1618 | + req->render_mode, (unsigned long)req->src.yrgb_addr); |
|---|
| 1619 | + return -EINVAL; |
|---|
| 1620 | + } |
|---|
| 1621 | + |
|---|
| 1622 | + if (unlikely(req->dst.yrgb_addr <= 0)) { |
|---|
| 1623 | + pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!", |
|---|
| 1624 | + req->render_mode, (unsigned long)req->dst.yrgb_addr); |
|---|
| 1625 | + return -EINVAL; |
|---|
| 1626 | + } |
|---|
| 1627 | + |
|---|
| 1628 | + if (req->bsfilter_flag) { |
|---|
| 1629 | + if (unlikely(req->pat.yrgb_addr <= 0)) { |
|---|
| 1630 | + pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must is valid!", |
|---|
| 1631 | + req->render_mode, (unsigned long)req->pat.yrgb_addr); |
|---|
| 1632 | + return -EINVAL; |
|---|
| 1633 | + } |
|---|
| 1634 | + } |
|---|
| 1635 | + |
|---|
| 1636 | + break; |
|---|
| 1637 | + case COLOR_FILL_MODE: |
|---|
| 1638 | + if (unlikely(req->dst.yrgb_addr <= 0)) { |
|---|
| 1639 | + pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!", |
|---|
| 1640 | + req->render_mode, (unsigned long)req->dst.yrgb_addr); |
|---|
| 1641 | + return -EINVAL; |
|---|
| 1642 | + } |
|---|
| 1643 | + |
|---|
| 1644 | + break; |
|---|
| 1645 | + |
|---|
| 1646 | + case UPDATE_PALETTE_TABLE_MODE: |
|---|
| 1647 | + case UPDATE_PATTEN_BUF_MODE: |
|---|
| 1648 | + if (unlikely(req->pat.yrgb_addr <= 0)) { |
|---|
| 1649 | + pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must is valid!, req->render_mode", |
|---|
| 1650 | + req->render_mode, (unsigned long)req->pat.yrgb_addr); |
|---|
| 1651 | + return -EINVAL; |
|---|
| 1652 | + } |
|---|
| 1653 | + |
|---|
| 1654 | + break; |
|---|
| 1655 | + default: |
|---|
| 1656 | + pr_err("%s, unknown render mode!\n", __func__); |
|---|
| 1657 | + break; |
|---|
| 1658 | + } |
|---|
| 1659 | + |
|---|
| 1494 | 1660 | if (likely(req->src.yrgb_addr > 0)) { |
|---|
| 1495 | 1661 | ret = rga_mm_get_channel_handle_info(mm, job, &req->src, |
|---|
| 1496 | 1662 | &job->src_buffer, |
|---|
| 1497 | 1663 | DMA_TO_DEVICE); |
|---|
| 1498 | 1664 | if (ret < 0) { |
|---|
| 1499 | | - pr_err("Can't get src buffer third info!\n"); |
|---|
| 1665 | + pr_err("Can't get src buffer info from handle!\n"); |
|---|
| 1500 | 1666 | return ret; |
|---|
| 1501 | 1667 | } |
|---|
| 1502 | 1668 | } |
|---|
| .. | .. |
|---|
| 1506 | 1672 | &job->dst_buffer, |
|---|
| 1507 | 1673 | DMA_TO_DEVICE); |
|---|
| 1508 | 1674 | if (ret < 0) { |
|---|
| 1509 | | - pr_err("Can't get dst buffer third info!\n"); |
|---|
| 1675 | + pr_err("Can't get dst buffer info from handle!\n"); |
|---|
| 1510 | 1676 | return ret; |
|---|
| 1511 | 1677 | } |
|---|
| 1512 | 1678 | } |
|---|
| .. | .. |
|---|
| 1528 | 1694 | DMA_BIDIRECTIONAL); |
|---|
| 1529 | 1695 | } |
|---|
| 1530 | 1696 | if (ret < 0) { |
|---|
| 1531 | | - pr_err("Can't get pat buffer third info!\n"); |
|---|
| 1697 | + pr_err("Can't get pat buffer info from handle!\n"); |
|---|
| 1532 | 1698 | return ret; |
|---|
| 1533 | 1699 | } |
|---|
| 1534 | 1700 | } |
|---|
| .. | .. |
|---|
| 1681 | 1847 | struct rga_job_buffer *job_buffer, |
|---|
| 1682 | 1848 | enum dma_data_direction dir) |
|---|
| 1683 | 1849 | { |
|---|
| 1684 | | - if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE) |
|---|
| 1850 | + if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE) |
|---|
| 1685 | 1851 | if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir)) |
|---|
| 1686 | 1852 | pr_err("sync sgt for cpu error!\n"); |
|---|
| 1687 | 1853 | |
|---|
| .. | .. |
|---|
| 1718 | 1884 | goto error_unmap_buffer; |
|---|
| 1719 | 1885 | } |
|---|
| 1720 | 1886 | |
|---|
| 1721 | | - if (buffer->type == RGA_VIRTUAL_ADDRESS) { |
|---|
| 1722 | | - /* |
|---|
| 1723 | | - * Some userspace virtual addresses do not have an |
|---|
| 1724 | | - * interface for flushing the cache, so it is mandatory |
|---|
| 1725 | | - * to flush the cache when the virtual address is used. |
|---|
| 1726 | | - */ |
|---|
| 1887 | + if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) { |
|---|
| 1727 | 1888 | ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir); |
|---|
| 1728 | 1889 | if (ret < 0) { |
|---|
| 1729 | 1890 | pr_err("sync sgt for device error!\n"); |
|---|
| .. | .. |
|---|
| 1840 | 2001 | int rga_mm_map_job_info(struct rga_job *job) |
|---|
| 1841 | 2002 | { |
|---|
| 1842 | 2003 | int ret; |
|---|
| 2004 | + ktime_t timestamp = ktime_get(); |
|---|
| 1843 | 2005 | |
|---|
| 1844 | 2006 | if (job->flags & RGA_JOB_USE_HANDLE) { |
|---|
| 1845 | 2007 | ret = rga_mm_get_handle_info(job); |
|---|
| .. | .. |
|---|
| 1847 | 2009 | pr_err("failed to get buffer from handle\n"); |
|---|
| 1848 | 2010 | return ret; |
|---|
| 1849 | 2011 | } |
|---|
| 2012 | + |
|---|
| 2013 | + if (DEBUGGER_EN(TIME)) |
|---|
| 2014 | + pr_info("request[%d], get buffer_handle info cost %lld us\n", |
|---|
| 2015 | + job->request_id, ktime_us_delta(ktime_get(), timestamp)); |
|---|
| 1850 | 2016 | } else { |
|---|
| 1851 | 2017 | ret = rga_mm_map_buffer_info(job); |
|---|
| 1852 | 2018 | if (ret < 0) { |
|---|
| 1853 | 2019 | pr_err("failed to map buffer\n"); |
|---|
| 1854 | 2020 | return ret; |
|---|
| 1855 | 2021 | } |
|---|
| 2022 | + |
|---|
| 2023 | + if (DEBUGGER_EN(TIME)) |
|---|
| 2024 | + pr_info("request[%d], map buffer cost %lld us\n", |
|---|
| 2025 | + job->request_id, ktime_us_delta(ktime_get(), timestamp)); |
|---|
| 1856 | 2026 | } |
|---|
| 1857 | 2027 | |
|---|
| 1858 | 2028 | return 0; |
|---|
| .. | .. |
|---|
| 1860 | 2030 | |
|---|
| 1861 | 2031 | void rga_mm_unmap_job_info(struct rga_job *job) |
|---|
| 1862 | 2032 | { |
|---|
| 1863 | | - if (job->flags & RGA_JOB_USE_HANDLE) |
|---|
| 2033 | + ktime_t timestamp = ktime_get(); |
|---|
| 2034 | + |
|---|
| 2035 | + if (job->flags & RGA_JOB_USE_HANDLE) { |
|---|
| 1864 | 2036 | rga_mm_put_handle_info(job); |
|---|
| 1865 | | - else |
|---|
| 2037 | + |
|---|
| 2038 | + if (DEBUGGER_EN(TIME)) |
|---|
| 2039 | + pr_info("request[%d], put buffer_handle info cost %lld us\n", |
|---|
| 2040 | + job->request_id, ktime_us_delta(ktime_get(), timestamp)); |
|---|
| 2041 | + } else { |
|---|
| 1866 | 2042 | rga_mm_unmap_buffer_info(job); |
|---|
| 2043 | + |
|---|
| 2044 | + if (DEBUGGER_EN(TIME)) |
|---|
| 2045 | + pr_info("request[%d], unmap buffer cost %lld us\n", |
|---|
| 2046 | + job->request_id, ktime_us_delta(ktime_get(), timestamp)); |
|---|
| 2047 | + } |
|---|
| 1867 | 2048 | } |
|---|
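
Both paths above now report how long buffer setup and teardown take when the TIME debug flag is enabled. The measurement is simply a `ktime_get()` snapshot followed by `ktime_us_delta()`; a minimal sketch, with an illustrative helper name:

```c
#include <linux/ktime.h>
#include <linux/printk.h>

/* Sketch of the elapsed-time reporting used above (illustrative only). */
static void timed_work_example(void (*work)(void))
{
	ktime_t start = ktime_get();

	work();		/* the section being measured */

	/* ktime_us_delta() returns an s64 number of microseconds. */
	pr_info("work cost %lld us\n", ktime_us_delta(ktime_get(), start));
}
```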
| 1868 | 2049 | |
|---|
| 1869 | | -uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer, |
|---|
| 1870 | | - struct rga_session *session) |
|---|
| 2050 | +/* |
|---|
| 2051 | + * rga_mm_import_buffer - Import an external buffer into the RGA driver |
|---|
| 2052 | + * |
|---|
| 2053 | + * @external_buffer: [in] Parameters of external buffer |
|---|
| 2054 | + * @session: [in] Session of the current process |
|---|
| 2055 | + * |
|---|
| 2056 | + * Returns: |
|---|
| 2057 | + * the generated buffer handle (> 0) on success, or a negative |
|---|
| 2058 | + * error code on failure. |
|---|
| 2059 | + */ |
|---|
| 2060 | +int rga_mm_import_buffer(struct rga_external_buffer *external_buffer, |
|---|
| 2061 | + struct rga_session *session) |
|---|
| 1871 | 2062 | { |
|---|
| 1872 | | - int ret = 0; |
|---|
| 2063 | + int ret = 0, new_id; |
|---|
| 1873 | 2064 | struct rga_mm *mm; |
|---|
| 1874 | 2065 | struct rga_internal_buffer *internal_buffer; |
|---|
| 1875 | 2066 | |
|---|
| 1876 | 2067 | mm = rga_drvdata->mm; |
|---|
| 1877 | 2068 | if (mm == NULL) { |
|---|
| 1878 | 2069 | pr_err("rga mm is null!\n"); |
|---|
| 1879 | | - return 0; |
|---|
| 2070 | + return -EFAULT; |
|---|
| 1880 | 2071 | } |
|---|
| 1881 | 2072 | |
|---|
| 1882 | 2073 | mutex_lock(&mm->lock); |
|---|
| 1883 | 2074 | |
|---|
| 1884 | 2075 | /* first, Check whether to rga_mm */ |
|---|
| 1885 | | - internal_buffer = rga_mm_lookup_external(mm, external_buffer); |
|---|
| 2076 | + internal_buffer = rga_mm_lookup_external(mm, external_buffer, current->mm); |
|---|
| 1886 | 2077 | if (!IS_ERR_OR_NULL(internal_buffer)) { |
|---|
| 1887 | 2078 | kref_get(&internal_buffer->refcount); |
|---|
| 1888 | 2079 | |
|---|
| 1889 | 2080 | mutex_unlock(&mm->lock); |
|---|
| 2081 | + |
|---|
| 2082 | + if (DEBUGGER_EN(MM)) { |
|---|
| 2083 | + pr_info("import existing buffer:\n"); |
|---|
| 2084 | + rga_mm_dump_buffer(internal_buffer); |
|---|
| 2085 | + } |
|---|
| 2086 | + |
|---|
| 1890 | 2087 | return internal_buffer->handle; |
|---|
| 1891 | 2088 | } |
|---|
| 1892 | 2089 | |
|---|
| .. | .. |
|---|
| 1896 | 2093 | pr_err("%s alloc internal_buffer error!\n", __func__); |
|---|
| 1897 | 2094 | |
|---|
| 1898 | 2095 | mutex_unlock(&mm->lock); |
|---|
| 1899 | | - return 0; |
|---|
| 2096 | + return -ENOMEM; |
|---|
| 1900 | 2097 | } |
|---|
| 1901 | 2098 | |
|---|
| 1902 | 2099 | ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true); |
|---|
| .. | .. |
|---|
| 1911 | 2108 | * allocation under our spinlock. |
|---|
| 1912 | 2109 | */ |
|---|
| 1913 | 2110 | idr_preload(GFP_KERNEL); |
|---|
| 1914 | | - internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL); |
|---|
| 2111 | + new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT); |
|---|
| 1915 | 2112 | idr_preload_end(); |
|---|
| 2113 | + if (new_id < 0) { |
|---|
| 2114 | + pr_err("internal_buffer alloc id failed!\n"); |
|---|
| 2115 | + ret = new_id; |
|---|
| 2116 | + goto FREE_INTERNAL_BUFFER; |
|---|
| 2117 | + } |
|---|
| 1916 | 2118 | |
|---|
| 2119 | + internal_buffer->handle = new_id; |
|---|
| 1917 | 2120 | mm->buffer_count++; |
|---|
| 1918 | 2121 | |
|---|
| 1919 | 2122 | if (DEBUGGER_EN(MM)) { |
|---|
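
The import path now allocates handles with `idr_alloc_cyclic()` inside an `idr_preload()` section and checks the result instead of assuming success. A minimal sketch of that pattern with an illustrative object registry (not the driver's `rga_mm` session):

```c
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(obj_idr);
static DEFINE_MUTEX(obj_lock);

static int obj_register(void *obj)
{
	int id;

	/*
	 * Pre-allocate IDR nodes outside the lock so the locked section
	 * can allocate with GFP_NOWAIT and still not run out of memory.
	 */
	idr_preload(GFP_KERNEL);
	mutex_lock(&obj_lock);
	/* Cyclic allocation avoids immediately reusing freed handles. */
	id = idr_alloc_cyclic(&obj_idr, obj, 1, 0, GFP_NOWAIT);
	mutex_unlock(&obj_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, negative errno on failure */
}
```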
| .. | .. |
|---|
| 1928 | 2131 | mutex_unlock(&mm->lock); |
|---|
| 1929 | 2132 | kfree(internal_buffer); |
|---|
| 1930 | 2133 | |
|---|
| 1931 | | - return 0; |
|---|
| 2134 | + return ret; |
|---|
| 1932 | 2135 | } |
|---|
| 1933 | 2136 | |
|---|
| 1934 | 2137 | int rga_mm_release_buffer(uint32_t handle) |
|---|
| .. | .. |
|---|
| 1980 | 2183 | |
|---|
| 1981 | 2184 | idr_for_each_entry(&mm->memory_idr, buffer, i) { |
|---|
| 1982 | 2185 | if (session == buffer->session) { |
|---|
| 1983 | | - pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n", |
|---|
| 2186 | + pr_err("[tgid:%d] Destroy handle[%d] when the user exits\n", |
|---|
| 1984 | 2187 | session->tgid, buffer->handle); |
|---|
| 1985 | | - kref_put(&buffer->refcount, rga_mm_kref_release_buffer); |
|---|
| 2188 | + rga_mm_buffer_destroy(buffer); |
|---|
| 1986 | 2189 | } |
|---|
| 1987 | 2190 | } |
|---|
| 1988 | 2191 | |
|---|