forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -53,14 +53,14 @@
for (i = 0; i < pageCount; i++) {
vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
if (!vma) {
- pr_err("failed to get vma\n");
+ pr_err("page[%d] failed to get vma\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}

pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
- pr_err("failed to get pgd\n");
+ pr_err("page[%d] failed to get pgd\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
@@ -71,7 +71,7 @@
*/
p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
- pr_err("failed to get p4d\n");
+ pr_err("page[%d] failed to get p4d\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
@@ -82,20 +82,20 @@
#endif

if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
- pr_err("failed to get pud\n");
+ pr_err("page[%d] failed to get pud\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
- pr_err("failed to get pmd\n");
+ pr_err("page[%d] failed to get pmd\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
pte = pte_offset_map_lock(current_mm, pmd,
(Memory + i) << PAGE_SHIFT, &ptl);
if (pte_none(*pte)) {
- pr_err("failed to get pte\n");
+ pr_err("page[%d] failed to get pte\n", i);
pte_unmap_unlock(pte, ptl);
ret = RGA_OUT_OF_RESOURCES;
break;
@@ -105,6 +105,10 @@
pages[i] = pfn_to_page(pfn);
pte_unmap_unlock(pte, ptl);
}
+
+ if (ret == RGA_OUT_OF_RESOURCES && i > 0)
+ pr_err("Only get buffer %d byte from vma, but current image required %d byte",
+ (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));

return ret;
}
@@ -144,9 +148,9 @@
put_page(pages[i]);

ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
- if (ret < 0) {
- pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n",
- result, pageCount);
+ if (ret < 0 && result > 0) {
+ pr_err("Only get buffer %d byte from user pages, but current image required %d byte\n",
+ (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
}
}

@@ -177,9 +181,12 @@
}

/* get sg form pages. */
- ret = sg_alloc_table_from_pages(sgt, virt_addr->pages,
+ /* iova requires minimum page alignment, so sgt cannot have offset */
+ ret = sg_alloc_table_from_pages(sgt,
+ virt_addr->pages,
virt_addr->page_count,
- 0, virt_addr->size,
+ 0,
+ virt_addr->size,
GFP_KERNEL);
if (ret) {
pr_err("sg_alloc_table_from_pages failed");
@@ -245,21 +252,29 @@
if (!size) {
pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n",
size, count, (unsigned long)offset);
+ rga_dump_memory_parm(memory_parm);
return -EFAULT;
}

/* alloc pages and page_table */
order = get_order(count * sizeof(struct page *));
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
if (pages == NULL) {
- pr_err("%s can not alloc pages for pages\n", __func__);
+ pr_err("%s can not alloc pages for viraddr pages\n", __func__);
return -ENOMEM;
}

/* get pages from virtual address. */
ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm);
if (ret < 0) {
- pr_err("failed to get pages");
+ pr_err("failed to get pages from virtual adrees: 0x%lx\n",
+ (unsigned long)viraddr);
ret = -EINVAL;
goto out_free_pages;
} else if (ret > 0) {
@@ -301,7 +316,7 @@

if (scheduler->data->mmu == RGA_MMU &&
!(mm_flag & RGA_MEM_UNDER_4G)) {
- pr_err("%s unsupported Memory larger than 4G!\n",
+ pr_err("%s unsupported memory larger than 4G!\n",
rga_get_mmu_type_str(scheduler->data->mmu));
return false;
}
@@ -358,6 +373,7 @@
struct rga_job *job)
{
int ret;
+ int ex_buffer_size;
uint32_t mm_flag = 0;
phys_addr_t phys_addr = 0;
struct rga_dma_buffer *buffer;
@@ -369,6 +385,19 @@
if (scheduler == NULL) {
pr_err("Invalid scheduler device!\n");
return -EINVAL;
+ }
+
+ if (external_buffer->memory_parm.size)
+ ex_buffer_size = external_buffer->memory_parm.size;
+ else
+ ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width,
+ external_buffer->memory_parm.height,
+ external_buffer->memory_parm.format,
+ NULL, NULL, NULL);
+ if (ex_buffer_size <= 0) {
+ pr_err("failed to calculating buffer size!\n");
+ rga_dump_memory_parm(&external_buffer->memory_parm);
+ return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size;
}

/*
@@ -404,6 +433,15 @@
goto free_buffer;
}

+ if (buffer->size < ex_buffer_size) {
+ pr_err("Only get buffer %ld byte from %s = 0x%lx, but current image required %d byte\n",
+ buffer->size, rga_get_memory_type_str(external_buffer->type),
+ (unsigned long)external_buffer->memory, ex_buffer_size);
+ rga_dump_memory_parm(&external_buffer->memory_parm);
+ ret = -EINVAL;
+ goto unmap_buffer;
+ }
+
buffer->scheduler = scheduler;

if (rga_mm_check_range_sgt(buffer->sgt))
@@ -417,6 +455,7 @@
phys_addr = sg_phys(buffer->sgt->sgl);
if (phys_addr == 0) {
pr_err("%s get physical address error!", __func__);
+ ret = -EFAULT;
goto unmap_buffer;
}

@@ -533,11 +572,19 @@
phys_addr = sg_phys(sgt->sgl);
if (phys_addr == 0) {
pr_err("%s get physical address error!", __func__);
+ ret = -EFAULT;
goto free_sgt;
}

mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
}
+
+ /*
+ * Some userspace virtual addresses do not have an
+ * interface for flushing the cache, so it is mandatory
+ * to flush the cache when the virtual address is used.
+ */
+ mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;

if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
@@ -576,8 +623,9 @@
if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
break;

- pr_err("Current RGA mmu[%d] cannot support virtual address!\n",
- scheduler->data->mmu);
+ pr_err("Current %s[%d] cannot support physically discontinuous virtual address!\n",
+ rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
+ ret = -EOPNOTSUPP;
goto free_dma_buffer;
}

@@ -589,7 +637,7 @@
internal_buffer->virt_addr = virt_addr;
internal_buffer->dma_buffer = buffer;
internal_buffer->mm_flag = mm_flag;
- internal_buffer->phys_addr = phys_addr ? phys_addr : 0;
+ internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0;

return 0;

@@ -649,7 +697,8 @@
internal_buffer->memory_parm.format,
NULL, NULL, NULL);
if (buffer_size <= 0) {
- pr_err("Fault to get phys addr size!\n");
+ pr_err("Failed to get phys addr size!\n");
+ rga_dump_memory_parm(&internal_buffer->memory_parm);
return buffer_size == 0 ? -EINVAL : buffer_size;
}

@@ -674,7 +723,7 @@
ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
if (ret < 0) {
pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core);
- return ret;
+ goto free_dma_buffer;
}
}

@@ -686,6 +735,11 @@
internal_buffer->dma_buffer = buffer;

return 0;
+
+free_dma_buffer:
+ kfree(buffer);
+
+ return ret;
}

static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
@@ -738,7 +792,7 @@

ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag);
if (ret < 0) {
- pr_err("%s iommu_map virtual address error!\n", __func__);
+ pr_err("%s map virtual address error!\n", __func__);
return ret;
}

@@ -751,7 +805,7 @@

ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job);
if (ret < 0) {
- pr_err("%s iommu_map physical address error!\n", __func__);
+ pr_err("%s map physical address error!\n", __func__);
return ret;
}

@@ -789,9 +843,15 @@
return 0;
}

+static void rga_mm_buffer_destroy(struct rga_internal_buffer *buffer)
+{
+ rga_mm_kref_release_buffer(&buffer->refcount);
+}
+
static struct rga_internal_buffer *
rga_mm_lookup_external(struct rga_mm *mm_session,
- struct rga_external_buffer *external_buffer)
+ struct rga_external_buffer *external_buffer,
+ struct mm_struct *current_mm)
{
int id;
struct dma_buf *dma_buf = NULL;
@@ -824,8 +884,12 @@
continue;

if (temp_buffer->virt_addr->addr == external_buffer->memory) {
- output_buffer = temp_buffer;
- break;
+ if (temp_buffer->current_mm == current_mm) {
+ output_buffer = temp_buffer;
+ break;
+ }
+
+ continue;
}
}

@@ -1130,9 +1194,15 @@

if (job->flags & RGA_JOB_USE_HANDLE) {
order = get_order(page_count * sizeof(uint32_t *));
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (page_table == NULL) {
- pr_err("%s can not alloc pages for pages, order = %d\n",
+ pr_err("%s can not alloc pages for page_table, order = %d\n",
__func__, order);
return -ENOMEM;
}
@@ -1189,9 +1259,15 @@

if (job->flags & RGA_JOB_USE_HANDLE) {
order = get_order(page_count * sizeof(uint32_t *));
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (page_table == NULL) {
- pr_err("%s can not alloc pages for pages, order = %d\n",
+ pr_err("%s can not alloc pages for page_table, order = %d\n",
__func__, order);
return -ENOMEM;
}
@@ -1239,13 +1315,6 @@
struct sg_table *sgt;
struct rga_scheduler_t *scheduler;

- sgt = rga_mm_lookup_sgt(buffer);
- if (sgt == NULL) {
- pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
- __func__, __LINE__, job->core);
- return -EINVAL;
- }
-
scheduler = buffer->dma_buffer->scheduler;
if (scheduler == NULL) {
pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1253,7 +1322,19 @@
return -EFAULT;
}

- dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
+ scheduler->data->mmu != RGA_IOMMU) {
+ dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+ } else {
+ sgt = rga_mm_lookup_sgt(buffer);
+ if (sgt == NULL) {
+ pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+ __func__, __LINE__, job->core);
+ return -EINVAL;
+ }
+
+ dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ }

return 0;
}
@@ -1265,13 +1346,6 @@
struct sg_table *sgt;
struct rga_scheduler_t *scheduler;

- sgt = rga_mm_lookup_sgt(buffer);
- if (sgt == NULL) {
- pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
- __func__, __LINE__, job->core);
- return -EINVAL;
- }
-
scheduler = buffer->dma_buffer->scheduler;
if (scheduler == NULL) {
pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1279,7 +1353,19 @@
return -EFAULT;
}

- dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
+ scheduler->data->mmu != RGA_IOMMU) {
+ dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+ } else {
+ sgt = rga_mm_lookup_sgt(buffer);
+ if (sgt == NULL) {
+ pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+ __func__, __LINE__, job->core);
+ return -EINVAL;
+ }
+
+ dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ }

return 0;
}
@@ -1334,6 +1420,7 @@
uint64_t handle,
uint64_t *channel_addr,
struct rga_internal_buffer **buf,
+ int require_size,
enum dma_data_direction dir)
{
int ret = 0;
@@ -1369,7 +1456,15 @@
return ret;
}

- if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+ if (internal_buffer->size < require_size) {
+ ret = -EINVAL;
+ pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n",
+ internal_buffer->size, (unsigned long)handle, require_size);
+
+ goto put_internal_buffer;
+ }
+
+ if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
/*
* Some userspace virtual addresses do not have an
* interface for flushing the cache, so it is mandatory
@@ -1378,11 +1473,19 @@
ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir);
if (ret < 0) {
pr_err("sync sgt for device error!\n");
- return ret;
+ goto put_internal_buffer;
}
}

return 0;
+
+put_internal_buffer:
+ mutex_lock(&mm->lock);
+ kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
+ mutex_unlock(&mm->lock);
+
+ return ret;
+
}

static void rga_mm_put_buffer(struct rga_mm *mm,
@@ -1390,79 +1493,18 @@
struct rga_internal_buffer *internal_buffer,
enum dma_data_direction dir)
{
- if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+ if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
pr_err("sync sgt for cpu error!\n");
+
+ if (DEBUGGER_EN(MM)) {
+ pr_info("handle[%d] put info:\n", (int)internal_buffer->handle);
+ rga_mm_dump_buffer(internal_buffer);
+ }

mutex_lock(&mm->lock);
kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
mutex_unlock(&mm->lock);
-}
-
-static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
- struct rga_job *job,
- struct rga_img_info_t *img,
- struct rga_job_buffer *job_buf,
- enum dma_data_direction dir)
-{
- int ret = 0;
- int handle = 0;
-
- /* using third-address */
- if (img->uv_addr > 0) {
- handle = img->yrgb_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
- &job_buf->y_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
- return ret;
- }
- }
-
- handle = img->uv_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
- &job_buf->uv_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src uv address info!\n", handle);
- return ret;
- }
- }
-
- handle = img->v_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
- &job_buf->v_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src uv address info!\n", handle);
- return ret;
- }
- }
- } else {
- handle = img->yrgb_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
- &job_buf->addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
- return ret;
- }
- }
-
- rga_convert_addr(img, false);
- }
-
- if (job->scheduler->data->mmu == RGA_MMU &&
- rga_mm_is_need_mmu(job, job_buf->addr)) {
- ret = rga_mm_set_mmu_base(job, img, job_buf);
- if (ret < 0) {
- pr_err("Can't set RGA2 MMU_BASE from handle!\n");
- return ret;
- }
- }
-
- return 0;
}

static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
@@ -1481,6 +1523,83 @@
free_pages((unsigned long)job_buf->page_table, job_buf->order);
}

+static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
+ struct rga_job *job,
+ struct rga_img_info_t *img,
+ struct rga_job_buffer *job_buf,
+ enum dma_data_direction dir)
+{
+ int ret = 0;
+ int handle = 0;
+ int img_size, yrgb_size, uv_size, v_size;
+
+ img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
+ &yrgb_size, &uv_size, &v_size);
+ if (img_size <= 0) {
+ pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
+ img->vir_w, img->vir_h, rga_get_format_name(img->format));
+ return -EINVAL;
+ }
+
+ /* using third-address */
+ if (img->uv_addr > 0) {
+ handle = img->yrgb_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+ &job_buf->y_addr, yrgb_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+ return ret;
+ }
+ }
+
+ handle = img->uv_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
+ &job_buf->uv_addr, uv_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get uv address info!\n", handle);
+ return ret;
+ }
+ }
+
+ handle = img->v_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
+ &job_buf->v_addr, v_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get uv address info!\n", handle);
+ return ret;
+ }
+ }
+ } else {
+ handle = img->yrgb_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+ &job_buf->addr, img_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+ return ret;
+ }
+ }
+
+ rga_convert_addr(img, false);
+ }
+
+ if (job->scheduler->data->mmu == RGA_MMU &&
+ rga_mm_is_need_mmu(job, job_buf->addr)) {
+ ret = rga_mm_set_mmu_base(job, img, job_buf);
+ if (ret < 0) {
+ pr_err("Can't set RGA2 MMU_BASE from handle!\n");
+
+ rga_mm_put_channel_handle_info(mm, job, job_buf, dir);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int rga_mm_get_handle_info(struct rga_job *job)
{
int ret = 0;
@@ -1491,12 +1610,59 @@
req = &job->rga_command_base;
mm = rga_drvdata->mm;

+ switch (req->render_mode) {
+ case BITBLT_MODE:
+ case COLOR_PALETTE_MODE:
+ if (unlikely(req->src.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] src0 channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->src.yrgb_addr);
+ return -EINVAL;
+ }
+
+ if (unlikely(req->dst.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->dst.yrgb_addr);
+ return -EINVAL;
+ }
+
+ if (req->bsfilter_flag) {
+ if (unlikely(req->pat.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->pat.yrgb_addr);
+ return -EINVAL;
+ }
+ }
+
+ break;
+ case COLOR_FILL_MODE:
+ if (unlikely(req->dst.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->dst.yrgb_addr);
+ return -EINVAL;
+ }
+
+ break;
+
+ case UPDATE_PALETTE_TABLE_MODE:
+ case UPDATE_PATTEN_BUF_MODE:
+ if (unlikely(req->pat.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must is valid!, req->render_mode",
+ req->render_mode, (unsigned long)req->pat.yrgb_addr);
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ pr_err("%s, unknown render mode!\n", __func__);
+ break;
+ }
+
if (likely(req->src.yrgb_addr > 0)) {
ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
&job->src_buffer,
DMA_TO_DEVICE);
if (ret < 0) {
- pr_err("Can't get src buffer third info!\n");
+ pr_err("Can't get src buffer info from handle!\n");
return ret;
}
}
@@ -1506,7 +1672,7 @@
ret = rga_mm_get_channel_handle_info(mm, job, &req->dst,
&job->dst_buffer,
DMA_TO_DEVICE);
if (ret < 0) {
- pr_err("Can't get dst buffer third info!\n");
+ pr_err("Can't get dst buffer info from handle!\n");
return ret;
}
}
@@ -1528,7 +1694,7 @@
DMA_BIDIRECTIONAL);
}
if (ret < 0) {
- pr_err("Can't get pat buffer third info!\n");
+ pr_err("Can't get pat buffer info from handle!\n");
return ret;
}
}
@@ -1681,7 +1847,7 @@
struct rga_job_buffer *job_buffer,
enum dma_data_direction dir)
{
- if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+ if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
pr_err("sync sgt for cpu error!\n");

@@ -1718,12 +1884,7 @@
goto error_unmap_buffer;
}

- if (buffer->type == RGA_VIRTUAL_ADDRESS) {
- /*
- * Some userspace virtual addresses do not have an
- * interface for flushing the cache, so it is mandatory
- * to flush the cache when the virtual address is used.
- */
+ if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
if (ret < 0) {
pr_err("sync sgt for device error!\n");
@@ -1840,6 +2001,7 @@
int rga_mm_map_job_info(struct rga_job *job)
{
int ret;
+ ktime_t timestamp = ktime_get();

if (job->flags & RGA_JOB_USE_HANDLE) {
ret = rga_mm_get_handle_info(job);
@@ -1847,12 +2009,20 @@
pr_err("failed to get buffer from handle\n");
return ret;
}
+
+ if (DEBUGGER_EN(TIME))
+ pr_info("request[%d], get buffer_handle info cost %lld us\n",
+ job->request_id, ktime_us_delta(ktime_get(), timestamp));
} else {
ret = rga_mm_map_buffer_info(job);
if (ret < 0) {
pr_err("failed to map buffer\n");
return ret;
}
+
+ if (DEBUGGER_EN(TIME))
+ pr_info("request[%d], map buffer cost %lld us\n",
+ job->request_id, ktime_us_delta(ktime_get(), timestamp));
}

return 0;
@@ -1860,33 +2030,60 @@

void rga_mm_unmap_job_info(struct rga_job *job)
{
- if (job->flags & RGA_JOB_USE_HANDLE)
+ ktime_t timestamp = ktime_get();
+
+ if (job->flags & RGA_JOB_USE_HANDLE) {
rga_mm_put_handle_info(job);
- else
+
+ if (DEBUGGER_EN(TIME))
+ pr_info("request[%d], put buffer_handle info cost %lld us\n",
+ job->request_id, ktime_us_delta(ktime_get(), timestamp));
+ } else {
rga_mm_unmap_buffer_info(job);
+
+ if (DEBUGGER_EN(TIME))
+ pr_info("request[%d], unmap buffer cost %lld us\n",
+ job->request_id, ktime_us_delta(ktime_get(), timestamp));
+ }
}

-uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
- struct rga_session *session)
+/*
+ * rga_mm_import_buffer - Importing external buffer into the RGA driver
+ *
+ * @external_buffer: [in] Parameters of external buffer
+ * @session: [in] Session of the current process
+ *
+ * returns:
+ * if return value > 0, the buffer import is successful and is the generated
+ * buffer-handle, negative error code on failure.
+ */
+int rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
+ struct rga_session *session)
{
- int ret = 0;
+ int ret = 0, new_id;
struct rga_mm *mm;
struct rga_internal_buffer *internal_buffer;

mm = rga_drvdata->mm;
if (mm == NULL) {
pr_err("rga mm is null!\n");
- return 0;
+ return -EFAULT;
}

mutex_lock(&mm->lock);

/* first, Check whether to rga_mm */
- internal_buffer = rga_mm_lookup_external(mm, external_buffer);
+ internal_buffer = rga_mm_lookup_external(mm, external_buffer, current->mm);
if (!IS_ERR_OR_NULL(internal_buffer)) {
kref_get(&internal_buffer->refcount);

mutex_unlock(&mm->lock);
+
+ if (DEBUGGER_EN(MM)) {
+ pr_info("import existing buffer:\n");
+ rga_mm_dump_buffer(internal_buffer);
+ }
+
return internal_buffer->handle;
}

@@ -1896,7 +2093,7 @@
pr_err("%s alloc internal_buffer error!\n", __func__);

mutex_unlock(&mm->lock);
- return 0;
+ return -ENOMEM;
}

ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true);
@@ -1911,9 +2108,15 @@
* allocation under our spinlock.
*/
idr_preload(GFP_KERNEL);
- internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL);
+ new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT);
idr_preload_end();
+ if (new_id < 0) {
+ pr_err("internal_buffer alloc id failed!\n");
+ ret = new_id;
+ goto FREE_INTERNAL_BUFFER;
+ }

+ internal_buffer->handle = new_id;
mm->buffer_count++;

if (DEBUGGER_EN(MM)) {
@@ -1928,7 +2131,7 @@
mutex_unlock(&mm->lock);
kfree(internal_buffer);

- return 0;
+ return ret;
}

int rga_mm_release_buffer(uint32_t handle)
@@ -1980,9 +2183,9 @@

idr_for_each_entry(&mm->memory_idr, buffer, i) {
if (session == buffer->session) {
- pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n",
+ pr_err("[tgid:%d] Destroy handle[%d] when the user exits\n",
session->tgid, buffer->handle);
- kref_put(&buffer->refcount, rga_mm_kref_release_buffer);
+ rga_mm_buffer_destroy(buffer);
}
}
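
Note on a pattern that recurs in this patch: every __get_free_pages() call backing a page-pointer or page-table array is now preceded by a MAX_ORDER check. The sketch below shows that guard in isolation; the helper name alloc_page_pointer_array and its parameters are illustrative only and are not part of the driver.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

/*
 * Illustrative sketch (not from the patch): allocate room for 'count'
 * page pointers with __get_free_pages(), rejecting requests whose order
 * the buddy allocator cannot serve. The caller releases the array with
 * free_pages((unsigned long)pages, *order).
 */
static struct page **alloc_page_pointer_array(int count, int *order)
{
	struct page **pages;

	*order = get_order(count * sizeof(struct page *));
	if (*order >= MAX_ORDER) {
		pr_err("can not alloc pages with order[%d], max_order = %d\n",
		       *order, MAX_ORDER);
		return NULL;
	}

	pages = (struct page **)__get_free_pages(GFP_KERNEL, *order);
	if (pages == NULL)
		pr_err("can not alloc pages for page pointer array, order = %d\n",
		       *order);

	return pages;
}

Checking the order up front turns an oversized request into an immediate, clearly logged failure instead of leaving it to the allocator to reject the request with a bare allocation error.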
19882191