forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -53,14 +53,14 @@
 	for (i = 0; i < pageCount; i++) {
 		vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
 		if (!vma) {
-			pr_err("failed to get vma\n");
+			pr_err("page[%d] failed to get vma\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 
 		pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
 		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
-			pr_err("failed to get pgd\n");
+			pr_err("page[%d] failed to get pgd\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
@@ -71,7 +71,7 @@
 		 */
 		p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
 		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
-			pr_err("failed to get p4d\n");
+			pr_err("page[%d] failed to get p4d\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
@@ -82,20 +82,20 @@
 #endif
 
 		if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
-			pr_err("failed to get pud\n");
+			pr_err("page[%d] failed to get pud\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
 		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
-			pr_err("failed to get pmd\n");
+			pr_err("page[%d] failed to get pmd\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 		pte = pte_offset_map_lock(current_mm, pmd,
 					  (Memory + i) << PAGE_SHIFT, &ptl);
 		if (pte_none(*pte)) {
-			pr_err("failed to get pte\n");
+			pr_err("page[%d] failed to get pte\n", i);
 			pte_unmap_unlock(pte, ptl);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
@@ -105,6 +105,10 @@
 		pages[i] = pfn_to_page(pfn);
 		pte_unmap_unlock(pte, ptl);
 	}
+
+	if (ret == RGA_OUT_OF_RESOURCES && i > 0)
+		pr_err("Only get buffer %d byte from vma, but current image required %d byte",
+		       (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
 
 	return ret;
 }
@@ -144,9 +148,9 @@
 				put_page(pages[i]);
 
 		ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
-		if (ret < 0) {
-			pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n",
-			       result, pageCount);
+		if (ret < 0 && result > 0) {
+			pr_err("Only get buffer %d byte from user pages, but current image required %d byte\n",
+			       (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
 		}
 	}
 
@@ -177,9 +181,12 @@
 	}
 
 	/* get sg form pages. */
-	ret = sg_alloc_table_from_pages(sgt, virt_addr->pages,
+	/* iova requires minimum page alignment, so sgt cannot have offset */
+	ret = sg_alloc_table_from_pages(sgt,
+					virt_addr->pages,
 					virt_addr->page_count,
-					0, virt_addr->size,
+					0,
+					virt_addr->size,
 					GFP_KERNEL);
 	if (ret) {
 		pr_err("sg_alloc_table_from_pages failed");
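
The reworked sg_alloc_table_from_pages() call above always passes an offset of 0, since the RGA IOVA mapping has to start on a page boundary; any sub-page offset is carried separately and added back after mapping (see the phys_addr + virt_addr->offset hunk further down). A minimal standalone sketch of the same call pattern, with hypothetical names outside this driver:

#include <linux/scatterlist.h>
#include <linux/gfp.h>

/*
 * Build a scatter table that begins at offset 0 of the first page so the
 * resulting IOVA is page aligned; the caller tracks any sub-page offset
 * itself and adds it back once the mapping exists.
 */
static int build_page_aligned_sgt(struct sg_table *sgt, struct page **pages,
				  unsigned int page_count, unsigned long size)
{
	return sg_alloc_table_from_pages(sgt, pages, page_count,
					 0 /* offset */, size, GFP_KERNEL);
}
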
@@ -245,21 +252,29 @@
 	if (!size) {
 		pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n",
 		       size, count, (unsigned long)offset);
+		rga_dump_memory_parm(memory_parm);
 		return -EFAULT;
 	}
 
 	/* alloc pages and page_table */
 	order = get_order(count * sizeof(struct page *));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n",
+		       order, MAX_ORDER);
+		return -ENOMEM;
+	}
+
 	pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
 	if (pages == NULL) {
-		pr_err("%s can not alloc pages for pages\n", __func__);
+		pr_err("%s can not alloc pages for viraddr pages\n", __func__);
 		return -ENOMEM;
 	}
 
 	/* get pages from virtual address. */
 	ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm);
 	if (ret < 0) {
-		pr_err("failed to get pages");
+		pr_err("failed to get pages from virtual adrees: 0x%lx\n",
+		       (unsigned long)viraddr);
 		ret = -EINVAL;
 		goto out_free_pages;
 	} else if (ret > 0) {
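
The new order check above, and the matching checks in the two page_table hunks further down, reject requests the buddy allocator can never satisfy instead of letting __get_free_pages() warn and fail. A standalone sketch of the guard with hypothetical names, assuming a kernel (as here) where MAX_ORDER is an exclusive upper bound on allocation order:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Allocate an array of n_entries elements with __get_free_pages(),
 * refusing orders the page allocator cannot satisfy.
 */
static void *alloc_entry_array(size_t n_entries, size_t entry_size)
{
	unsigned int order = get_order(n_entries * entry_size);

	if (order >= MAX_ORDER)	/* would always fail in the buddy allocator */
		return NULL;

	return (void *)__get_free_pages(GFP_KERNEL, order);
}
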
@@ -301,7 +316,7 @@
 
 	if (scheduler->data->mmu == RGA_MMU &&
 	    !(mm_flag & RGA_MEM_UNDER_4G)) {
-		pr_err("%s unsupported Memory larger than 4G!\n",
+		pr_err("%s unsupported memory larger than 4G!\n",
 		       rga_get_mmu_type_str(scheduler->data->mmu));
 		return false;
 	}
@@ -358,6 +373,7 @@
 			       struct rga_job *job)
 {
 	int ret;
+	int ex_buffer_size;
 	uint32_t mm_flag = 0;
 	phys_addr_t phys_addr = 0;
 	struct rga_dma_buffer *buffer;
@@ -369,6 +385,19 @@
 	if (scheduler == NULL) {
 		pr_err("Invalid scheduler device!\n");
 		return -EINVAL;
+	}
+
+	if (external_buffer->memory_parm.size)
+		ex_buffer_size = external_buffer->memory_parm.size;
+	else
+		ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width,
+						    external_buffer->memory_parm.height,
+						    external_buffer->memory_parm.format,
+						    NULL, NULL, NULL);
+	if (ex_buffer_size <= 0) {
+		pr_err("failed to calculating buffer size!\n");
+		rga_dump_memory_parm(&external_buffer->memory_parm);
+		return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size;
 	}
 
 	/*
@@ -402,6 +431,15 @@
 		pr_err("%s core[%d] map dma buffer error!\n",
 		       __func__, scheduler->core);
 		goto free_buffer;
+	}
+
+	if (buffer->size < ex_buffer_size) {
+		pr_err("Only get buffer %ld byte from %s = 0x%lx, but current image required %d byte\n",
+		       buffer->size, rga_get_memory_type_str(external_buffer->type),
+		       (unsigned long)external_buffer->memory, ex_buffer_size);
+		rga_dump_memory_parm(&external_buffer->memory_parm);
+		ret = -EINVAL;
+		goto unmap_buffer;
 	}
 
 	buffer->scheduler = scheduler;
@@ -539,6 +577,13 @@
 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
 	}
 
+	/*
+	 * Some userspace virtual addresses do not have an
+	 * interface for flushing the cache, so it is mandatory
+	 * to flush the cache when the virtual address is used.
+	 */
+	mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
+
 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
 		       scheduler->core, mm_flag);
@@ -576,8 +621,8 @@
 		if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
 			break;
 
-		pr_err("Current RGA mmu[%d] cannot support virtual address!\n",
-		       scheduler->data->mmu);
+		pr_err("Current %s[%d] cannot support virtual address!\n",
+		       rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
 		goto free_dma_buffer;
 	}
 
@@ -589,7 +634,7 @@
 	internal_buffer->virt_addr = virt_addr;
 	internal_buffer->dma_buffer = buffer;
 	internal_buffer->mm_flag = mm_flag;
-	internal_buffer->phys_addr = phys_addr ? phys_addr : 0;
+	internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0;
 
 	return 0;
 
@@ -649,7 +694,8 @@
 					  internal_buffer->memory_parm.format,
 					  NULL, NULL, NULL);
 	if (buffer_size <= 0) {
-		pr_err("Fault to get phys addr size!\n");
+		pr_err("Failed to get phys addr size!\n");
+		rga_dump_memory_parm(&internal_buffer->memory_parm);
 		return buffer_size == 0 ? -EINVAL : buffer_size;
 	}
 
@@ -674,7 +720,7 @@
 		ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
 		if (ret < 0) {
 			pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core);
-			return ret;
+			goto free_dma_buffer;
 		}
 	}
 
@@ -686,6 +732,11 @@
 	internal_buffer->dma_buffer = buffer;
 
 	return 0;
+
+free_dma_buffer:
+	kfree(buffer);
+
+	return ret;
 }
 
 static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
@@ -738,7 +789,7 @@
 
 		ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag);
 		if (ret < 0) {
-			pr_err("%s iommu_map virtual address error!\n", __func__);
+			pr_err("%s map virtual address error!\n", __func__);
 			return ret;
 		}
 
@@ -751,7 +802,7 @@
 
 		ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job);
 		if (ret < 0) {
-			pr_err("%s iommu_map physical address error!\n", __func__);
+			pr_err("%s map physical address error!\n", __func__);
 			return ret;
 		}
 
@@ -1130,9 +1181,15 @@
 
 	if (job->flags & RGA_JOB_USE_HANDLE) {
 		order = get_order(page_count * sizeof(uint32_t *));
+		if (order >= MAX_ORDER) {
+			pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+			       order, MAX_ORDER);
+			return -ENOMEM;
+		}
+
 		page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 		if (page_table == NULL) {
-			pr_err("%s can not alloc pages for pages, order = %d\n",
+			pr_err("%s can not alloc pages for page_table, order = %d\n",
 			       __func__, order);
 			return -ENOMEM;
 		}
@@ -1189,9 +1246,15 @@
 
 	if (job->flags & RGA_JOB_USE_HANDLE) {
 		order = get_order(page_count * sizeof(uint32_t *));
+		if (order >= MAX_ORDER) {
+			pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+			       order, MAX_ORDER);
+			return -ENOMEM;
+		}
+
 		page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 		if (page_table == NULL) {
-			pr_err("%s can not alloc pages for pages, order = %d\n",
+			pr_err("%s can not alloc pages for page_table, order = %d\n",
 			       __func__, order);
 			return -ENOMEM;
 		}
@@ -1239,13 +1302,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1253,7 +1309,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
@@ -1265,13 +1332,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1279,7 +1339,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
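
Both sync helpers now branch on RGA_MEM_PHYSICAL_CONTIGUOUS: a contiguous buffer is synced as a single span with dma_sync_single_*, and only scattered buffers walk the sg_table. A condensed sketch of that split, using a hypothetical trimmed-down buffer type standing in for the driver's structures:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical, simplified view of the buffer state the driver keeps. */
struct buf_view {
	bool		contiguous;	/* set when the backing memory is one physical run */
	dma_addr_t	dma_addr;	/* start of the contiguous range */
	size_t		size;
	struct sg_table	*sgt;		/* valid when !contiguous */
};

static void buf_sync_for_device(struct device *dev, struct buf_view *buf,
				enum dma_data_direction dir)
{
	if (buf->contiguous)
		dma_sync_single_for_device(dev, buf->dma_addr, buf->size, dir);
	else
		dma_sync_sg_for_device(dev, buf->sgt->sgl,
				       buf->sgt->orig_nents, dir);
}

The _for_cpu direction mirrors this with dma_sync_single_for_cpu()/dma_sync_sg_for_cpu(), exactly as the second hunk does.
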
@@ -1334,6 +1405,7 @@
 			     uint64_t handle,
 			     uint64_t *channel_addr,
 			     struct rga_internal_buffer **buf,
+			     int require_size,
 			     enum dma_data_direction dir)
 {
 	int ret = 0;
@@ -1369,7 +1441,15 @@
 		return ret;
 	}
 
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+	if (internal_buffer->size < require_size) {
+		ret = -EINVAL;
+		pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n",
+		       internal_buffer->size, (unsigned long)handle, require_size);
+
+		goto put_internal_buffer;
+	}
+
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		/*
 		 * Some userspace virtual addresses do not have an
 		 * interface for flushing the cache, so it is mandatory
@@ -1378,11 +1458,19 @@
 		ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");
-			return ret;
+			goto put_internal_buffer;
 		}
 	}
 
 	return 0;
+
+put_internal_buffer:
+	mutex_lock(&mm->lock);
+	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
+	mutex_unlock(&mm->lock);
+
+	return ret;
+
 }
 
 static void rga_mm_put_buffer(struct rga_mm *mm,
@@ -1390,79 +1478,13 @@
 			      struct rga_internal_buffer *internal_buffer,
 			      enum dma_data_direction dir)
 {
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
 	mutex_lock(&mm->lock);
 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
 	mutex_unlock(&mm->lock);
-}
-
-static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
-					   struct rga_job *job,
-					   struct rga_img_info_t *img,
-					   struct rga_job_buffer *job_buf,
-					   enum dma_data_direction dir)
-{
-	int ret = 0;
-	int handle = 0;
-
-	/* using third-address */
-	if (img->uv_addr > 0) {
-		handle = img->yrgb_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
-						&job_buf->y_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
-				return ret;
-			}
-		}
-
-		handle = img->uv_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
-						&job_buf->uv_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src uv address info!\n", handle);
-				return ret;
-			}
-		}
-
-		handle = img->v_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
-						&job_buf->v_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src uv address info!\n", handle);
-				return ret;
-			}
-		}
-	} else {
-		handle = img->yrgb_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
-						&job_buf->addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
-				return ret;
-			}
-		}
-
-		rga_convert_addr(img, false);
-	}
-
-	if (job->scheduler->data->mmu == RGA_MMU &&
-	    rga_mm_is_need_mmu(job, job_buf->addr)) {
-		ret = rga_mm_set_mmu_base(job, img, job_buf);
-		if (ret < 0) {
-			pr_err("Can't set RGA2 MMU_BASE from handle!\n");
-			return ret;
-		}
-	}
-
-	return 0;
 }
 
 static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
@@ -1481,6 +1503,83 @@
 		free_pages((unsigned long)job_buf->page_table, job_buf->order);
 }
 
+static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
+					   struct rga_job *job,
+					   struct rga_img_info_t *img,
+					   struct rga_job_buffer *job_buf,
+					   enum dma_data_direction dir)
+{
+	int ret = 0;
+	int handle = 0;
+	int img_size, yrgb_size, uv_size, v_size;
+
+	img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
+				      &yrgb_size, &uv_size, &v_size);
+	if (img_size <= 0) {
+		pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
+		       img->vir_w, img->vir_h, rga_get_format_name(img->format));
+		return -EINVAL;
+	}
+
+	/* using third-address */
+	if (img->uv_addr > 0) {
+		handle = img->yrgb_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+						&job_buf->y_addr, yrgb_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+				return ret;
+			}
+		}
+
+		handle = img->uv_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
+						&job_buf->uv_addr, uv_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get uv address info!\n", handle);
+				return ret;
+			}
+		}
+
+		handle = img->v_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
+						&job_buf->v_addr, v_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get uv address info!\n", handle);
+				return ret;
+			}
+		}
+	} else {
+		handle = img->yrgb_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+						&job_buf->addr, img_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+				return ret;
+			}
+		}
+
+		rga_convert_addr(img, false);
+	}
+
+	if (job->scheduler->data->mmu == RGA_MMU &&
+	    rga_mm_is_need_mmu(job, job_buf->addr)) {
+		ret = rga_mm_set_mmu_base(job, img, job_buf);
+		if (ret < 0) {
+			pr_err("Can't set RGA2 MMU_BASE from handle!\n");
+
+			rga_mm_put_channel_handle_info(mm, job, job_buf, dir);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int rga_mm_get_handle_info(struct rga_job *job)
 {
 	int ret = 0;
@@ -1491,12 +1590,59 @@
 	req = &job->rga_command_base;
 	mm = rga_drvdata->mm;
 
+	switch (req->render_mode) {
+	case BITBLT_MODE:
+	case COLOR_PALETTE_MODE:
+		if (unlikely(req->src.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] src0 channel handle[%ld] must is valid!",
+			       req->render_mode, (unsigned long)req->src.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (req->bsfilter_flag) {
+			if (unlikely(req->pat.yrgb_addr <= 0)) {
+				pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must is valid!",
+				       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+				return -EINVAL;
+			}
+		}
+
+		break;
+	case COLOR_FILL_MODE:
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+
+	case UPDATE_PALETTE_TABLE_MODE:
+	case UPDATE_PATTEN_BUF_MODE:
+		if (unlikely(req->pat.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must is valid!, req->render_mode",
+			       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		pr_err("%s, unknown render mode!\n", __func__);
+		break;
+	}
+
 	if (likely(req->src.yrgb_addr > 0)) {
 		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
 						     &job->src_buffer,
 						     DMA_TO_DEVICE);
 		if (ret < 0) {
-			pr_err("Can't get src buffer third info!\n");
+			pr_err("Can't get src buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1506,7 +1652,7 @@
 						     &job->dst_buffer,
 						     DMA_TO_DEVICE);
 		if (ret < 0) {
-			pr_err("Can't get dst buffer third info!\n");
+			pr_err("Can't get dst buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1528,7 +1674,7 @@
 							     DMA_BIDIRECTIONAL);
 		}
 		if (ret < 0) {
-			pr_err("Can't get pat buffer third info!\n");
+			pr_err("Can't get pat buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1681,7 +1827,7 @@
 				       struct rga_job_buffer *job_buffer,
 				       enum dma_data_direction dir)
 {
-	if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
@@ -1718,12 +1864,7 @@
 		goto error_unmap_buffer;
 	}
 
-	if (buffer->type == RGA_VIRTUAL_ADDRESS) {
-		/*
-		 * Some userspace virtual addresses do not have an
-		 * interface for flushing the cache, so it is mandatory
-		 * to flush the cache when the virtual address is used.
-		 */
+	if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");
@@ -1869,7 +2010,7 @@
 uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
 			      struct rga_session *session)
 {
-	int ret = 0;
+	int ret = 0, new_id;
 	struct rga_mm *mm;
 	struct rga_internal_buffer *internal_buffer;
 
@@ -1911,9 +2052,14 @@
 	 * allocation under our spinlock.
 	 */
 	idr_preload(GFP_KERNEL);
-	internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL);
+	new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT);
 	idr_preload_end();
+	if (new_id < 0) {
+		pr_err("internal_buffer alloc id failed!\n");
+		goto FREE_INTERNAL_BUFFER;
+	}
 
+	internal_buffer->handle = new_id;
 	mm->buffer_count++;
 
 	if (DEBUGGER_EN(MM)) {
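
The import path now takes the id returned by idr_alloc_cyclic() and only publishes it as the handle after checking for failure, instead of storing a possibly negative return value directly. A minimal sketch of the preload plus cyclic-allocation pattern, with hypothetical names outside this driver:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

/* Returns a handle >= 1 on success, or a negative errno. */
static int publish_object(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; done outside the lock */
	spin_lock(&example_lock);
	/* cyclic allocation avoids immediately reusing a just-freed handle */
	id = idr_alloc_cyclic(&example_idr, object, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* negative on failure, e.g. -ENOMEM */
}
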