hc
2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -53,14 +53,14 @@
 	for (i = 0; i < pageCount; i++) {
 		vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
 		if (!vma) {
-			pr_err("failed to get vma\n");
+			pr_err("page[%d] failed to get vma\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 
 		pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
 		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
-			pr_err("failed to get pgd\n");
+			pr_err("page[%d] failed to get pgd\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
@@ -71,7 +71,7 @@
 		 */
 		p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
 		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
-			pr_err("failed to get p4d\n");
+			pr_err("page[%d] failed to get p4d\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
@@ -82,20 +82,20 @@
 #endif
 
 		if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
-			pr_err("failed to get pud\n");
+			pr_err("page[%d] failed to get pud\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
 		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
-			pr_err("failed to get pmd\n");
+			pr_err("page[%d] failed to get pmd\n", i);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
 		}
 		pte = pte_offset_map_lock(current_mm, pmd,
 					  (Memory + i) << PAGE_SHIFT, &ptl);
 		if (pte_none(*pte)) {
-			pr_err("failed to get pte\n");
+			pr_err("page[%d] failed to get pte\n", i);
 			pte_unmap_unlock(pte, ptl);
 			ret = RGA_OUT_OF_RESOURCES;
 			break;
@@ -105,6 +105,10 @@
 		pages[i] = pfn_to_page(pfn);
 		pte_unmap_unlock(pte, ptl);
 	}
+
+	if (ret == RGA_OUT_OF_RESOURCES && i > 0)
+		pr_err("Only get buffer %d byte from vma, but current image required %d byte",
+		       (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
 
 	return ret;
 }
@@ -144,9 +148,9 @@
 			put_page(pages[i]);
 
 		ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
-		if (ret < 0) {
-			pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n",
-			       result, pageCount);
+		if (ret < 0 && result > 0) {
+			pr_err("Only get buffer %d byte from user pages, but current image required %d byte\n",
+			       (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
 		}
 	}
 
@@ -177,9 +181,12 @@
 	}
 
 	/* get sg form pages. */
-	ret = sg_alloc_table_from_pages(sgt, virt_addr->pages,
+	/* iova requires minimum page alignment, so sgt cannot have offset */
+	ret = sg_alloc_table_from_pages(sgt,
+					virt_addr->pages,
 					virt_addr->page_count,
-					0, virt_addr->size,
+					0,
+					virt_addr->size,
 					GFP_KERNEL);
 	if (ret) {
 		pr_err("sg_alloc_table_from_pages failed");
@@ -245,21 +252,29 @@
 	if (!size) {
 		pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n",
 		       size, count, (unsigned long)offset);
+		rga_dump_memory_parm(memory_parm);
 		return -EFAULT;
 	}
 
 	/* alloc pages and page_table */
 	order = get_order(count * sizeof(struct page *));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n",
+		       order, MAX_ORDER);
+		return -ENOMEM;
+	}
+
 	pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
 	if (pages == NULL) {
-		pr_err("%s can not alloc pages for pages\n", __func__);
+		pr_err("%s can not alloc pages for viraddr pages\n", __func__);
 		return -ENOMEM;
 	}
 
 	/* get pages from virtual address. */
 	ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm);
 	if (ret < 0) {
-		pr_err("failed to get pages");
+		pr_err("failed to get pages from virtual adrees: 0x%lx\n",
+		       (unsigned long)viraddr);
 		ret = -EINVAL;
 		goto out_free_pages;
 	} else if (ret > 0) {
@@ -301,7 +316,7 @@
 
 	if (scheduler->data->mmu == RGA_MMU &&
 	    !(mm_flag & RGA_MEM_UNDER_4G)) {
-		pr_err("%s unsupported Memory larger than 4G!\n",
+		pr_err("%s unsupported memory larger than 4G!\n",
 		       rga_get_mmu_type_str(scheduler->data->mmu));
 		return false;
 	}
@@ -358,6 +373,7 @@
 				     struct rga_job *job)
 {
 	int ret;
+	int ex_buffer_size;
 	uint32_t mm_flag = 0;
 	phys_addr_t phys_addr = 0;
 	struct rga_dma_buffer *buffer;
@@ -369,6 +385,19 @@
 	if (scheduler == NULL) {
 		pr_err("Invalid scheduler device!\n");
 		return -EINVAL;
+	}
+
+	if (external_buffer->memory_parm.size)
+		ex_buffer_size = external_buffer->memory_parm.size;
+	else
+		ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width,
+						    external_buffer->memory_parm.height,
+						    external_buffer->memory_parm.format,
+						    NULL, NULL, NULL);
+	if (ex_buffer_size <= 0) {
+		pr_err("failed to calculating buffer size!\n");
+		rga_dump_memory_parm(&external_buffer->memory_parm);
+		return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size;
 	}
 
 	/*
@@ -402,6 +431,15 @@
 		pr_err("%s core[%d] map dma buffer error!\n",
 		       __func__, scheduler->core);
 		goto free_buffer;
+	}
+
+	if (buffer->size < ex_buffer_size) {
+		pr_err("Only get buffer %ld byte from %s = 0x%lx, but current image required %d byte\n",
+		       buffer->size, rga_get_memory_type_str(external_buffer->type),
+		       (unsigned long)external_buffer->memory, ex_buffer_size);
+		rga_dump_memory_parm(&external_buffer->memory_parm);
+		ret = -EINVAL;
+		goto unmap_buffer;
 	}
 
 	buffer->scheduler = scheduler;
@@ -576,8 +614,8 @@
 		if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
 			break;
 
-		pr_err("Current RGA mmu[%d] cannot support virtual address!\n",
-		       scheduler->data->mmu);
+		pr_err("Current %s[%d] cannot support virtual address!\n",
+		       rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
 		goto free_dma_buffer;
 	}
 
@@ -589,7 +627,7 @@
 	internal_buffer->virt_addr = virt_addr;
 	internal_buffer->dma_buffer = buffer;
 	internal_buffer->mm_flag = mm_flag;
-	internal_buffer->phys_addr = phys_addr ? phys_addr : 0;
+	internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0;
 
 	return 0;
 
@@ -649,7 +687,8 @@
 					  internal_buffer->memory_parm.format,
 					  NULL, NULL, NULL);
 	if (buffer_size <= 0) {
-		pr_err("Fault to get phys addr size!\n");
+		pr_err("Failed to get phys addr size!\n");
+		rga_dump_memory_parm(&internal_buffer->memory_parm);
 		return buffer_size == 0 ? -EINVAL : buffer_size;
 	}
 
@@ -674,7 +713,7 @@
 		ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
 		if (ret < 0) {
 			pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core);
-			return ret;
+			goto free_dma_buffer;
 		}
 	}
 
@@ -686,6 +725,11 @@
 	internal_buffer->dma_buffer = buffer;
 
 	return 0;
+
+free_dma_buffer:
+	kfree(buffer);
+
+	return ret;
 }
 
 static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
@@ -738,7 +782,7 @@
 
 		ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag);
 		if (ret < 0) {
-			pr_err("%s iommu_map virtual address error!\n", __func__);
+			pr_err("%s map virtual address error!\n", __func__);
 			return ret;
 		}
 
@@ -751,7 +795,7 @@
 
 		ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job);
 		if (ret < 0) {
-			pr_err("%s iommu_map physical address error!\n", __func__);
+			pr_err("%s map physical address error!\n", __func__);
 			return ret;
 		}
 
@@ -1130,9 +1174,15 @@
 
 	if (job->flags & RGA_JOB_USE_HANDLE) {
 		order = get_order(page_count * sizeof(uint32_t *));
+		if (order >= MAX_ORDER) {
+			pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+			       order, MAX_ORDER);
+			return -ENOMEM;
+		}
+
 		page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 		if (page_table == NULL) {
-			pr_err("%s can not alloc pages for pages, order = %d\n",
+			pr_err("%s can not alloc pages for page_table, order = %d\n",
 			       __func__, order);
 			return -ENOMEM;
 		}
@@ -1189,9 +1239,15 @@
 
 	if (job->flags & RGA_JOB_USE_HANDLE) {
 		order = get_order(page_count * sizeof(uint32_t *));
+		if (order >= MAX_ORDER) {
+			pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+			       order, MAX_ORDER);
+			return -ENOMEM;
+		}
+
 		page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 		if (page_table == NULL) {
-			pr_err("%s can not alloc pages for pages, order = %d\n",
+			pr_err("%s can not alloc pages for page_table, order = %d\n",
 			       __func__, order);
 			return -ENOMEM;
 		}
@@ -1334,6 +1390,7 @@
 			     uint64_t handle,
 			     uint64_t *channel_addr,
 			     struct rga_internal_buffer **buf,
+			     int require_size,
 			     enum dma_data_direction dir)
 {
 	int ret = 0;
@@ -1369,6 +1426,14 @@
 		return ret;
 	}
 
+	if (internal_buffer->size < require_size) {
+		ret = -EINVAL;
+		pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n",
+		       internal_buffer->size, (unsigned long)handle, require_size);
+
+		goto put_internal_buffer;
+	}
+
 	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
 		/*
 		 * Some userspace virtual addresses do not have an
@@ -1378,11 +1443,19 @@
 		ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");
-			return ret;
+			goto put_internal_buffer;
 		}
 	}
 
 	return 0;
+
+put_internal_buffer:
+	mutex_lock(&mm->lock);
+	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
+	mutex_unlock(&mm->lock);
+
+	return ret;
+
 }
 
 static void rga_mm_put_buffer(struct rga_mm *mm,
@@ -1397,72 +1470,6 @@
 	mutex_lock(&mm->lock);
 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
 	mutex_unlock(&mm->lock);
-}
-
-static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
-					   struct rga_job *job,
-					   struct rga_img_info_t *img,
-					   struct rga_job_buffer *job_buf,
-					   enum dma_data_direction dir)
-{
-	int ret = 0;
-	int handle = 0;
-
-	/* using third-address */
-	if (img->uv_addr > 0) {
-		handle = img->yrgb_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
-						&job_buf->y_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
-				return ret;
-			}
-		}
-
-		handle = img->uv_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
-						&job_buf->uv_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src uv address info!\n", handle);
-				return ret;
-			}
-		}
-
-		handle = img->v_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
-						&job_buf->v_addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src uv address info!\n", handle);
-				return ret;
-			}
-		}
-	} else {
-		handle = img->yrgb_addr;
-		if (handle > 0) {
-			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
-						&job_buf->addr, dir);
-			if (ret < 0) {
-				pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
-				return ret;
-			}
-		}
-
-		rga_convert_addr(img, false);
-	}
-
-	if (job->scheduler->data->mmu == RGA_MMU &&
-	    rga_mm_is_need_mmu(job, job_buf->addr)) {
-		ret = rga_mm_set_mmu_base(job, img, job_buf);
-		if (ret < 0) {
-			pr_err("Can't set RGA2 MMU_BASE from handle!\n");
-			return ret;
-		}
-	}
-
-	return 0;
 }
 
 static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
@@ -1481,6 +1488,83 @@
 	free_pages((unsigned long)job_buf->page_table, job_buf->order);
 }
 
+static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
+					  struct rga_job *job,
+					  struct rga_img_info_t *img,
+					  struct rga_job_buffer *job_buf,
+					  enum dma_data_direction dir)
+{
+	int ret = 0;
+	int handle = 0;
+	int img_size, yrgb_size, uv_size, v_size;
+
+	img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
+				      &yrgb_size, &uv_size, &v_size);
+	if (img_size <= 0) {
+		pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
+		       img->vir_w, img->vir_h, rga_get_format_name(img->format));
+		return -EINVAL;
+	}
+
+	/* using third-address */
+	if (img->uv_addr > 0) {
+		handle = img->yrgb_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+						&job_buf->y_addr, yrgb_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+				return ret;
+			}
+		}
+
+		handle = img->uv_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
+						&job_buf->uv_addr, uv_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get uv address info!\n", handle);
+				return ret;
+			}
+		}
+
+		handle = img->v_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
+						&job_buf->v_addr, v_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get uv address info!\n", handle);
+				return ret;
+			}
+		}
+	} else {
+		handle = img->yrgb_addr;
+		if (handle > 0) {
+			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+						&job_buf->addr, img_size, dir);
+			if (ret < 0) {
+				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+				return ret;
+			}
+		}
+
+		rga_convert_addr(img, false);
+	}
+
+	if (job->scheduler->data->mmu == RGA_MMU &&
+	    rga_mm_is_need_mmu(job, job_buf->addr)) {
+		ret = rga_mm_set_mmu_base(job, img, job_buf);
+		if (ret < 0) {
+			pr_err("Can't set RGA2 MMU_BASE from handle!\n");
+
+			rga_mm_put_channel_handle_info(mm, job, job_buf, dir);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int rga_mm_get_handle_info(struct rga_job *job)
 {
 	int ret = 0;
@@ -1496,7 +1580,7 @@
 					      &job->src_buffer,
 					      DMA_TO_DEVICE);
 		if (ret < 0) {
-			pr_err("Can't get src buffer third info!\n");
+			pr_err("Can't get src buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1506,7 +1590,7 @@
 					      &job->dst_buffer,
 					      DMA_TO_DEVICE);
 		if (ret < 0) {
-			pr_err("Can't get dst buffer third info!\n");
+			pr_err("Can't get dst buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1528,7 +1612,7 @@
 						      DMA_BIDIRECTIONAL);
 		}
 		if (ret < 0) {
-			pr_err("Can't get pat buffer third info!\n");
+			pr_err("Can't get pat buffer info from handle!\n");
 			return ret;
 		}
 	}
@@ -1869,7 +1953,7 @@
 uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
 			      struct rga_session *session)
 {
-	int ret = 0;
+	int ret = 0, new_id;
 	struct rga_mm *mm;
 	struct rga_internal_buffer *internal_buffer;
 
@@ -1911,9 +1995,14 @@
 	 * allocation under our spinlock.
 	 */
 	idr_preload(GFP_KERNEL);
-	internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL);
+	new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT);
 	idr_preload_end();
+	if (new_id < 0) {
+		pr_err("internal_buffer alloc id failed!\n");
+		goto FREE_INTERNAL_BUFFER;
+	}
 
+	internal_buffer->handle = new_id;
 	mm->buffer_count++;
 
 	if (DEBUGGER_EN(MM)) {