hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -577,6 +577,13 @@
 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
 	}
 
+	/*
+	 * Some userspace virtual addresses do not have an
+	 * interface for flushing the cache, so it is mandatory
+	 * to flush the cache when the virtual address is used.
+	 */
+	mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
+
 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
 		       scheduler->core, mm_flag);
@@ -1295,13 +1302,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1309,7 +1309,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
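
Note on the hunk above: for physically contiguous buffers the sync now goes through dma_sync_single_for_device(), and the sg_table lookup is only needed on the scatter-gather path. A minimal sketch of that selection (illustrative names, not the driver's code), assuming the device and mapping are already set up:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: flush CPU caches so the device sees current data,
 * choosing the single-region or scatter-gather variant of the streaming
 * DMA API depending on how the buffer was mapped.
 */
static void example_sync_for_device(struct device *dev, bool contiguous,
				    dma_addr_t dma_addr, size_t size,
				    struct sg_table *sgt,
				    enum dma_data_direction dir)
{
	if (contiguous)
		dma_sync_single_for_device(dev, dma_addr, size, dir);
	else
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

Passing buffer->phys_addr where the API expects a dma_addr_t, as the hunk does, relies on the DMA address and the physical address being identical for this device, i.e. no IOMMU translation on that path.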
@@ -1321,13 +1332,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1335,7 +1339,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
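
The *_for_cpu hunk mirrors the device-side change; the two helpers are intended to bracket any CPU access that happens between hardware jobs. A hypothetical usage pattern for a single contiguous mapping (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/*
 * Illustrative only: hand a streaming-DMA buffer to the CPU, touch it, then
 * hand it back to the device, keeping caches coherent in both directions.
 */
static void example_cpu_touch(struct device *dev, dma_addr_t addr,
			      void *vaddr, size_t size)
{
	dma_sync_single_for_cpu(dev, addr, size, DMA_BIDIRECTIONAL);
	memset(vaddr, 0, size);		/* CPU fills or reads the buffer here */
	dma_sync_single_for_device(dev, addr, size, DMA_BIDIRECTIONAL);
}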
@@ -1434,7 +1449,7 @@
 		goto put_internal_buffer;
 	}
 
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		/*
 		 * Some userspace virtual addresses do not have an
 		 * interface for flushing the cache, so it is mandatory
@@ -1463,7 +1478,7 @@
 				      struct rga_internal_buffer *internal_buffer,
 				      enum dma_data_direction dir)
 {
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
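
With these two hunks the cache maintenance is gated on the mm_flag bit instead of the buffer type, so any mapping tagged RGA_MEM_FORCE_FLUSH_CACHE is synced no matter how it was imported. The condition boils down to a small predicate; the helper below is a sketch only, with the flag bit passed in rather than taken from the driver's headers:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Illustrative only: sync when the mapping demands it and data actually moves. */
static bool example_needs_cache_sync(u32 mm_flag, u32 force_flush_bit,
				     enum dma_data_direction dir)
{
	return (mm_flag & force_flush_bit) && dir != DMA_NONE;
}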
@@ -1574,6 +1589,53 @@
 
 	req = &job->rga_command_base;
 	mm = rga_drvdata->mm;
+
+	switch (req->render_mode) {
+	case BITBLT_MODE:
+	case COLOR_PALETTE_MODE:
+		if (unlikely(req->src.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] src0 channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->src.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (req->bsfilter_flag) {
+			if (unlikely(req->pat.yrgb_addr <= 0)) {
+				pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must be valid!",
+				       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+				return -EINVAL;
+			}
+		}
+
+		break;
+	case COLOR_FILL_MODE:
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+
+	case UPDATE_PALETTE_TABLE_MODE:
+	case UPDATE_PATTEN_BUF_MODE:
+		if (unlikely(req->pat.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		pr_err("%s, unknown render mode!\n", __func__);
+		break;
+	}
 
 	if (likely(req->src.yrgb_addr > 0)) {
 		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
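
The switch added above rejects a request as soon as a channel required by the chosen render mode carries a zero handle, so the failure is reported against that specific channel instead of surfacing later from the handle lookup. Each branch applies the same per-channel test, which could be read as one predicate; the helper below is purely illustrative and not part of the patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/compiler.h>

/*
 * Illustrative only: the check each branch of the switch applies to a
 * required channel handle.
 */
static int example_check_channel_handle(u32 render_mode, const char *channel,
					u64 handle)
{
	if (unlikely(handle == 0)) {
		pr_err("render_mode[0x%x] %s channel handle must be valid!\n",
		       render_mode, channel);
		return -EINVAL;
	}

	return 0;
}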
@@ -1765,7 +1827,7 @@
 				   struct rga_job_buffer *job_buffer,
 				   enum dma_data_direction dir)
 {
-	if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
@@ -1802,12 +1864,7 @@
 		goto error_unmap_buffer;
 	}
 
-	if (buffer->type == RGA_VIRTUAL_ADDRESS) {
-		/*
-		 * Some userspace virtual addresses do not have an
-		 * interface for flushing the cache, so it is mandatory
-		 * to flush the cache when the virtual address is used.
-		 */
+	if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");