@@ -577,6 +577,13 @@
 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
 	}
 
+	/*
+	 * Some userspace virtual addresses do not have an
+	 * interface for flushing the cache, so it is mandatory
+	 * to flush the cache when the virtual address is used.
+	 */
+	mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
+
 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
 		       scheduler->core, mm_flag);
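This hunk tags every buffer mapped through the userspace virtual-address path with RGA_MEM_FORCE_FLUSH_CACHE; the hunks below then gate cache maintenance on that bit instead of on the buffer type. A minimal sketch of how the bit is meant to be consumed, using a placeholder flag value and helper name (in the driver the real bit lives in rga_internal_buffer->mm_flag and is tested directly at the sync call sites):

#include <linux/bits.h>
#include <linux/dma-direction.h>
#include <linux/types.h>

/* Placeholder bit for illustration only; the driver defines the real value. */
#define EXAMPLE_FORCE_FLUSH_CACHE	BIT(3)

/* Hypothetical helper: flush only buffers tagged at map time, and only for real transfers. */
static bool example_needs_cache_flush(u32 mm_flag, enum dma_data_direction dir)
{
	return (mm_flag & EXAMPLE_FORCE_FLUSH_CACHE) && dir != DMA_NONE;
}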
@@ -1295,13 +1302,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1309,7 +1309,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
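The reworked device-direction sync helper uses dma_sync_single_for_device() for physically contiguous buffers and keeps the scatter-gather call for everything else. Both belong to the streaming DMA API, so each sync has to pair with a mapping made on the same device for the same direction. A hedged sketch of that pairing for the contiguous case; dev, cpu_buf and len are placeholders, not driver names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Illustration of the streaming-DMA ownership handoff that the patched
 * sync path relies on for RGA_MEM_PHYSICAL_CONTIGUOUS buffers.
 */
static int example_contiguous_sync(struct device *dev, void *cpu_buf, size_t len)
{
	dma_addr_t dma_handle;

	/* Map once; the device may now DMA from/to the buffer. */
	dma_handle = dma_map_single(dev, cpu_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -ENOMEM;

	/* CPU writes after mapping must be made visible before the job runs. */
	dma_sync_single_for_device(dev, dma_handle, len, DMA_TO_DEVICE);

	/* ... start the hardware job here ... */

	/* Hand the buffer back to the CPU once the device is done with it. */
	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
	return 0;
}

In the driver the mapping already exists by the time the helper runs, so only the two sync calls in the sketch correspond to the patched code paths.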
@@ -1321,13 +1332,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1335,7 +1339,18 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+		dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
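The CPU-direction helper mirrors the change above: contiguous buffers go through dma_sync_single_for_cpu(), everything else walks the sg_table. For the scatter-gather branch the table must have been mapped with dma_map_sg() on the same device; a minimal sketch of that pairing, with placeholder names (in the driver the table comes from rga_mm_lookup_sgt() and was mapped when the buffer was imported):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch of the sg-based ownership handoff behind the patched _for_cpu path. */
static void example_sg_sync_for_cpu(struct device *dev, struct sg_table *sgt)
{
	int nents;

	/* Map the scatter-gather list for a device-to-memory transfer. */
	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	if (nents <= 0)
		return;

	/* ... hardware writes into the buffer here ... */

	/* Give the pages back to the CPU before reading the result. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
}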
@@ -1434,7 +1449,7 @@
 		goto put_internal_buffer;
 	}
 
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		/*
 		 * Some userspace virtual addresses do not have an
 		 * interface for flushing the cache, so it is mandatory
@@ -1463,7 +1478,7 @@
 			     struct rga_internal_buffer *internal_buffer,
 			     enum dma_data_direction dir)
 {
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
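From here on, every call site that used to test the buffer type against RGA_VIRTUAL_ADDRESS keys off RGA_MEM_FORCE_FLUSH_CACHE instead, including the map/unmap and job-buffer paths further down. The benefit is that a future mapping path only has to set the flag; the shared sync helpers need no changes. A hypothetical example of such an opt-in (the function below does not exist in the driver):

/*
 * Hypothetical mapping step for some other buffer type that also lacks a
 * cache-maintenance interface of its own. Setting the flag is enough for
 * the sync paths above to pick the buffer up.
 */
static void example_tag_for_forced_flush(struct rga_internal_buffer *internal_buffer)
{
	internal_buffer->mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
}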
@@ -1574,6 +1589,53 @@
 
 	req = &job->rga_command_base;
 	mm = rga_drvdata->mm;
+
+	switch (req->render_mode) {
+	case BITBLT_MODE:
+	case COLOR_PALETTE_MODE:
+		if (unlikely(req->src.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] src0 channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->src.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (req->bsfilter_flag) {
+			if (unlikely(req->pat.yrgb_addr <= 0)) {
+				pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must be valid!",
+				       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+				return -EINVAL;
+			}
+		}
+
+		break;
+	case COLOR_FILL_MODE:
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+
+	case UPDATE_PALETTE_TABLE_MODE:
+	case UPDATE_PATTEN_BUF_MODE:
+		if (unlikely(req->pat.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		pr_err("%s, unknown render mode!\n", __func__);
+		break;
+	}
 
 	if (likely(req->src.yrgb_addr > 0)) {
 		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
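The new switch rejects jobs whose required channel handles are missing before any handle lookup runs: BITBLT and COLOR_PALETTE need src and dst (plus src1/pat when bsfilter_flag is set), COLOR_FILL needs only dst, and the palette/pattern update modes need only pat. A condensed sketch of the same rules, assuming the mode constants and rga_req layout used by the switch; it is an illustrative refactor, not the driver's code:

/* Illustrative condensation of the checks above; not the driver's code. */
static int example_check_handles(const struct rga_req *req)
{
	bool need_src = false, need_dst = false, need_pat = false;

	switch (req->render_mode) {
	case BITBLT_MODE:
	case COLOR_PALETTE_MODE:
		need_src = true;
		need_dst = true;
		need_pat = req->bsfilter_flag;	/* src1/pat only with bsfilter */
		break;
	case COLOR_FILL_MODE:
		need_dst = true;
		break;
	case UPDATE_PALETTE_TABLE_MODE:
	case UPDATE_PATTEN_BUF_MODE:
		need_pat = true;
		break;
	default:
		return -EINVAL;	/* the patch only logs here; rejecting is this sketch's choice */
	}

	if ((need_src && req->src.yrgb_addr <= 0) ||
	    (need_dst && req->dst.yrgb_addr <= 0) ||
	    (need_pat && req->pat.yrgb_addr <= 0))
		return -EINVAL;

	return 0;
}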
@@ -1765,7 +1827,7 @@
 				struct rga_job_buffer *job_buffer,
 				enum dma_data_direction dir)
 {
-	if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
@@ -1802,12 +1864,7 @@
 		goto error_unmap_buffer;
 	}
 
-	if (buffer->type == RGA_VIRTUAL_ADDRESS) {
-		/*
-		 * Some userspace virtual addresses do not have an
-		 * interface for flushing the cache, so it is mandatory
-		 * to flush the cache when the virtual address is used.
-		 */
+	if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");