.. | ..
455 | 455 | phys_addr = sg_phys(buffer->sgt->sgl);
456 | 456 | if (phys_addr == 0) {
457 | 457 | pr_err("%s get physical address error!", __func__);
| 458 | + ret = -EFAULT;
458 | 459 | goto unmap_buffer;
459 | 460 | }
460 | 461 |
.. | ..
571 | 572 | phys_addr = sg_phys(sgt->sgl);
572 | 573 | if (phys_addr == 0) {
573 | 574 | pr_err("%s get physical address error!", __func__);
| 575 | + ret = -EFAULT;
574 | 576 | goto free_sgt;
575 | 577 | }
576 | 578 |
577 | 579 | mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
578 | 580 | }
| 581 | +
| 582 | + /*
| 583 | + * Some userspace virtual addresses do not have an
| 584 | + * interface for flushing the cache, so it is mandatory
| 585 | + * to flush the cache when the virtual address is used.
| 586 | + */
| 587 | + mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
579 | 588 |
580 | 589 | if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
581 | 590 | pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
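Since mm_flag is a plain bitmask, forcing cache maintenance for every virtual-address import only requires OR-ing one more capability bit in before the scheduler limit check runs. A minimal, self-contained sketch of this kind of capability gating (all names here are hypothetical, not the driver's API):

```c
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical capability bits, for illustration only. */
#define DEMO_MEM_PHYSICAL_CONTIGUOUS	BIT(0)
#define DEMO_MEM_FORCE_FLUSH_CACHE	BIT(1)

/* Accept the buffer only if the core supports every bit it requires. */
static bool demo_check_memory_limit(u32 supported, u32 mm_flag)
{
	return (mm_flag & ~supported) == 0;
}
```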
.. | ..
614 | 623 | if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
615 | 624 | break;
616 | 625 |
617 | | - pr_err("Current %s[%d] cannot support virtual address!\n",
| 626 | + pr_err("Current %s[%d] cannot support physically discontinuous virtual address!\n",
618 | 627 | rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
| 628 | + ret = -EOPNOTSUPP;
619 | 629 | goto free_dma_buffer;
620 | 630 | }
621 | 631 |
.. | ..
833 | 843 | return 0;
834 | 844 | }
835 | 845 |
| 846 | +static void rga_mm_buffer_destroy(struct rga_internal_buffer *buffer)
| 847 | +{
| 848 | + rga_mm_kref_release_buffer(&buffer->refcount);
| 849 | +}
| 850 | +
836 | 851 | static struct rga_internal_buffer *
837 | 852 | rga_mm_lookup_external(struct rga_mm *mm_session,
838 | | - struct rga_external_buffer *external_buffer)
| 853 | + struct rga_external_buffer *external_buffer,
| 854 | + struct mm_struct *current_mm)
839 | 855 | {
840 | 856 | int id;
841 | 857 | struct dma_buf *dma_buf = NULL;
.. | ..
868 | 884 | continue;
869 | 885 |
870 | 886 | if (temp_buffer->virt_addr->addr == external_buffer->memory) {
871 | | - output_buffer = temp_buffer;
872 | | - break;
| 887 | + if (temp_buffer->current_mm == current_mm) {
| 888 | + output_buffer = temp_buffer;
| 889 | + break;
| 890 | + }
| 891 | +
| 892 | + continue;
873 | 893 | }
874 | 894 | }
875 | 895 |
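The lookup change above reflects the fact that a userspace virtual address is only meaningful within a single process: the same numeric address in two processes maps to different pages, so a cached import must match on both the address and the owning mm_struct. A minimal illustration of that keying rule, with hypothetical names:

```c
#include <linux/mm_types.h>

/* Hypothetical cache entry for an imported user VA, illustration only. */
struct demo_va_entry {
	unsigned long addr;	/* userspace virtual address */
	struct mm_struct *mm;	/* address space it belongs to */
};

static bool demo_va_entry_matches(const struct demo_va_entry *entry,
				  unsigned long addr, struct mm_struct *mm)
{
	/* Both the address and the owning process must match. */
	return entry->addr == addr && entry->mm == mm;
}
```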
.. | ..
1295 | 1315 | struct sg_table *sgt;
1296 | 1316 | struct rga_scheduler_t *scheduler;
1297 | 1317 |
1298 | | - sgt = rga_mm_lookup_sgt(buffer);
1299 | | - if (sgt == NULL) {
1300 | | - pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
1301 | | - __func__, __LINE__, job->core);
1302 | | - return -EINVAL;
1303 | | - }
1304 | | -
1305 | 1318 | scheduler = buffer->dma_buffer->scheduler;
1306 | 1319 | if (scheduler == NULL) {
1307 | 1320 | pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
.. | ..
1309 | 1322 | return -EFAULT;
1310 | 1323 | }
1311 | 1324 |
1312 | | - dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
| 1325 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
| 1326 | + scheduler->data->mmu != RGA_IOMMU) {
| 1327 | + dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
| 1328 | + } else {
| 1329 | + sgt = rga_mm_lookup_sgt(buffer);
| 1330 | + if (sgt == NULL) {
| 1331 | + pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
| 1332 | + __func__, __LINE__, job->core);
| 1333 | + return -EINVAL;
| 1334 | + }
| 1335 | +
| 1336 | + dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
| 1337 | + }
1313 | 1338 |
1314 | 1339 | return 0;
1315 | 1340 | }
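This hunk (and its for_cpu counterpart below) chooses between the two streaming-DMA sync APIs: a physically contiguous buffer on a core that is not behind an IOMMU can be maintained with a single-range sync, while anything else has to walk the scatterlist. A standalone sketch of the same split, using a hypothetical buffer descriptor:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical buffer descriptor, for illustration only. */
struct demo_buf {
	bool contiguous;	/* physically contiguous, no IOMMU in front */
	dma_addr_t dma_addr;	/* bus address of the contiguous range */
	size_t size;
	struct sg_table *sgt;	/* scatter-gather table otherwise */
};

static void demo_sync_for_device(struct device *dev, struct demo_buf *buf,
				 enum dma_data_direction dir)
{
	if (buf->contiguous)
		/* One contiguous range: a single-range sync is enough. */
		dma_sync_single_for_device(dev, buf->dma_addr, buf->size, dir);
	else
		/* Discontiguous pages: sync every entry in the list. */
		dma_sync_sg_for_device(dev, buf->sgt->sgl,
				       buf->sgt->orig_nents, dir);
}
```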
.. | ..
1321 | 1346 | struct sg_table *sgt;
1322 | 1347 | struct rga_scheduler_t *scheduler;
1323 | 1348 |
1324 | | - sgt = rga_mm_lookup_sgt(buffer);
1325 | | - if (sgt == NULL) {
1326 | | - pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
1327 | | - __func__, __LINE__, job->core);
1328 | | - return -EINVAL;
1329 | | - }
1330 | | -
1331 | 1349 | scheduler = buffer->dma_buffer->scheduler;
1332 | 1350 | if (scheduler == NULL) {
1333 | 1351 | pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
.. | ..
1335 | 1353 | return -EFAULT;
1336 | 1354 | }
1337 | 1355 |
1338 | | - dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
| 1356 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
| 1357 | + scheduler->data->mmu != RGA_IOMMU) {
| 1358 | + dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
| 1359 | + } else {
| 1360 | + sgt = rga_mm_lookup_sgt(buffer);
| 1361 | + if (sgt == NULL) {
| 1362 | + pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
| 1363 | + __func__, __LINE__, job->core);
| 1364 | + return -EINVAL;
| 1365 | + }
| 1366 | +
| 1367 | + dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
| 1368 | + }
1339 | 1369 |
1340 | 1370 | return 0;
1341 | 1371 | }
.. | ..
1434 | 1464 | goto put_internal_buffer;
1435 | 1465 | }
1436 | 1466 |
1437 | | - if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
| 1467 | + if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
1438 | 1468 | /*
1439 | 1469 | * Some userspace virtual addresses do not have an
1440 | 1470 | * interface for flushing the cache, so it is mandatory
.. | ..
1463 | 1493 | struct rga_internal_buffer *internal_buffer,
1464 | 1494 | enum dma_data_direction dir)
1465 | 1495 | {
1466 | | - if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
| 1496 | + if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
1467 | 1497 | if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
1468 | 1498 | pr_err("sync sgt for cpu error!\n");
| 1499 | +
| 1500 | + if (DEBUGGER_EN(MM)) {
| 1501 | + pr_info("handle[%d] put info:\n", (int)internal_buffer->handle);
| 1502 | + rga_mm_dump_buffer(internal_buffer);
| 1503 | + }
1469 | 1504 |
1470 | 1505 | mutex_lock(&mm->lock);
1471 | 1506 | kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
.. | ..
1574 | 1609 |
1575 | 1610 | req = &job->rga_command_base;
1576 | 1611 | mm = rga_drvdata->mm;
| 1612 | +
| 1613 | + switch (req->render_mode) {
| 1614 | + case BITBLT_MODE:
| 1615 | + case COLOR_PALETTE_MODE:
| 1616 | + if (unlikely(req->src.yrgb_addr <= 0)) {
| 1617 | + pr_err("render_mode[0x%x] src0 channel handle[%ld] must be valid!",
| 1618 | + req->render_mode, (unsigned long)req->src.yrgb_addr);
| 1619 | + return -EINVAL;
| 1620 | + }
| 1621 | +
| 1622 | + if (unlikely(req->dst.yrgb_addr <= 0)) {
| 1623 | + pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
| 1624 | + req->render_mode, (unsigned long)req->dst.yrgb_addr);
| 1625 | + return -EINVAL;
| 1626 | + }
| 1627 | +
| 1628 | + if (req->bsfilter_flag) {
| 1629 | + if (unlikely(req->pat.yrgb_addr <= 0)) {
| 1630 | + pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must be valid!",
| 1631 | + req->render_mode, (unsigned long)req->pat.yrgb_addr);
| 1632 | + return -EINVAL;
| 1633 | + }
| 1634 | + }
| 1635 | +
| 1636 | + break;
| 1637 | + case COLOR_FILL_MODE:
| 1638 | + if (unlikely(req->dst.yrgb_addr <= 0)) {
| 1639 | + pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
| 1640 | + req->render_mode, (unsigned long)req->dst.yrgb_addr);
| 1641 | + return -EINVAL;
| 1642 | + }
| 1643 | +
| 1644 | + break;
| 1645 | +
| 1646 | + case UPDATE_PALETTE_TABLE_MODE:
| 1647 | + case UPDATE_PATTEN_BUF_MODE:
| 1648 | + if (unlikely(req->pat.yrgb_addr <= 0)) {
| 1649 | + pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must be valid!",
| 1650 | + req->render_mode, (unsigned long)req->pat.yrgb_addr);
| 1651 | + return -EINVAL;
| 1652 | + }
| 1653 | +
| 1654 | + break;
| 1655 | + default:
| 1656 | + pr_err("%s, unknown render mode!\n", __func__);
| 1657 | + break;
| 1658 | + }
1577 | 1659 |
1578 | 1660 | if (likely(req->src.yrgb_addr > 0)) {
1579 | 1661 | ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
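The added switch enforces, per render mode, which channel handles must be present before any lookup happens: src and dst for BITBLT/COLOR_PALETTE (plus src1/pat when bsfilter_flag is set), dst for COLOR_FILL, and pat for the palette/pattern update modes. The same requirements can be expressed compactly; the helper below is only an illustrative sketch, not part of the driver:

```c
/* Hypothetical summary of which channels each render mode requires. */
struct demo_chan_req {
	bool need_src;
	bool need_dst;
	bool need_pat;
};

static struct demo_chan_req demo_required_channels(u32 render_mode,
						   bool bsfilter)
{
	switch (render_mode) {
	case BITBLT_MODE:
	case COLOR_PALETTE_MODE:
		return (struct demo_chan_req){ .need_src = true,
					       .need_dst = true,
					       .need_pat = bsfilter };
	case COLOR_FILL_MODE:
		return (struct demo_chan_req){ .need_dst = true };
	case UPDATE_PALETTE_TABLE_MODE:
	case UPDATE_PATTEN_BUF_MODE:
		return (struct demo_chan_req){ .need_pat = true };
	default:
		return (struct demo_chan_req){ };
	}
}
```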
.. | ..
1765 | 1847 | struct rga_job_buffer *job_buffer,
1766 | 1848 | enum dma_data_direction dir)
1767 | 1849 | {
1768 | | - if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
| 1850 | + if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
1769 | 1851 | if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
1770 | 1852 | pr_err("sync sgt for cpu error!\n");
1771 | 1853 |
.. | ..
1802 | 1884 | goto error_unmap_buffer;
1803 | 1885 | }
1804 | 1886 |
1805 | | - if (buffer->type == RGA_VIRTUAL_ADDRESS) {
1806 | | - /*
1807 | | - * Some userspace virtual addresses do not have an
1808 | | - * interface for flushing the cache, so it is mandatory
1809 | | - * to flush the cache when the virtual address is used.
1810 | | - */
| 1887 | + if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
1811 | 1888 | ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
1812 | 1889 | if (ret < 0) {
1813 | 1890 | pr_err("sync sgt for device error!\n");
.. | ..
1924 | 2001 | int rga_mm_map_job_info(struct rga_job *job)
1925 | 2002 | {
1926 | 2003 | int ret;
| 2004 | + ktime_t timestamp = ktime_get();
1927 | 2005 |
1928 | 2006 | if (job->flags & RGA_JOB_USE_HANDLE) {
1929 | 2007 | ret = rga_mm_get_handle_info(job);
.. | ..
1931 | 2009 | pr_err("failed to get buffer from handle\n");
1932 | 2010 | return ret;
1933 | 2011 | }
| 2012 | +
| 2013 | + if (DEBUGGER_EN(TIME))
| 2014 | + pr_info("request[%d], get buffer_handle info cost %lld us\n",
| 2015 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
1934 | 2016 | } else {
1935 | 2017 | ret = rga_mm_map_buffer_info(job);
1936 | 2018 | if (ret < 0) {
1937 | 2019 | pr_err("failed to map buffer\n");
1938 | 2020 | return ret;
1939 | 2021 | }
| 2022 | +
| 2023 | + if (DEBUGGER_EN(TIME))
| 2024 | + pr_info("request[%d], map buffer cost %lld us\n",
| 2025 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
1940 | 2026 | }
1941 | 2027 |
1942 | 2028 | return 0;
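The DEBUGGER_EN(TIME) additions here and in rga_mm_unmap_job_info below follow the usual kernel pattern for coarse latency logging: take a ktime_t snapshot up front and report the delta in microseconds afterwards. A minimal standalone sketch of that pattern (the function and message are illustrative, not part of the driver):

```c
#include <linux/ktime.h>
#include <linux/printk.h>

/* Illustrative only: measure how long an arbitrary operation takes. */
static void demo_timed_operation(void)
{
	ktime_t timestamp = ktime_get();

	/* ... the work being measured ... */

	pr_info("operation cost %lld us\n",
		ktime_us_delta(ktime_get(), timestamp));
}
```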
.. | ..
1944 | 2030 |
1945 | 2031 | void rga_mm_unmap_job_info(struct rga_job *job)
1946 | 2032 | {
1947 | | - if (job->flags & RGA_JOB_USE_HANDLE)
| 2033 | + ktime_t timestamp = ktime_get();
| 2034 | +
| 2035 | + if (job->flags & RGA_JOB_USE_HANDLE) {
1948 | 2036 | rga_mm_put_handle_info(job);
1949 | | - else
| 2037 | +
| 2038 | + if (DEBUGGER_EN(TIME))
| 2039 | + pr_info("request[%d], put buffer_handle info cost %lld us\n",
| 2040 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
| 2041 | + } else {
1950 | 2042 | rga_mm_unmap_buffer_info(job);
| 2043 | +
| 2044 | + if (DEBUGGER_EN(TIME))
| 2045 | + pr_info("request[%d], unmap buffer cost %lld us\n",
| 2046 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
| 2047 | + }
1951 | 2048 | }
1952 | 2049 |
1953 | | -uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
1954 | | - struct rga_session *session)
| 2050 | +/*
| 2051 | + * rga_mm_import_buffer - Import an external buffer into the RGA driver
| 2052 | + *
| 2053 | + * @external_buffer: [in] Parameters of the external buffer
| 2054 | + * @session: [in] Session of the current process
| 2055 | + *
| 2056 | + * Returns:
| 2057 | + * A value > 0 means the import succeeded and is the generated
| 2058 | + * buffer handle; a negative error code is returned on failure.
| 2059 | + */
| 2060 | +int rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
| 2061 | + struct rga_session *session)
1955 | 2062 | {
1956 | 2063 | int ret = 0, new_id;
1957 | 2064 | struct rga_mm *mm;
.. | ..
1960 | 2067 | mm = rga_drvdata->mm;
1961 | 2068 | if (mm == NULL) {
1962 | 2069 | pr_err("rga mm is null!\n");
1963 | | - return 0;
| 2070 | + return -EFAULT;
1964 | 2071 | }
1965 | 2072 |
1966 | 2073 | mutex_lock(&mm->lock);
1967 | 2074 |
1968 | 2075 | /* first, Check whether to rga_mm */
1969 | | - internal_buffer = rga_mm_lookup_external(mm, external_buffer);
| 2076 | + internal_buffer = rga_mm_lookup_external(mm, external_buffer, current->mm);
1970 | 2077 | if (!IS_ERR_OR_NULL(internal_buffer)) {
1971 | 2078 | kref_get(&internal_buffer->refcount);
1972 | 2079 |
1973 | 2080 | mutex_unlock(&mm->lock);
| 2081 | +
| 2082 | + if (DEBUGGER_EN(MM)) {
| 2083 | + pr_info("import existing buffer:\n");
| 2084 | + rga_mm_dump_buffer(internal_buffer);
| 2085 | + }
| 2086 | +
1974 | 2087 | return internal_buffer->handle;
1975 | 2088 | }
1976 | 2089 |
.. | ..
1980 | 2093 | pr_err("%s alloc internal_buffer error!\n", __func__);
1981 | 2094 |
1982 | 2095 | mutex_unlock(&mm->lock);
1983 | | - return 0;
| 2096 | + return -ENOMEM;
1984 | 2097 | }
1985 | 2098 |
1986 | 2099 | ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true);
.. | ..
1999 | 2112 | idr_preload_end();
2000 | 2113 | if (new_id < 0) {
2001 | 2114 | pr_err("internal_buffer alloc id failed!\n");
| 2115 | + ret = new_id;
2002 | 2116 | goto FREE_INTERNAL_BUFFER;
2003 | 2117 | }
2004 | 2118 |
.. | ..
2017 | 2131 | mutex_unlock(&mm->lock);
2018 | 2132 | kfree(internal_buffer);
2019 | 2133 |
2020 | | - return 0;
| 2134 | + return ret;
2021 | 2135 | }
2022 | 2136 |
2023 | 2137 | int rga_mm_release_buffer(uint32_t handle)
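With the return type changed from uint32_t to int, 0 is no longer the failure indicator; callers now get back either a positive handle or a negative errno. A hypothetical caller (not from the driver) that propagates the new error codes might look like this:

```c
/*
 * Illustrative caller only: the wrapper name and the handle field
 * assignment are assumptions, not part of the RGA driver's API.
 */
static int demo_import(struct rga_external_buffer *ext,
		       struct rga_session *session)
{
	int handle = rga_mm_import_buffer(ext, session);

	if (handle < 0)
		return handle;	/* e.g. -EFAULT, -ENOMEM, or an idr error */

	ext->handle = handle;	/* assumed field, for illustration */
	return 0;
}
```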
.. | ..
2069 | 2183 |
2070 | 2184 | idr_for_each_entry(&mm->memory_idr, buffer, i) {
2071 | 2185 | if (session == buffer->session) {
2072 | | - pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n",
| 2186 | + pr_err("[tgid:%d] Destroy handle[%d] when the user exits\n",
2073 | 2187 | session->tgid, buffer->handle);
2074 | | - kref_put(&buffer->refcount, rga_mm_kref_release_buffer);
| 2188 | + rga_mm_buffer_destroy(buffer);
2075 | 2189 | }
2076 | 2190 | }
2077 | 2191 |
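The session-cleanup change swaps a plain kref_put() for the new rga_mm_buffer_destroy(), which calls the release function directly: when the owning process exits, its buffers are torn down immediately instead of merely dropping one reference and waiting for the count to reach zero. A minimal sketch of the two teardown styles, with hypothetical names:

```c
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical refcounted object, for illustration only. */
struct demo_obj {
	struct kref refcount;
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_obj, refcount));
}

static void demo_put(struct demo_obj *obj)
{
	/* Normal path: free only when the last reference is dropped. */
	kref_put(&obj->refcount, demo_release);
}

static void demo_destroy(struct demo_obj *obj)
{
	/* Forced path: release immediately, regardless of remaining refs. */
	demo_release(&obj->refcount);
}
```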