.. | ..
455 | 455 | phys_addr = sg_phys(buffer->sgt->sgl);
456 | 456 | if (phys_addr == 0) {
457 | 457 | pr_err("%s get physical address error!", __func__);
| 458 | + ret = -EFAULT;
458 | 459 | goto unmap_buffer;
459 | 460 | }
460 | 461 |
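Both this hunk and the next fix the same bug class: the function reaches a `goto` unwind path while `ret` still holds the value from an earlier, successful step, so the caller is told the operation worked. A minimal sketch of the corrected idiom, with hypothetical names standing in for the driver's real mapping state:

```c
#include <linux/errno.h>
#include <linux/scatterlist.h>

struct example_buffer {			/* hypothetical stand-in */
	struct sg_table *sgt;
};

static int example_map(struct example_buffer *buffer)
{
	int ret = 0;
	phys_addr_t phys_addr;

	phys_addr = sg_phys(buffer->sgt->sgl);
	if (phys_addr == 0) {
		ret = -EFAULT;		/* set the errno before jumping ... */
		goto unmap_buffer;	/* ... so the unwind path reports failure */
	}

	return 0;

unmap_buffer:
	/* undo whatever the real function mapped before this point */
	return ret;
}
```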
.. | ..
571 | 572 | phys_addr = sg_phys(sgt->sgl);
572 | 573 | if (phys_addr == 0) {
573 | 574 | pr_err("%s get physical address error!", __func__);
| 575 | + ret = -EFAULT;
574 | 576 | goto free_sgt;
575 | 577 | }
576 | 578 |
.. | ..
621 | 623 | if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
622 | 624 | break;
623 | 625 |
624 | | - pr_err("Current %s[%d] cannot support virtual address!\n",
| 626 | + pr_err("Current %s[%d] cannot support physically discontinuous virtual address!\n",
625 | 627 | rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
| 628 | + ret = -EOPNOTSUPP;
626 | 629 | goto free_dma_buffer;
627 | 630 | }
628 | 631 |
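This hunk tightens both the message and the errno: a core without an IOMMU can only accept a user virtual address whose backing pages happen to be physically contiguous, and refusing the scattered case is a capability limit (`-EOPNOTSUPP`), not an addressing fault. A minimal sketch of that policy, with hypothetical names:

```c
#include <linux/errno.h>
#include <linux/types.h>

#define EX_MEM_PHYSICAL_CONTIGUOUS	(1 << 0)	/* hypothetical flag bit */

static int example_check_vaddr(u32 mm_flag, bool core_has_iommu)
{
	if (mm_flag & EX_MEM_PHYSICAL_CONTIGUOUS)
		return 0;	/* contiguous memory works on every core */

	if (core_has_iommu)
		return 0;	/* an IOMMU can map scattered pages */

	return -EOPNOTSUPP;	/* scattered pages and no IOMMU: refuse early */
}
```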
.. | ..
840 | 843 | return 0;
841 | 844 | }
842 | 845 |
| 846 | +static void rga_mm_buffer_destroy(struct rga_internal_buffer *buffer)
| 847 | +{
| 848 | + rga_mm_kref_release_buffer(&buffer->refcount);
| 849 | +}
| 850 | +
843 | 851 | static struct rga_internal_buffer *
844 | 852 | rga_mm_lookup_external(struct rga_mm *mm_session,
845 | | - struct rga_external_buffer *external_buffer)
| 853 | + struct rga_external_buffer *external_buffer,
| 854 | + struct mm_struct *current_mm)
846 | 855 | {
847 | 856 | int id;
848 | 857 | struct dma_buf *dma_buf = NULL;
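`rga_mm_buffer_destroy()` deliberately calls the kref release function directly rather than going through `kref_put()`: it tears the buffer down unconditionally, even if other references are still outstanding. A sketch of the two paths, using only the standard kref API (every other name is hypothetical):

```c
#include <linux/kernel.h>
#include <linux/kref.h>

struct example_buffer {
	struct kref refcount;
	/* ... mapping state ... */
};

static void example_release(struct kref *kref)
{
	struct example_buffer *buf =
		container_of(kref, struct example_buffer, refcount);

	/* unmap and free buf here */
}

/* normal path: release only when the last reference is dropped */
static void example_put(struct example_buffer *buf)
{
	kref_put(&buf->refcount, example_release);
}

/* forced path: destroy now, regardless of the remaining count */
static void example_destroy(struct example_buffer *buf)
{
	example_release(&buf->refcount);
}
```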
.. | ..
875 | 884 | continue;
876 | 885 |
877 | 886 | if (temp_buffer->virt_addr->addr == external_buffer->memory) {
878 | | - output_buffer = temp_buffer;
879 | | - break;
| 887 | + if (temp_buffer->current_mm == current_mm) {
| 888 | + output_buffer = temp_buffer;
| 889 | + break;
| 890 | + }
| 891 | +
| 892 | + continue;
880 | 893 | }
881 | 894 |
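This lookup change is the companion to the new `current_mm` parameter: a user virtual address is only meaningful inside one address space, so two processes passing the same numeric address must not hit the same cached buffer. A hypothetical sketch of the match rule:

```c
#include <linux/mm_types.h>
#include <linux/types.h>

struct example_cached_buffer {		/* hypothetical cache entry */
	unsigned long vaddr;		/* user virtual address */
	struct mm_struct *owner_mm;	/* address space it belongs to */
};

static bool example_match(struct example_cached_buffer *buf,
			  unsigned long vaddr, struct mm_struct *mm)
{
	/* the address alone is ambiguous; the owning mm disambiguates it */
	return buf->vaddr == vaddr && buf->owner_mm == mm;
}
```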
.. | ..
1309 | 1322 | return -EFAULT;
1310 | 1323 | }
1311 | 1324 |
1312 | | - if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
| 1325 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
| 1326 | + scheduler->data->mmu != RGA_IOMMU) {
1313 | 1327 | dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
1314 | 1328 | } else {
1315 | 1329 | sgt = rga_mm_lookup_sgt(buffer);
.. | ..
1339 | 1353 | return -EFAULT;
1340 | 1354 | }
1341 | 1355 |
1342 | | - if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
| 1356 | + if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
| 1357 | + scheduler->data->mmu != RGA_IOMMU) {
1343 | 1358 | dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
1344 | 1359 | } else {
1345 | 1360 | sgt = rga_mm_lookup_sgt(buffer);
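These two hunks restrict the single-range cache sync to cores without an IOMMU. A plausible rationale: `dma_sync_single_*()` must be called with the address the DMA API actually mapped, and behind an IOMMU the device-visible address is an IOVA rather than `buffer->phys_addr`, so IOMMU cores fall through to the scatterlist-based sync instead. A hedged sketch of the split, hypothetical apart from the `dma_sync_*` calls:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_sync_for_device(struct device *dev, dma_addr_t phys_addr,
				    size_t size, struct sg_table *sgt,
				    bool core_has_iommu,
				    enum dma_data_direction dir)
{
	if (!core_has_iommu) {
		/* direct addressing: one contiguous range covers the buffer */
		dma_sync_single_for_device(dev, phys_addr, size, dir);
		return;
	}

	/* IOMMU core: sync each segment recorded in the sg table instead */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
```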
.. | ..
1481 | 1496 | if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
1482 | 1497 | if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
1483 | 1498 | pr_err("sync sgt for cpu error!\n");
| 1499 | +
| 1500 | + if (DEBUGGER_EN(MM)) {
| 1501 | + pr_info("handle[%d] put info:\n", (int)internal_buffer->handle);
| 1502 | + rga_mm_dump_buffer(internal_buffer);
| 1503 | + }
1484 | 1504 |
1485 | 1505 | mutex_lock(&mm->lock);
1486 | 1506 | kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
.. | ..
1981 | 2001 | int rga_mm_map_job_info(struct rga_job *job)
1982 | 2002 | {
1983 | 2003 | int ret;
| 2004 | + ktime_t timestamp = ktime_get();
1984 | 2005 |
1985 | 2006 | if (job->flags & RGA_JOB_USE_HANDLE) {
1986 | 2007 | ret = rga_mm_get_handle_info(job);
.. | ..
1988 | 2009 | pr_err("failed to get buffer from handle\n");
1989 | 2010 | return ret;
1990 | 2011 | }
| 2012 | +
| 2013 | + if (DEBUGGER_EN(TIME))
| 2014 | + pr_info("request[%d], get buffer_handle info cost %lld us\n",
| 2015 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
1991 | 2016 | } else {
1992 | 2017 | ret = rga_mm_map_buffer_info(job);
1993 | 2018 | if (ret < 0) {
1994 | 2019 | pr_err("failed to map buffer\n");
1995 | 2020 | return ret;
1996 | 2021 | }
| 2022 | +
| 2023 | + if (DEBUGGER_EN(TIME))
| 2024 | + pr_info("request[%d], map buffer cost %lld us\n",
| 2025 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
1997 | 2026 | }
1998 | 2027 |
1999 | 2028 | return 0;
.. | ..
2001 | 2030 |
2002 | 2031 | void rga_mm_unmap_job_info(struct rga_job *job)
2003 | 2032 | {
2004 | | - if (job->flags & RGA_JOB_USE_HANDLE)
| 2033 | + ktime_t timestamp = ktime_get();
| 2034 | +
| 2035 | + if (job->flags & RGA_JOB_USE_HANDLE) {
2005 | 2036 | rga_mm_put_handle_info(job);
2006 | | - else
| 2037 | +
| 2038 | + if (DEBUGGER_EN(TIME))
| 2039 | + pr_info("request[%d], put buffer_handle info cost %lld us\n",
| 2040 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
| 2041 | + } else {
2007 | 2042 | rga_mm_unmap_buffer_info(job);
| 2043 | +
| 2044 | + if (DEBUGGER_EN(TIME))
| 2045 | + pr_info("request[%d], unmap buffer cost %lld us\n",
| 2046 | + job->request_id, ktime_us_delta(ktime_get(), timestamp));
| 2047 | + }
2008 | 2048 | }
2009 | 2049 |
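The map and unmap paths get matching instrumentation: snapshot `ktime_get()` on entry, then report the elapsed microseconds with `ktime_us_delta()` once the operation finishes. The pattern in isolation (the workload is a hypothetical placeholder):

```c
#include <linux/ktime.h>
#include <linux/printk.h>

static void example_do_work(void) { /* hypothetical workload */ }

static void example_timed_call(int request_id)
{
	ktime_t timestamp = ktime_get();	/* entry snapshot */

	example_do_work();

	/* ktime_us_delta() returns the difference in microseconds as s64 */
	pr_info("request[%d] cost %lld us\n",
		request_id, ktime_us_delta(ktime_get(), timestamp));
}
```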
2010 | | -uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
2011 | | - struct rga_session *session)
| 2050 | +/*
| 2051 | + * rga_mm_import_buffer - Importing external buffer into the RGA driver
| 2052 | + *
| 2053 | + * @external_buffer: [in] Parameters of external buffer
| 2054 | + * @session: [in] Session of the current process
| 2055 | + *
| 2056 | + * returns:
| 2057 | + * if return value > 0, the buffer import is successful and is the generated
| 2058 | + * buffer-handle, negative error code on failure.
| 2059 | + */
| 2060 | +int rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
| 2061 | + struct rga_session *session)
2012 | 2062 | {
2013 | 2063 | int ret = 0, new_id;
2014 | 2064 | struct rga_mm *mm;
.. | ..
2017 | 2067 | mm = rga_drvdata->mm;
2018 | 2068 | if (mm == NULL) {
2019 | 2069 | pr_err("rga mm is null!\n");
2020 | | - return 0;
| 2070 | + return -EFAULT;
2021 | 2071 | }
2022 | 2072 |
2023 | 2073 | mutex_lock(&mm->lock);
2024 | 2074 |
2025 | 2075 | /* first, Check whether to rga_mm */
2026 | | - internal_buffer = rga_mm_lookup_external(mm, external_buffer);
| 2076 | + internal_buffer = rga_mm_lookup_external(mm, external_buffer, current->mm);
2027 | 2077 | if (!IS_ERR_OR_NULL(internal_buffer)) {
2028 | 2078 | kref_get(&internal_buffer->refcount);
2029 | 2079 |
2030 | 2080 | mutex_unlock(&mm->lock);
| 2081 | +
| 2082 | + if (DEBUGGER_EN(MM)) {
| 2083 | + pr_info("import existing buffer:\n");
| 2084 | + rga_mm_dump_buffer(internal_buffer);
| 2085 | + }
| 2086 | +
2031 | 2087 | return internal_buffer->handle;
2032 | 2088 | }
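With the return type changed from `uint32_t` to `int`, the old "return 0 on every failure" sentinel is gone and callers can propagate a real errno. A hypothetical caller under the new contract, assuming the driver's headers are in scope:

```c
/* hypothetical caller; rga_mm_import_buffer() is the function patched above */
static int example_import(struct rga_external_buffer *ext,
			  struct rga_session *session)
{
	int handle = rga_mm_import_buffer(ext, session);

	if (handle < 0) {
		pr_err("import failed: %d\n", handle);
		return handle;	/* propagate the errno */
	}

	/* handle > 0: a valid buffer-handle for later job submission */
	return handle;
}
```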
.. | ..
2037 | 2093 | pr_err("%s alloc internal_buffer error!\n", __func__);
2038 | 2094 |
2039 | 2095 | mutex_unlock(&mm->lock);
2040 | | - return 0;
| 2096 | + return -ENOMEM;
2041 | 2097 | }
2042 | 2098 |
2043 | 2099 | ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true);
.. | ..
2056 | 2112 | idr_preload_end();
2057 | 2113 | if (new_id < 0) {
2058 | 2114 | pr_err("internal_buffer alloc id failed!\n");
| 2115 | + ret = new_id;
2059 | 2116 | goto FREE_INTERNAL_BUFFER;
2060 | 2117 | }
2061 | 2118 |
.. | ..
2074 | 2131 | mutex_unlock(&mm->lock);
2075 | 2132 | kfree(internal_buffer);
2076 | 2133 |
2077 | | - return 0;
| 2134 | + return ret;
2078 | 2135 | }
2079 | 2136 |
2080 | 2137 | int rga_mm_release_buffer(uint32_t handle)
.. | ..
2126 | 2183 |
2127 | 2184 | idr_for_each_entry(&mm->memory_idr, buffer, i) {
2128 | 2185 | if (session == buffer->session) {
2129 | | - pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n",
| 2186 | + pr_err("[tgid:%d] Destroy handle[%d] when the user exits\n",
2130 | 2187 | session->tgid, buffer->handle);
2131 | | - kref_put(&buffer->refcount, rga_mm_kref_release_buffer);
| 2188 | + rga_mm_buffer_destroy(buffer);
2132 | 2189 | }
2133 | 2190 | }
2134 | 2191 |
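The exit path now force-destroys any buffer the dying session still owns instead of dropping a single reference, so a handle whose refcount was left elevated cannot leak its mapping when the process goes away. A sketch of the sweep, hypothetical apart from the `idr_for_each_entry()` iterator:

```c
#include <linux/idr.h>
#include <linux/kref.h>

struct example_tracked_buffer {		/* hypothetical entry */
	void *session;			/* owning session */
	struct kref refcount;
};

static void example_destroy(struct example_tracked_buffer *buf)
{
	/* unmap and free buf unconditionally (see the destroy sketch above) */
}

static void example_session_sweep(struct idr *memory_idr, void *session)
{
	struct example_tracked_buffer *buffer;
	int id;

	idr_for_each_entry(memory_idr, buffer, id) {
		if (buffer->session == session)
			example_destroy(buffer);	/* unconditional teardown */
	}
}
```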