forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-12-19
Commit: 9370bb92b2d16684ee45cf24e879c93c509162da
File:   kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -455,6 +455,7 @@
 		phys_addr = sg_phys(buffer->sgt->sgl);
 		if (phys_addr == 0) {
 			pr_err("%s get physical address error!", __func__);
+			ret = -EFAULT;
 			goto unmap_buffer;
 		}
 
@@ -571,11 +572,19 @@
 		phys_addr = sg_phys(sgt->sgl);
 		if (phys_addr == 0) {
 			pr_err("%s get physical address error!", __func__);
+			ret = -EFAULT;
 			goto free_sgt;
 		}
 
 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
 	}
+
+	/*
+	 * Some userspace virtual addresses do not have an
+	 * interface for flushing the cache, so it is mandatory
+	 * to flush the cache when the virtual address is used.
+	 */
+	mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
 
 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
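Note on the hunk above: the cache-flush requirement is now recorded as a per-buffer bit in mm_flag rather than inferred from the buffer type, and the later hunks test that bit wherever they previously compared against RGA_VIRTUAL_ADDRESS. A minimal sketch of the new predicate, using only names that appear in this patch (the helper itself is hypothetical, not part of the commit):

static bool example_need_flush_cache(struct rga_internal_buffer *buffer)
{
	/* Cache maintenance is driven by the mm_flag bit, not the buffer type. */
	return buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE;
}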
@@ -614,8 +623,9 @@
 		if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
 			break;
 
-		pr_err("Current %s[%d] cannot support virtual address!\n",
+		pr_err("Current %s[%d] cannot support physically discontinuous virtual address!\n",
 		       rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
+		ret = -EOPNOTSUPP;
 		goto free_dma_buffer;
 	}
 
@@ -833,9 +843,15 @@
 	return 0;
 }
 
+static void rga_mm_buffer_destroy(struct rga_internal_buffer *buffer)
+{
+	rga_mm_kref_release_buffer(&buffer->refcount);
+}
+
 static struct rga_internal_buffer *
 rga_mm_lookup_external(struct rga_mm *mm_session,
-		       struct rga_external_buffer *external_buffer)
+		       struct rga_external_buffer *external_buffer,
+		       struct mm_struct *current_mm)
 {
 	int id;
 	struct dma_buf *dma_buf = NULL;
@@ -868,8 +884,12 @@
 			continue;
 
 		if (temp_buffer->virt_addr->addr == external_buffer->memory) {
-			output_buffer = temp_buffer;
-			break;
+			if (temp_buffer->current_mm == current_mm) {
+				output_buffer = temp_buffer;
+				break;
+			}
+
+			continue;
 		}
 	}
 
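The lookup change above widens the match key for virtual-address imports from the user address alone to the pair (address, owning mm_struct), since the same numeric virtual address in two processes normally maps to different physical pages. A restatement of the new condition as a hypothetical predicate, using only fields visible in this patch:

static bool example_va_buffer_matches(struct rga_internal_buffer *temp_buffer,
				      struct rga_external_buffer *external_buffer,
				      struct mm_struct *current_mm)
{
	/* Both the user virtual address and the owning address space must match. */
	return temp_buffer->virt_addr->addr == external_buffer->memory &&
	       temp_buffer->current_mm == current_mm;
}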
@@ -1295,13 +1315,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1309,7 +1322,19 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
+	    scheduler->data->mmu != RGA_IOMMU) {
+		dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
@@ -1321,13 +1346,6 @@
 	struct sg_table *sgt;
 	struct rga_scheduler_t *scheduler;
 
-	sgt = rga_mm_lookup_sgt(buffer);
-	if (sgt == NULL) {
-		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
-		       __func__, __LINE__, job->core);
-		return -EINVAL;
-	}
-
 	scheduler = buffer->dma_buffer->scheduler;
 	if (scheduler == NULL) {
 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1335,7 +1353,19 @@
 		return -EFAULT;
 	}
 
-	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
+	    scheduler->data->mmu != RGA_IOMMU) {
+		dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+	} else {
+		sgt = rga_mm_lookup_sgt(buffer);
+		if (sgt == NULL) {
+			pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+			       __func__, __LINE__, job->core);
+			return -EINVAL;
+		}
+
+		dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+	}
 
 	return 0;
 }
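The two hunks above use dma_sync_single_*() for physically contiguous buffers on non-IOMMU cores and fall back to the scatter-gather variants otherwise. For readers less familiar with the streaming DMA API, the sketch below shows the generic ownership pattern those calls follow; it is a self-contained, hypothetical example (dev, cpu_buf and the direction are placeholders), not code from rga_mm.c:

#include <linux/dma-mapping.h>

static int example_stream_to_device(struct device *dev, void *cpu_buf, size_t len)
{
	dma_addr_t dma_handle;

	/* Map a physically contiguous buffer for streaming DMA. */
	dma_handle = dma_map_single(dev, cpu_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -ENOMEM;

	/* The CPU wrote cpu_buf after mapping: flush caches before the device reads it. */
	dma_sync_single_for_device(dev, dma_handle, len, DMA_TO_DEVICE);

	/* ... start the device and wait for completion here ... */

	/* Hand ownership back to the CPU before touching the buffer again. */
	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
	return 0;
}

A buffer mapped with dma_map_sg() follows the same ownership rule, only with dma_sync_sg_for_device()/dma_sync_sg_for_cpu() over sgt->sgl and sgt->orig_nents, which is exactly the else branch in the hunks above.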
@@ -1434,7 +1464,7 @@
 		goto put_internal_buffer;
 	}
 
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		/*
 		 * Some userspace virtual addresses do not have an
 		 * interface for flushing the cache, so it is mandatory
@@ -1463,9 +1493,14 @@
 			      struct rga_internal_buffer *internal_buffer,
 			      enum dma_data_direction dir)
 {
-	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
 			pr_err("sync sgt for cpu error!\n");
+
+	if (DEBUGGER_EN(MM)) {
+		pr_info("handle[%d] put info:\n", (int)internal_buffer->handle);
+		rga_mm_dump_buffer(internal_buffer);
+	}
 
 	mutex_lock(&mm->lock);
 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
@@ -1574,6 +1609,53 @@
 
 	req = &job->rga_command_base;
 	mm = rga_drvdata->mm;
+
+	switch (req->render_mode) {
+	case BITBLT_MODE:
+	case COLOR_PALETTE_MODE:
+		if (unlikely(req->src.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] src0 channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->src.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		if (req->bsfilter_flag) {
+			if (unlikely(req->pat.yrgb_addr <= 0)) {
+				pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must be valid!",
+				       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+				return -EINVAL;
+			}
+		}
+
+		break;
+	case COLOR_FILL_MODE:
+		if (unlikely(req->dst.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] dst channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->dst.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+
+	case UPDATE_PALETTE_TABLE_MODE:
+	case UPDATE_PATTEN_BUF_MODE:
+		if (unlikely(req->pat.yrgb_addr <= 0)) {
+			pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must be valid!",
+			       req->render_mode, (unsigned long)req->pat.yrgb_addr);
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		pr_err("%s, unknown render mode!\n", __func__);
+		break;
+	}
 
 	if (likely(req->src.yrgb_addr > 0)) {
 		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
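The switch added above rejects handle-mode requests whose required channels carry no handle before any lookup is attempted. As a caller-side illustration, the fragment below fills a BITBLT_MODE request so that it passes these checks; it assumes the request behind job->rga_command_base is the driver's struct rga_req and that the handles were returned by rga_mm_import_buffer(), so treat it as a sketch rather than driver or uapi documentation:

/* Hypothetical helper: build a handle-mode BITBLT request. */
static void example_fill_bitblt(struct rga_req *req, int src_handle, int dst_handle)
{
	memset(req, 0, sizeof(*req));
	req->render_mode = BITBLT_MODE;
	req->src.yrgb_addr = src_handle;	/* src0 channel handle, must be > 0 */
	req->dst.yrgb_addr = dst_handle;	/* dst channel handle, must be > 0 */
	/* req->pat.yrgb_addr only needs a handle when bsfilter_flag is set. */
}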
@@ -1765,7 +1847,7 @@
 				      struct rga_job_buffer *job_buffer,
 				      enum dma_data_direction dir)
 {
-	if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+	if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
 		if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
 			pr_err("sync sgt for cpu error!\n");
 
@@ -1802,12 +1884,7 @@
 		goto error_unmap_buffer;
 	}
 
-	if (buffer->type == RGA_VIRTUAL_ADDRESS) {
-		/*
-		 * Some userspace virtual addresses do not have an
-		 * interface for flushing the cache, so it is mandatory
-		 * to flush the cache when the virtual address is used.
-		 */
+	if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
 		ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
 		if (ret < 0) {
 			pr_err("sync sgt for device error!\n");
@@ -1924,6 +2001,7 @@
 int rga_mm_map_job_info(struct rga_job *job)
 {
 	int ret;
+	ktime_t timestamp = ktime_get();
 
 	if (job->flags & RGA_JOB_USE_HANDLE) {
 		ret = rga_mm_get_handle_info(job);
@@ -1931,12 +2009,20 @@
 			pr_err("failed to get buffer from handle\n");
 			return ret;
 		}
+
+		if (DEBUGGER_EN(TIME))
+			pr_info("request[%d], get buffer_handle info cost %lld us\n",
+				job->request_id, ktime_us_delta(ktime_get(), timestamp));
 	} else {
 		ret = rga_mm_map_buffer_info(job);
 		if (ret < 0) {
 			pr_err("failed to map buffer\n");
 			return ret;
 		}
+
+		if (DEBUGGER_EN(TIME))
+			pr_info("request[%d], map buffer cost %lld us\n",
+				job->request_id, ktime_us_delta(ktime_get(), timestamp));
 	}
 
 	return 0;
@@ -1944,14 +2030,35 @@
 
 void rga_mm_unmap_job_info(struct rga_job *job)
 {
-	if (job->flags & RGA_JOB_USE_HANDLE)
+	ktime_t timestamp = ktime_get();
+
+	if (job->flags & RGA_JOB_USE_HANDLE) {
 		rga_mm_put_handle_info(job);
-	else
+
+		if (DEBUGGER_EN(TIME))
+			pr_info("request[%d], put buffer_handle info cost %lld us\n",
+				job->request_id, ktime_us_delta(ktime_get(), timestamp));
+	} else {
 		rga_mm_unmap_buffer_info(job);
+
+		if (DEBUGGER_EN(TIME))
+			pr_info("request[%d], unmap buffer cost %lld us\n",
+				job->request_id, ktime_us_delta(ktime_get(), timestamp));
+	}
 }
 
-uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
-			      struct rga_session *session)
+/*
+ * rga_mm_import_buffer - import an external buffer into the RGA driver
+ *
+ * @external_buffer: [in] parameters of the external buffer
+ * @session: [in] session of the current process
+ *
+ * Returns:
+ * On success, the generated buffer handle (a positive value);
+ * on failure, a negative error code.
+ */
+int rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
+			 struct rga_session *session)
 {
 	int ret = 0, new_id;
 	struct rga_mm *mm;
@@ -1960,17 +2067,23 @@
 	mm = rga_drvdata->mm;
 	if (mm == NULL) {
 		pr_err("rga mm is null!\n");
-		return 0;
+		return -EFAULT;
 	}
 
 	mutex_lock(&mm->lock);
 
 	/* first, Check whether to rga_mm */
-	internal_buffer = rga_mm_lookup_external(mm, external_buffer);
+	internal_buffer = rga_mm_lookup_external(mm, external_buffer, current->mm);
 	if (!IS_ERR_OR_NULL(internal_buffer)) {
 		kref_get(&internal_buffer->refcount);
 
 		mutex_unlock(&mm->lock);
+
+		if (DEBUGGER_EN(MM)) {
+			pr_info("import existing buffer:\n");
+			rga_mm_dump_buffer(internal_buffer);
+		}
+
 		return internal_buffer->handle;
 	}
 
@@ -1980,7 +2093,7 @@
 		pr_err("%s alloc internal_buffer error!\n", __func__);
 
 		mutex_unlock(&mm->lock);
-		return 0;
+		return -ENOMEM;
 	}
 
 	ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true);
@@ -1999,6 +2112,7 @@
 	idr_preload_end();
 	if (new_id < 0) {
 		pr_err("internal_buffer alloc id failed!\n");
+		ret = new_id;
 		goto FREE_INTERNAL_BUFFER;
 	}
 
@@ -2017,7 +2131,7 @@
 	mutex_unlock(&mm->lock);
 	kfree(internal_buffer);
 
-	return 0;
+	return ret;
 }
 
 int rga_mm_release_buffer(uint32_t handle)
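With the signature change from uint32_t to int (see the -1944/+2030 hunk above), a failed import is now reported as a negative errno instead of 0: the error paths return -EFAULT, -ENOMEM, the idr error code, or the mapping error kept in ret. The sketch below shows how a caller is expected to handle the new contract; rga_example_import() is hypothetical, only the rga_mm_import_buffer() behaviour comes from this patch:

static int rga_example_import(struct rga_external_buffer *buffer,
			      struct rga_session *session)
{
	int handle;

	handle = rga_mm_import_buffer(buffer, session);
	if (handle <= 0) {
		/* Old contract returned 0 on failure; new contract returns -errno. */
		pr_err("import failed, ret = %d\n", handle);
		return handle ? handle : -EFAULT;
	}

	return handle;	/* valid buffer handle, safe to hand back to userspace */
}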
@@ -2069,9 +2183,9 @@
 
 	idr_for_each_entry(&mm->memory_idr, buffer, i) {
 		if (session == buffer->session) {
-			pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n",
+			pr_err("[tgid:%d] Destroy handle[%d] when the user exits\n",
 				session->tgid, buffer->handle);
-			kref_put(&buffer->refcount, rga_mm_kref_release_buffer);
+			rga_mm_buffer_destroy(buffer);
 		}
 	}
 
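A closing note on the last hunk: kref_put() only invokes the release callback once the final reference is dropped, while the new rga_mm_buffer_destroy() wrapper (added in the -833/+843 hunk) calls rga_mm_kref_release_buffer() directly, so buffers that still hold stale references are torn down when their owning process exits. A minimal, generic illustration of that difference, using a hypothetical kref-backed object rather than the driver's types:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref refcount;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, refcount));
}

/* Normal path: the object survives until the last reference is dropped. */
static void example_put(struct example_obj *obj)
{
	kref_put(&obj->refcount, example_release);
}

/* Forced teardown: run the release callback now, ignoring other references. */
static void example_destroy(struct example_obj *obj)
{
	example_release(&obj->refcount);
}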