hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/video/rockchip/rga/rga_mmu_info.c
@@ -1,253 +1,298 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-
-
-#include <linux/version.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/memory.h>
-#include <linux/dma-mapping.h>
-#include <asm/memory.h>
-#include <asm/atomic.h>
-#include <asm/cacheflush.h>
-#include "rga_mmu_info.h"
-#include <linux/delay.h>
-
-extern rga_service_info rga_service;
-extern struct rga_mmu_buf_t rga_mmu_buf;
+
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/memory.h>
+#include <linux/dma-mapping.h>
+#include <asm/memory.h>
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include "rga_mmu_info.h"
+#include <linux/delay.h>
+
+extern rga_service_info rga_service;
+extern struct rga_mmu_buf_t rga_mmu_buf;
 
 #if RGA_DEBUGFS
 extern int RGA_CHECK_MODE;
 #endif
-
-#define KERNEL_SPACE_VALID 0xc0000000
-
-static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
-{
- mutex_lock(&rga_service.lock);
- t->front += size;
- mutex_unlock(&rga_service.lock);
-
- return 0;
-}
-
-static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
-{
- int ret = 0;
-
- mutex_lock(&rga_service.lock);
- if ((t->back - t->front) > t->size) {
- if(t->front + size > t->back - t->size) {
- ret = -ENOMEM;
- goto out;
- }
- } else {
- if ((t->front + size) > t->back) {
- ret = -ENOMEM;
- goto out;
- }
- if (t->front + size > t->size) {
- if (size > (t->back - t->size)) {
- ret = -ENOMEM;
- goto out;
- }
- t->front = 0;
- }
- }
-
-out:
- mutex_unlock(&rga_service.lock);
- return ret;
-}
-
-static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
-{
- unsigned long start, end;
- uint32_t pageCount;
-
- end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
- start = Mem >> PAGE_SHIFT;
- pageCount = end - start;
- *StartAddr = start;
- return pageCount;
-}
-
-static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
- int format, uint32_t w, uint32_t h, unsigned long *StartAddr )
-{
- uint32_t size_yrgb = 0;
- uint32_t size_uv = 0;
- uint32_t size_v = 0;
- uint32_t stride = 0;
- unsigned long start, end;
- uint32_t pageCount;
-
- switch(format)
- {
- case RK_FORMAT_RGBA_8888 :
- stride = (w * 4 + 3) & (~3);
- size_yrgb = stride*h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_RGBX_8888 :
- stride = (w * 4 + 3) & (~3);
- size_yrgb = stride*h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_RGB_888 :
- stride = (w * 3 + 3) & (~3);
- size_yrgb = stride*h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_BGRA_8888 :
- size_yrgb = w*h*4;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_RGB_565 :
- stride = (w*2 + 3) & (~3);
- size_yrgb = stride * h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_RGBA_5551 :
- stride = (w*2 + 3) & (~3);
- size_yrgb = stride * h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_RGBA_4444 :
- stride = (w*2 + 3) & (~3);
- size_yrgb = stride * h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
- case RK_FORMAT_BGR_888 :
- stride = (w*3 + 3) & (~3);
- size_yrgb = stride * h;
- start = yrgb_addr >> PAGE_SHIFT;
- pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
- break;
-
- /* YUV FORMAT */
- case RK_FORMAT_YCbCr_422_SP :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = stride * h;
- start = MIN(yrgb_addr, uv_addr);
-
- start >>= PAGE_SHIFT;
- end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- case RK_FORMAT_YCbCr_422_P :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = ((stride >> 1) * h);
- size_v = ((stride >> 1) * h);
- start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
- start = start >> PAGE_SHIFT;
- end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- case RK_FORMAT_YCbCr_420_SP :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = (stride * (h >> 1));
- start = MIN(yrgb_addr, uv_addr);
- start >>= PAGE_SHIFT;
- end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- case RK_FORMAT_YCbCr_420_P :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = ((stride >> 1) * (h >> 1));
- size_v = ((stride >> 1) * (h >> 1));
- start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
- start >>= PAGE_SHIFT;
- end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
-
- case RK_FORMAT_YCrCb_422_SP :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = stride * h;
- start = MIN(yrgb_addr, uv_addr);
- start >>= PAGE_SHIFT;
- end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- case RK_FORMAT_YCrCb_422_P :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = ((stride >> 1) * h);
- size_v = ((stride >> 1) * h);
- start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
- start >>= PAGE_SHIFT;
- end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
-
- case RK_FORMAT_YCrCb_420_SP :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = (stride * (h >> 1));
- start = MIN(yrgb_addr, uv_addr);
- start >>= PAGE_SHIFT;
- end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- case RK_FORMAT_YCrCb_420_P :
- stride = (w + 3) & (~3);
- size_yrgb = stride * h;
- size_uv = ((stride >> 1) * (h >> 1));
- size_v = ((stride >> 1) * (h >> 1));
- start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
- start >>= PAGE_SHIFT;
- end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
- end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- pageCount = end - start;
- break;
- #if 0
- case RK_FORMAT_BPP1 :
- break;
- case RK_FORMAT_BPP2 :
- break;
- case RK_FORMAT_BPP4 :
- break;
- case RK_FORMAT_BPP8 :
- break;
- #endif
- default :
- pageCount = 0;
- start = 0;
- break;
- }
-
- *StartAddr = start;
- return pageCount;
-}
-
+
+#define KERNEL_SPACE_VALID 0xc0000000
+
+void rga_dma_flush_range(void *pstart, void *pend)
+{
+ dma_sync_single_for_device(rga_drvdata->dev, virt_to_phys(pstart), pend - pstart, DMA_TO_DEVICE);
+}
+
+static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
+{
+ mutex_lock(&rga_service.lock);
+ t->front += size;
+ mutex_unlock(&rga_service.lock);
+
+ return 0;
+}
+
+static void rga_current_mm_read_lock(struct mm_struct *mm)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+ mmap_read_lock(mm);
+#else
+ down_read(&mm->mmap_sem);
+#endif
+}
+
+static void rga_current_mm_read_unlock(struct mm_struct *mm)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+ mmap_read_unlock(mm);
+#else
+ up_read(&mm->mmap_sem);
+#endif
+}
+
+static long rga_get_user_pages(struct page **pages, unsigned long Memory,
+ uint32_t pageCount, int writeFlag,
+ struct mm_struct *current_mm)
+{
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+ return get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
+ pageCount, writeFlag ? FOLL_WRITE : 0, pages, NULL);
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ return get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
+ pageCount, writeFlag ? FOLL_WRITE : 0, 0, pages, NULL);
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+ return get_user_pages_remote(current, current_mm, Memory << PAGE_SHIFT,
+ pageCount, writeFlag ? FOLL_WRITE : 0, pages,
+ NULL, NULL);
+ #else
+ return get_user_pages_remote(current_mm, Memory << PAGE_SHIFT,
+ pageCount, writeFlag ? FOLL_WRITE : 0, pages,
+ NULL, NULL);
+ #endif
+}
+
+static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&rga_service.lock);
+ if ((t->back - t->front) > t->size) {
+ if(t->front + size > t->back - t->size) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ if ((t->front + size) > t->back) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (t->front + size > t->size) {
+ if (size > (t->back - t->size)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ t->front = 0;
+ }
+ }
+
+out:
+ mutex_unlock(&rga_service.lock);
+ return ret;
+}
+
+static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
+{
+ unsigned long start, end;
+ uint32_t pageCount;
+
+ end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ start = Mem >> PAGE_SHIFT;
+ pageCount = end - start;
+ *StartAddr = start;
+ return pageCount;
+}
+
+static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
+ int format, uint32_t w, uint32_t h, unsigned long *StartAddr )
+{
+ uint32_t size_yrgb = 0;
+ uint32_t size_uv = 0;
+ uint32_t size_v = 0;
+ uint32_t stride = 0;
+ unsigned long start, end;
+ uint32_t pageCount;
+
+ switch(format)
+ {
+ case RK_FORMAT_RGBA_8888 :
+ stride = (w * 4 + 3) & (~3);
+ size_yrgb = stride*h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_RGBX_8888 :
+ stride = (w * 4 + 3) & (~3);
+ size_yrgb = stride*h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_RGB_888 :
+ stride = (w * 3 + 3) & (~3);
+ size_yrgb = stride*h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_BGRA_8888 :
+ size_yrgb = w*h*4;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_RGB_565 :
+ stride = (w*2 + 3) & (~3);
+ size_yrgb = stride * h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_RGBA_5551 :
+ stride = (w*2 + 3) & (~3);
+ size_yrgb = stride * h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_RGBA_4444 :
+ stride = (w*2 + 3) & (~3);
+ size_yrgb = stride * h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ case RK_FORMAT_BGR_888 :
+ stride = (w*3 + 3) & (~3);
+ size_yrgb = stride * h;
+ start = yrgb_addr >> PAGE_SHIFT;
+ pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+
+ /* YUV FORMAT */
+ case RK_FORMAT_YCbCr_422_SP :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = stride * h;
+ start = MIN(yrgb_addr, uv_addr);
+
+ start >>= PAGE_SHIFT;
+ end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ case RK_FORMAT_YCbCr_422_P :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = ((stride >> 1) * h);
+ size_v = ((stride >> 1) * h);
+ start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
+ start = start >> PAGE_SHIFT;
+ end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ case RK_FORMAT_YCbCr_420_SP :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = (stride * (h >> 1));
+ start = MIN(yrgb_addr, uv_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ case RK_FORMAT_YCbCr_420_P :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = ((stride >> 1) * (h >> 1));
+ size_v = ((stride >> 1) * (h >> 1));
+ start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+
+ case RK_FORMAT_YCrCb_422_SP :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = stride * h;
+ start = MIN(yrgb_addr, uv_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ case RK_FORMAT_YCrCb_422_P :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = ((stride >> 1) * h);
+ size_v = ((stride >> 1) * h);
+ start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+
+ case RK_FORMAT_YCrCb_420_SP :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = (stride * (h >> 1));
+ start = MIN(yrgb_addr, uv_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ case RK_FORMAT_YCrCb_420_P :
+ stride = (w + 3) & (~3);
+ size_yrgb = stride * h;
+ size_uv = ((stride >> 1) * (h >> 1));
+ size_v = ((stride >> 1) * (h >> 1));
+ start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
+ start >>= PAGE_SHIFT;
+ end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
+ end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pageCount = end - start;
+ break;
+ #if 0
+ case RK_FORMAT_BPP1 :
+ break;
+ case RK_FORMAT_BPP2 :
+ break;
+ case RK_FORMAT_BPP4 :
+ break;
+ case RK_FORMAT_BPP8 :
+ break;
+ #endif
+ default :
+ pageCount = 0;
+ start = 0;
+ break;
+ }
+
+ *StartAddr = start;
+ return pageCount;
+}
+
 #if RGA_DEBUGFS
 static int rga_usermemory_cheeck(struct page **pages, u32 w, u32 h, u32 format, int flag)
 {
@@ -314,289 +359,304 @@
 }
 #endif
 
317
-static int rga_MapUserMemory(struct page **pages,
318
- uint32_t *pageTable,
319
- unsigned long Memory,
320
- uint32_t pageCount)
321
-{
322
- int32_t result;
323
- uint32_t i;
324
- uint32_t status;
325
- unsigned long Address;
326
-
327
- status = 0;
328
- Address = 0;
329
-
330
- do {
331
- down_read(&current->mm->mmap_sem);
332
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
333
- result = get_user_pages(current, current->mm,
334
- Memory << PAGE_SHIFT, pageCount, 1, 0,
335
- pages, NULL);
336
-#else
337
- result = get_user_pages_remote(current, current->mm,
338
- Memory << PAGE_SHIFT, pageCount, 1, pages, NULL, NULL);
362
+static int rga_MapUserMemory(struct page **pages,
363
+ uint32_t *pageTable,
364
+ unsigned long Memory,
365
+ uint32_t pageCount)
366
+{
367
+ int32_t result;
368
+ uint32_t i;
369
+ uint32_t status;
370
+ unsigned long Address;
371
+
372
+ status = 0;
373
+ Address = 0;
374
+
375
+ do {
376
+ rga_current_mm_read_lock(current->mm);
377
+
378
+ result = rga_get_user_pages(pages, Memory, pageCount, 1, current->mm);
379
+
380
+ rga_current_mm_read_unlock(current->mm);
381
+
382
+ #if 0
383
+ if(result <= 0 || result < pageCount)
384
+ {
385
+ status = 0;
386
+
387
+ for(i=0; i<pageCount; i++)
388
+ {
389
+ temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
390
+ if (temp == 0xffffffff)
391
+ {
392
+ printk("rga find mmu phy ddr error\n ");
393
+ status = RGA_OUT_OF_RESOURCES;
394
+ break;
395
+ }
396
+
397
+ pageTable[i] = temp;
398
+ }
399
+
400
+ return status;
401
+ }
402
+ #else
403
+ if(result <= 0 || result < pageCount)
404
+ {
405
+ struct vm_area_struct *vma;
406
+
407
+ if (result>0) {
408
+ rga_current_mm_read_lock(current->mm);
409
+
410
+ for (i = 0; i < result; i++)
411
+ put_page(pages[i]);
412
+
413
+ rga_current_mm_read_unlock(current->mm);
414
+ }
415
+
416
+ for(i=0; i<pageCount; i++)
417
+ {
418
+ vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
419
+
420
+ if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
421
+ {
422
+ do
423
+ {
424
+ pte_t * pte;
425
+ spinlock_t * ptl;
426
+ unsigned long pfn;
427
+ pgd_t * pgd;
428
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
429
+ p4d_t * p4d;
 #endif
340
- up_read(&current->mm->mmap_sem);
341
-
342
- #if 0
343
- if(result <= 0 || result < pageCount)
344
- {
345
- status = 0;
346
-
347
- for(i=0; i<pageCount; i++)
348
- {
349
- temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
350
- if (temp == 0xffffffff)
351
- {
352
- printk("rga find mmu phy ddr error\n ");
353
- status = RGA_OUT_OF_RESOURCES;
354
- break;
355
- }
356
-
357
- pageTable[i] = temp;
358
- }
359
-
360
- return status;
361
- }
362
- #else
363
- if(result <= 0 || result < pageCount)
364
- {
365
- struct vm_area_struct *vma;
366
-
367
- if (result>0) {
368
- down_read(&current->mm->mmap_sem);
369
- for (i = 0; i < result; i++)
370
- put_page(pages[i]);
371
- up_read(&current->mm->mmap_sem);
372
- }
373
-
374
- for(i=0; i<pageCount; i++)
375
- {
376
- vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
377
-
378
- if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
379
- {
380
- do
381
- {
382
- pte_t * pte;
383
- spinlock_t * ptl;
384
- unsigned long pfn;
385
- pgd_t * pgd;
386
- pud_t * pud;
387
-
388
- pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
389
-
390
- if(pgd_val(*pgd) == 0)
391
- {
392
- //printk("rga pgd value is zero \n");
393
- break;
394
- }
395
-
396
- pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
397
- if (pud)
398
- {
399
- pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
400
- if (pmd)
401
- {
402
- pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
403
- if (!pte)
404
- {
405
- pte_unmap_unlock(pte, ptl);
406
- break;
407
- }
408
- }
409
- else
410
- {
411
- break;
412
- }
413
- }
414
- else
415
- {
416
- break;
417
- }
418
-
419
- pfn = pte_pfn(*pte);
420
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
421
- pte_unmap_unlock(pte, ptl);
422
- }
423
- while (0);
424
-
425
- pageTable[i] = Address;
426
- }
427
- else
428
- {
429
- status = RGA_OUT_OF_RESOURCES;
430
- break;
431
- }
432
- }
433
-
434
- return status;
435
- }
436
- #endif
437
-
438
- /* Fill the page table. */
439
- for(i=0; i<pageCount; i++)
440
- {
441
- /* Get the physical address from page struct. */
442
- pageTable[i] = page_to_phys(pages[i]);
443
- }
444
-
445
- down_read(&current->mm->mmap_sem);
446
- for (i = 0; i < result; i++)
447
- put_page(pages[i]);
448
- up_read(&current->mm->mmap_sem);
449
-
450
- return 0;
451
- }
452
- while(0);
453
-
454
- return status;
455
-}
456
-
457
-static int rga_MapION(struct sg_table *sg,
458
- uint32_t *Memory,
459
- int32_t pageCount,
460
- uint32_t offset)
461
-{
462
- uint32_t i;
463
- uint32_t status;
464
- unsigned long Address;
465
- uint32_t mapped_size = 0;
466
- uint32_t len = 0;
467
- struct scatterlist *sgl = sg->sgl;
468
- uint32_t sg_num = 0;
469
-
470
- status = 0;
471
- Address = 0;
472
- offset = offset >> PAGE_SHIFT;
473
- if (offset != 0) {
474
- do {
475
- len += (sg_dma_len(sgl) >> PAGE_SHIFT);
476
- if (len == offset) {
477
- sg_num += 1;
478
- break;
479
- }
480
- else {
481
- if (len > offset)
482
- break;
483
- }
484
- sg_num += 1;
485
- }
486
- while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
487
-
488
- sgl = sg->sgl;
489
- len = 0;
490
- do {
491
- len += (sg_dma_len(sgl) >> PAGE_SHIFT);
492
- sgl = sg_next(sgl);
493
- }
494
- while(--sg_num);
495
-
496
- offset -= len;
497
-
498
- len = sg_dma_len(sgl) >> PAGE_SHIFT;
499
- Address = sg_phys(sgl);
500
- Address += offset;
501
-
502
- for(i=offset; i<len; i++) {
503
- Memory[i - offset] = Address + (i << PAGE_SHIFT);
504
- }
505
- mapped_size += (len - offset);
506
- sg_num = 1;
507
- sgl = sg_next(sgl);
508
- do {
509
- len = sg_dma_len(sgl) >> PAGE_SHIFT;
510
- Address = sg_phys(sgl);
511
-
512
- for(i=0; i<len; i++) {
513
- Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
514
- }
515
-
516
- mapped_size += len;
517
- sg_num += 1;
518
- }
519
- while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
520
- }
521
- else {
522
- do {
523
- len = sg_dma_len(sgl) >> PAGE_SHIFT;
524
- Address = sg_phys(sgl);
525
- for(i=0; i<len; i++) {
526
- Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
527
- }
528
- mapped_size += len;
529
- sg_num += 1;
530
- }
531
- while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
532
- }
533
- return 0;
534
-}
535
-
536
-
537
-static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
538
-{
539
- int SrcMemSize, DstMemSize;
540
- unsigned long SrcStart, DstStart;
541
- uint32_t i;
542
- uint32_t AllSize;
543
- uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
544
- int ret;
545
- int status;
546
- uint32_t uv_size, v_size;
547
-
548
- struct page **pages = NULL;
549
-
550
- MMU_Base = NULL;
551
-
552
- SrcMemSize = 0;
553
- DstMemSize = 0;
554
-
555
- do {
556
- /* cal src buf mmu info */
557
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
558
- req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
559
- &SrcStart);
560
- if(SrcMemSize == 0) {
561
- return -EINVAL;
562
- }
563
-
564
- /* cal dst buf mmu info */
565
-
566
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
567
- req->dst.format, req->dst.vir_w, req->dst.vir_h,
568
- &DstStart);
569
- if(DstMemSize == 0)
570
- return -EINVAL;
571
-
572
- /* Cal out the needed mem size */
573
- SrcMemSize = (SrcMemSize + 15) & (~15);
574
- DstMemSize = (DstMemSize + 15) & (~15);
575
- AllSize = SrcMemSize + DstMemSize;
576
-
577
- if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
578
- pr_err("RGA Get MMU mem failed\n");
579
- status = RGA_MALLOC_ERROR;
580
- break;
581
- }
582
-
583
- mutex_lock(&rga_service.lock);
584
- MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
585
- MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
586
- mutex_unlock(&rga_service.lock);
587
-
588
- pages = rga_mmu_buf.pages;
589
-
590
- if((req->mmu_info.mmu_flag >> 8) & 1) {
591
- if (req->sg_src) {
592
- ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
593
- }
594
- else {
595
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
596
- if (ret < 0) {
597
- pr_err("rga map src memory failed\n");
598
- status = ret;
599
- break;
431
+ pud_t * pud;
432
+
433
+ pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
434
+
435
+ if(pgd_val(*pgd) == 0)
436
+ {
437
+ //printk("rga pgd value is zero \n");
438
+ break;
439
+ }
440
+
441
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
442
+ /* In the four-level page table, it will do nothing and return pgd. */
443
+ p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
444
+ if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
445
+ pr_err("RGA2 failed to get p4d, result = %d, pageCount = %d\n",
446
+ result, pageCount);
447
+ status = RGA_OUT_OF_RESOURCES;
448
+ break;
449
+ }
450
+
451
+ pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
452
+#else
453
+ pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
454
+#endif
455
+ if (pud)
456
+ {
457
+ pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
458
+ if (pmd)
459
+ {
460
+ pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
461
+ if (!pte)
462
+ {
463
+ pte_unmap_unlock(pte, ptl);
464
+ break;
465
+ }
466
+ }
467
+ else
468
+ {
469
+ break;
470
+ }
471
+ }
472
+ else
473
+ {
474
+ break;
475
+ }
476
+
477
+ pfn = pte_pfn(*pte);
478
+ Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
479
+ pte_unmap_unlock(pte, ptl);
480
+ }
481
+ while (0);
482
+
483
+ pageTable[i] = Address;
484
+ }
485
+ else
486
+ {
487
+ status = RGA_OUT_OF_RESOURCES;
488
+ break;
489
+ }
490
+ }
491
+
492
+ return status;
493
+ }
494
+ #endif
495
+
496
+ /* Fill the page table. */
497
+ for(i=0; i<pageCount; i++)
498
+ {
499
+ /* Get the physical address from page struct. */
500
+ pageTable[i] = page_to_phys(pages[i]);
501
+ }
502
+
503
+ rga_current_mm_read_lock(current->mm);
504
+
505
+ for (i = 0; i < result; i++)
506
+ put_page(pages[i]);
507
+
508
+ rga_current_mm_read_unlock(current->mm);
509
+
510
+ return 0;
511
+ }
512
+ while(0);
513
+
514
+ return status;
515
+}
516
+
517
+static int rga_MapION(struct sg_table *sg,
518
+ uint32_t *Memory,
519
+ int32_t pageCount,
520
+ uint32_t offset)
521
+{
522
+ uint32_t i;
523
+ uint32_t status;
524
+ unsigned long Address;
525
+ uint32_t mapped_size = 0;
526
+ uint32_t len = 0;
527
+ struct scatterlist *sgl = sg->sgl;
528
+ uint32_t sg_num = 0;
529
+
530
+ status = 0;
531
+ Address = 0;
532
+ offset = offset >> PAGE_SHIFT;
533
+ if (offset != 0) {
534
+ do {
535
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);
536
+ if (len == offset) {
537
+ sg_num += 1;
538
+ break;
539
+ }
540
+ else {
541
+ if (len > offset)
542
+ break;
543
+ }
544
+ sg_num += 1;
545
+ }
546
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
547
+
548
+ sgl = sg->sgl;
549
+ len = 0;
550
+ do {
551
+ len += (sg_dma_len(sgl) >> PAGE_SHIFT);
552
+ sgl = sg_next(sgl);
553
+ }
554
+ while(--sg_num);
555
+
556
+ offset -= len;
557
+
558
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
559
+ Address = sg_phys(sgl);
560
+ Address += offset;
561
+
562
+ for(i=offset; i<len; i++) {
563
+ Memory[i - offset] = Address + (i << PAGE_SHIFT);
564
+ }
565
+ mapped_size += (len - offset);
566
+ sg_num = 1;
567
+ sgl = sg_next(sgl);
568
+ do {
569
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
570
+ Address = sg_phys(sgl);
571
+
572
+ for(i=0; i<len; i++) {
573
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
574
+ }
575
+
576
+ mapped_size += len;
577
+ sg_num += 1;
578
+ }
579
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
580
+ }
581
+ else {
582
+ do {
583
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
584
+ Address = sg_phys(sgl);
585
+ for(i=0; i<len; i++) {
586
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
587
+ }
588
+ mapped_size += len;
589
+ sg_num += 1;
590
+ }
591
+ while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
592
+ }
593
+ return 0;
594
+}
595
+
596
+
597
+static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
598
+{
599
+ int SrcMemSize, DstMemSize;
600
+ unsigned long SrcStart, DstStart;
601
+ uint32_t i;
602
+ uint32_t AllSize;
603
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
604
+ int ret;
605
+ int status;
606
+ uint32_t uv_size, v_size;
607
+
608
+ struct page **pages = NULL;
609
+
610
+ MMU_Base = NULL;
611
+
612
+ SrcMemSize = 0;
613
+ DstMemSize = 0;
614
+
615
+ do {
616
+ /* cal src buf mmu info */
617
+ SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
618
+ req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
619
+ &SrcStart);
620
+ if(SrcMemSize == 0) {
621
+ return -EINVAL;
622
+ }
623
+
624
+ /* cal dst buf mmu info */
625
+
626
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
627
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,
628
+ &DstStart);
629
+ if(DstMemSize == 0)
630
+ return -EINVAL;
631
+
632
+ /* Cal out the needed mem size */
633
+ SrcMemSize = (SrcMemSize + 15) & (~15);
634
+ DstMemSize = (DstMemSize + 15) & (~15);
635
+ AllSize = SrcMemSize + DstMemSize;
636
+
637
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
638
+ pr_err("RGA Get MMU mem failed\n");
639
+ status = RGA_MALLOC_ERROR;
640
+ break;
641
+ }
642
+
643
+ mutex_lock(&rga_service.lock);
644
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
645
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
646
+ mutex_unlock(&rga_service.lock);
647
+
648
+ pages = rga_mmu_buf.pages;
649
+
650
+ if((req->mmu_info.mmu_flag >> 8) & 1) {
651
+ if (req->sg_src) {
652
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
653
+ }
654
+ else {
655
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
656
+ if (ret < 0) {
657
+ pr_err("rga map src memory failed\n");
658
+ status = ret;
659
+ break;
600660 }
601661
602662 #if RGA_DEBUGFS
....@@ -604,692 +664,662 @@
604664 rga_usermemory_cheeck(&pages[0], req->src.vir_w,
605665 req->src.vir_h, req->src.format, 1);
606666 #endif
607
- }
608
- }
609
- else {
610
- MMU_p = MMU_Base;
611
-
612
- if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
613
- for(i=0; i<SrcMemSize; i++)
614
- MMU_p[i] = rga_service.pre_scale_buf[i];
615
- }
616
- else {
617
- for(i=0; i<SrcMemSize; i++)
618
- MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
619
- }
620
- }
621
-
622
- if ((req->mmu_info.mmu_flag >> 10) & 1) {
623
- if (req->sg_dst) {
624
- ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
625
- }
626
- else {
627
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
628
- if (ret < 0) {
629
- pr_err("rga map dst memory failed\n");
630
- status = ret;
631
- break;
632
- }
667
+ }
668
+ }
669
+ else {
670
+ MMU_p = MMU_Base;
671
+
672
+ if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
673
+ for(i=0; i<SrcMemSize; i++)
674
+ MMU_p[i] = rga_service.pre_scale_buf[i];
675
+ }
676
+ else {
677
+ for(i=0; i<SrcMemSize; i++)
678
+ MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
679
+ }
680
+ }
681
+
682
+ if ((req->mmu_info.mmu_flag >> 10) & 1) {
683
+ if (req->sg_dst) {
684
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
685
+ }
686
+ else {
687
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
688
+ if (ret < 0) {
689
+ pr_err("rga map dst memory failed\n");
690
+ status = ret;
691
+ break;
692
+ }
 
 #if RGA_DEBUGFS
 if (RGA_CHECK_MODE)
 rga_usermemory_cheeck(&pages[0], req->src.vir_w,
 req->src.vir_h, req->src.format, 2);
 #endif
639
- }
640
- }
641
- else {
642
- MMU_p = MMU_Base + SrcMemSize;
643
- for(i=0; i<DstMemSize; i++)
644
- MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
645
- }
646
-
647
- MMU_Base[AllSize] = MMU_Base[AllSize-1];
648
-
649
- /* zsq
650
- * change the buf address in req struct
651
- */
652
-
653
- req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;
654
-
655
- uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
656
- v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
657
-
658
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
659
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
660
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
661
-
662
- uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
663
-
664
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
665
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
666
-
667
- /* flush data to DDR */
668
- #ifdef CONFIG_ARM
669
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
670
- outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
671
- #elif defined(CONFIG_ARM64)
672
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
673
- #endif
674
-
675
- rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
676
- reg->MMU_len = AllSize + 16;
677
-
678
- status = 0;
679
-
680
- return status;
681
- }
682
- while(0);
683
-
684
- return status;
685
-}
686
-
687
-static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
688
-{
689
- int SrcMemSize, DstMemSize, CMDMemSize;
690
- unsigned long SrcStart, DstStart, CMDStart;
691
- struct page **pages = NULL;
692
- uint32_t i;
693
- uint32_t AllSize;
694
- uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;
695
- uint32_t *MMU_p;
696
- int ret, status = 0;
697
- uint32_t stride;
698
-
699
- uint8_t shift;
700
- uint16_t sw, byte_num;
701
-
702
- shift = 3 - (req->palette_mode & 3);
703
- sw = req->src.vir_w;
704
- byte_num = sw >> shift;
705
- stride = (byte_num + 3) & (~3);
706
-
707
- do {
708
- SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
709
- if(SrcMemSize == 0) {
710
- return -EINVAL;
711
- }
712
-
713
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
714
- req->dst.format, req->dst.vir_w, req->dst.vir_h,
715
- &DstStart);
716
- if(DstMemSize == 0) {
717
- return -EINVAL;
718
- }
719
-
720
- CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
721
- if(CMDMemSize == 0) {
722
- return -EINVAL;
723
- }
724
-
725
- SrcMemSize = (SrcMemSize + 15) & (~15);
726
- DstMemSize = (DstMemSize + 15) & (~15);
727
- CMDMemSize = (CMDMemSize + 15) & (~15);
728
-
729
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;
730
-
731
- if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
732
- pr_err("RGA Get MMU mem failed\n");
733
- status = RGA_MALLOC_ERROR;
734
- break;
735
- }
736
-
737
- mutex_lock(&rga_service.lock);
738
- MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
739
- MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
740
- mutex_unlock(&rga_service.lock);
741
-
742
- pages = rga_mmu_buf.pages;
743
-
744
- /* map CMD addr */
745
- for(i=0; i<CMDMemSize; i++) {
746
- MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));
747
- }
748
-
749
- /* map src addr */
750
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
751
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
752
- if (ret < 0) {
753
- pr_err("rga map src memory failed\n");
754
- status = ret;
755
- break;
756
- }
757
- }
758
- else {
759
- MMU_p = MMU_Base + CMDMemSize;
760
-
761
- for(i=0; i<SrcMemSize; i++)
762
- {
763
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
764
- }
765
- }
766
-
767
- /* map dst addr */
768
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
769
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
770
- if (ret < 0) {
771
- pr_err("rga map dst memory failed\n");
772
- status = ret;
773
- break;
774
- }
775
- }
776
- else {
777
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
778
- for(i=0; i<DstMemSize; i++)
779
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
780
- }
781
-
782
-
783
- /* zsq
784
- * change the buf address in req struct
785
- * for the reason of lie to MMU
786
- */
787
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
788
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
789
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
790
-
791
- /*record the malloc buf for the cmd end to release*/
792
- reg->MMU_base = MMU_Base;
793
-
794
- /* flush data to DDR */
795
- #ifdef CONFIG_ARM
796
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
797
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
798
- #elif defined(CONFIG_ARM64)
799
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
800
- #endif
801
-
802
- rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
803
- reg->MMU_len = AllSize + 16;
804
-
699
+ }
700
+ }
701
+ else {
702
+ MMU_p = MMU_Base + SrcMemSize;
703
+ for(i=0; i<DstMemSize; i++)
704
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
705
+ }
706
+
707
+ MMU_Base[AllSize] = MMU_Base[AllSize-1];
708
+
709
+ /* zsq
710
+ * change the buf address in req struct
711
+ */
712
+
713
+ req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;
714
+
715
+ uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
716
+ v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
717
+
718
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
719
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
720
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
721
+
722
+ uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
723
+
724
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
725
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
726
+
727
+ /* flush data to DDR */
728
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
729
+
730
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
731
+ reg->MMU_len = AllSize + 16;
732
+
733
+ status = 0;
734
+
 return status;
806
-
807
- }
808
- while(0);
809
-
810
- return 0;
811
-}
812
-
813
-static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
814
-{
815
- int DstMemSize;
816
- unsigned long DstStart;
817
- struct page **pages = NULL;
818
- uint32_t i;
819
- uint32_t AllSize;
820
- uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
821
- int ret;
822
- int status;
823
-
824
- MMU_Base = NULL;
825
-
826
- do {
827
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
828
- req->dst.format, req->dst.vir_w, req->dst.vir_h,
829
- &DstStart);
830
- if(DstMemSize == 0) {
831
- return -EINVAL;
832
- }
833
-
834
- AllSize = (DstMemSize + 15) & (~15);
835
-
836
- pages = rga_mmu_buf.pages;
837
-
838
- if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
839
- pr_err("RGA Get MMU mem failed\n");
840
- status = RGA_MALLOC_ERROR;
841
- break;
842
- }
843
-
844
- mutex_lock(&rga_service.lock);
845
- MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
846
- MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
847
- mutex_unlock(&rga_service.lock);
848
-
849
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
850
- if (req->sg_dst) {
851
- ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);
852
- }
853
- else {
854
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
855
- if (ret < 0) {
856
- pr_err("rga map dst memory failed\n");
857
- status = ret;
858
- break;
859
- }
860
- }
861
- }
862
- else {
863
- MMU_p = MMU_Base;
864
- for(i=0; i<DstMemSize; i++)
865
- MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
866
- }
867
-
868
- MMU_Base[AllSize] = MMU_Base[AllSize - 1];
869
-
870
- /* zsq
871
- * change the buf address in req struct
872
- */
873
-
874
- req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
875
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
876
-
877
- /*record the malloc buf for the cmd end to release*/
878
- reg->MMU_base = MMU_Base;
879
-
880
- /* flush data to DDR */
881
- #ifdef CONFIG_ARM
882
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
883
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
884
- #elif defined(CONFIG_ARM64)
885
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
886
- #endif
887
-
888
- rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
889
- reg->MMU_len = AllSize + 16;
890
-
891
- return 0;
892
- }
893
- while(0);
894
-
895
- return status;
896
-}
897
-
898
-
899
-static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
900
-{
901
- return 0;
902
-}
903
-
904
-static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
905
-{
906
- return 0;
907
-}
908
-
909
-
910
-
911
-static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
912
-{
913
- int SrcMemSize, DstMemSize;
914
- unsigned long SrcStart, DstStart;
915
- struct page **pages = NULL;
916
- uint32_t i;
917
- uint32_t AllSize;
918
- uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
919
- int ret;
920
- int status;
921
- uint32_t uv_size, v_size;
922
-
923
- MMU_Base = NULL;
924
-
925
- do {
926
- /* cal src buf mmu info */
927
- SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
928
- req->src.format, req->src.vir_w, req->src.vir_h,
929
- &SrcStart);
930
- if(SrcMemSize == 0) {
931
- return -EINVAL;
932
- }
933
-
934
- /* cal dst buf mmu info */
935
- DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
936
- req->dst.format, req->dst.vir_w, req->dst.vir_h,
937
- &DstStart);
938
- if(DstMemSize == 0) {
939
- return -EINVAL;
940
- }
941
-
942
- SrcMemSize = (SrcMemSize + 15) & (~15);
943
- DstMemSize = (DstMemSize + 15) & (~15);
944
-
945
- AllSize = SrcMemSize + DstMemSize;
946
-
947
- pages = rga_mmu_buf.pages;
948
-
949
- if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
950
- pr_err("RGA Get MMU mem failed\n");
951
- status = RGA_MALLOC_ERROR;
952
- break;
953
- }
954
-
955
- mutex_lock(&rga_service.lock);
956
- MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
957
- MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
958
- mutex_unlock(&rga_service.lock);
959
-
960
- /* map src pages */
961
- if ((req->mmu_info.mmu_flag >> 8) & 1) {
962
- if (req->sg_src) {
963
- ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize,req->line_draw_info.flag);
964
- }
965
- else {
966
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
967
- if (ret < 0) {
968
- pr_err("rga map src memory failed\n");
969
- status = ret;
970
- break;
971
- }
972
- }
973
- }
974
- else {
975
- MMU_p = MMU_Base;
976
-
977
- for(i=0; i<SrcMemSize; i++)
978
- MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
979
- }
980
-
981
- if((req->mmu_info.mmu_flag >> 10) & 1) {
982
- if (req->sg_dst) {
983
- ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
984
- }
985
- else {
986
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
987
- if (ret < 0) {
988
- pr_err("rga map dst memory failed\n");
989
- status = ret;
990
- break;
991
- }
992
- }
993
- }
994
- else
995
- {
996
- /* kernel space */
997
- MMU_p = MMU_Base + SrcMemSize;
998
-
999
- if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
1000
- for(i=0; i<DstMemSize; i++)
1001
- MMU_p[i] = rga_service.pre_scale_buf[i];
1002
- }
1003
- else {
1004
- for(i=0; i<DstMemSize; i++)
1005
- MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
1006
- }
1007
- }
1008
-
1009
- MMU_Base[AllSize] = MMU_Base[AllSize];
1010
-
1011
- /* zsq
1012
- * change the buf address in req struct
1013
- * for the reason of lie to MMU
1014
- */
1015
-
1016
- req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
1017
-
1018
- uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1019
- v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1020
-
1021
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1022
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
1023
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
1024
-
1025
- uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1026
- v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1027
-
1028
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
1029
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
1030
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
1031
-
1032
- /*record the malloc buf for the cmd end to release*/
1033
- reg->MMU_base = MMU_Base;
1034
-
1035
- /* flush data to DDR */
1036
- #ifdef CONFIG_ARM
1037
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1038
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
1039
- #elif defined(CONFIG_ARM64)
1040
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1041
- #endif
1042
-
1043
- rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
1044
- reg->MMU_len = AllSize + 16;
1045
-
1046
- return 0;
1047
- }
1048
- while(0);
1049
-
1050
- return status;
1051
-}
1052
-
1053
-
1054
-static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
1055
-{
1056
- int SrcMemSize, CMDMemSize;
1057
- unsigned long SrcStart, CMDStart;
1058
- struct page **pages = NULL;
1059
- uint32_t i;
1060
- uint32_t AllSize;
1061
- uint32_t *MMU_Base, *MMU_p;
1062
- int ret, status;
1063
-
1064
- MMU_Base = NULL;
1065
-
1066
- do {
1067
- /* cal src buf mmu info */
1068
- SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
1069
- if(SrcMemSize == 0) {
1070
- return -EINVAL;
1071
- }
1072
-
1073
- /* cal cmd buf mmu info */
1074
- CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1075
- if(CMDMemSize == 0) {
1076
- return -EINVAL;
1077
- }
1078
-
1079
- AllSize = SrcMemSize + CMDMemSize;
1080
-
1081
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1082
- if(pages == NULL) {
1083
- pr_err("RGA MMU malloc pages mem failed\n");
1084
- status = RGA_MALLOC_ERROR;
1085
- break;
1086
- }
1087
-
1088
- MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
1089
- if(pages == NULL) {
1090
- pr_err("RGA MMU malloc MMU_Base point failed\n");
1091
- status = RGA_MALLOC_ERROR;
1092
- break;
1093
- }
1094
-
1095
- for(i=0; i<CMDMemSize; i++) {
1096
- MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1097
- }
1098
-
1099
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1100
- {
1101
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1102
- if (ret < 0) {
1103
- pr_err("rga map src memory failed\n");
1104
- return -EINVAL;
1105
- }
1106
- }
1107
- else
1108
- {
1109
- MMU_p = MMU_Base + CMDMemSize;
1110
-
1111
- for(i=0; i<SrcMemSize; i++)
1112
- {
1113
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1114
- }
1115
- }
1116
-
1117
- /* zsq
1118
- * change the buf address in req struct
1119
- * for the reason of lie to MMU
1120
- */
1121
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1122
-
1123
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1124
-
1125
- /*record the malloc buf for the cmd end to release*/
1126
- reg->MMU_base = MMU_Base;
1127
-
1128
- /* flush data to DDR */
1129
- #ifdef CONFIG_ARM
1130
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
1131
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
1132
- #elif defined(CONFIG_ARM64)
1133
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1134
- #endif
1135
-
1136
-
1137
- if (pages != NULL) {
1138
- /* Free the page table */
1139
- kfree(pages);
1140
- }
1141
-
1142
- return 0;
1143
- }
1144
- while(0);
1145
-
1146
- if (pages != NULL)
1147
- kfree(pages);
1148
-
1149
- if (MMU_Base != NULL)
1150
- kfree(MMU_Base);
1151
-
1152
- return status;
1153
-}
1154
-
1155
-static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
1156
-{
1157
- int SrcMemSize, CMDMemSize;
1158
- unsigned long SrcStart, CMDStart;
1159
- struct page **pages = NULL;
1160
- uint32_t i;
1161
- uint32_t AllSize;
1162
- uint32_t *MMU_Base, *MMU_p;
1163
- int ret, status;
1164
-
1165
- MMU_Base = MMU_p = 0;
1166
-
1167
- do
1168
- {
1169
-
1170
- /* cal src buf mmu info */
1171
- SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
1172
- if(SrcMemSize == 0) {
1173
- return -EINVAL;
1174
- }
1175
-
1176
- /* cal cmd buf mmu info */
1177
- CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1178
- if(CMDMemSize == 0) {
1179
- return -EINVAL;
1180
- }
1181
-
1182
- AllSize = SrcMemSize + CMDMemSize;
1183
-
1184
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1185
- if(pages == NULL) {
1186
- pr_err("RGA MMU malloc pages mem failed\n");
1187
- status = RGA_MALLOC_ERROR;
1188
- break;
1189
- }
1190
-
1191
- MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
1192
- if(MMU_Base == NULL) {
1193
- pr_err("RGA MMU malloc MMU_Base point failed\n");
1194
- status = RGA_MALLOC_ERROR;
1195
- break;
1196
- }
1197
-
1198
- for(i=0; i<CMDMemSize; i++) {
1199
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1200
- }
1201
-
1202
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1203
- {
1204
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1205
- if (ret < 0) {
1206
- pr_err("rga map src memory failed\n");
1207
- status = ret;
1208
- break;
1209
- }
1210
- }
1211
- else
1212
- {
1213
- MMU_p = MMU_Base + CMDMemSize;
1214
-
1215
- for(i=0; i<SrcMemSize; i++)
1216
- {
1217
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1218
- }
1219
- }
1220
-
1221
- /* zsq
1222
- * change the buf address in req struct
1223
- * for the reason of lie to MMU
1224
- */
1225
- req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1226
-
1227
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1228
-
1229
- /*record the malloc buf for the cmd end to release*/
1230
- reg->MMU_base = MMU_Base;
1231
-
1232
- /* flush data to DDR */
1233
- #ifdef CONFIG_ARM
1234
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
1235
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
1236
- #elif defined(CONFIG_ARM64)
1237
- __dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1238
- #endif
1239
-
1240
- if (pages != NULL) {
1241
- /* Free the page table */
1242
- kfree(pages);
1243
- }
1244
-
1245
- return 0;
1246
-
1247
- }
1248
- while(0);
1249
-
1250
- if (pages != NULL)
1251
- kfree(pages);
1252
-
1253
- if (MMU_Base != NULL)
1254
- kfree(MMU_Base);
1255
-
1256
- return status;
1257
-}
1258
-
1259
-int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
1260
-{
1261
- int ret;
1262
-
1263
- switch (req->render_mode) {
1264
- case bitblt_mode :
1265
- ret = rga_mmu_info_BitBlt_mode(reg, req);
1266
- break;
1267
- case color_palette_mode :
1268
- ret = rga_mmu_info_color_palette_mode(reg, req);
1269
- break;
1270
- case color_fill_mode :
1271
- ret = rga_mmu_info_color_fill_mode(reg, req);
1272
- break;
1273
- case line_point_drawing_mode :
1274
- ret = rga_mmu_info_line_point_drawing_mode(reg, req);
1275
- break;
1276
- case blur_sharp_filter_mode :
1277
- ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
1278
- break;
1279
- case pre_scaling_mode :
1280
- ret = rga_mmu_info_pre_scale_mode(reg, req);
1281
- break;
1282
- case update_palette_table_mode :
1283
- ret = rga_mmu_info_update_palette_table_mode(reg, req);
1284
- break;
1285
- case update_patten_buff_mode :
1286
- ret = rga_mmu_info_update_patten_buff_mode(reg, req);
1287
- break;
1288
- default :
1289
- ret = -1;
1290
- break;
1291
- }
1292
-
1293
- return ret;
1294
-}
1295
-
736
+ }
737
+ while(0);
738
+
739
+ return status;
740
+}
741
+
742
+static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
743
+{
744
+ int SrcMemSize, DstMemSize, CMDMemSize;
745
+ unsigned long SrcStart, DstStart, CMDStart;
746
+ struct page **pages = NULL;
747
+ uint32_t i;
748
+ uint32_t AllSize;
749
+ uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;
750
+ uint32_t *MMU_p;
751
+ int ret, status = 0;
752
+ uint32_t stride;
753
+
754
+ uint8_t shift;
755
+ uint16_t sw, byte_num;
756
+
757
+ shift = 3 - (req->palette_mode & 3);
758
+ sw = req->src.vir_w;
759
+ byte_num = sw >> shift;
760
+ stride = (byte_num + 3) & (~3);
761
+
762
+ do {
763
+ SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
764
+ if(SrcMemSize == 0) {
765
+ return -EINVAL;
766
+ }
767
+
768
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
769
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,
770
+ &DstStart);
771
+ if(DstMemSize == 0) {
772
+ return -EINVAL;
773
+ }
774
+
775
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
776
+ if(CMDMemSize == 0) {
777
+ return -EINVAL;
778
+ }
779
+
780
+ SrcMemSize = (SrcMemSize + 15) & (~15);
781
+ DstMemSize = (DstMemSize + 15) & (~15);
782
+ CMDMemSize = (CMDMemSize + 15) & (~15);
783
+
784
+ AllSize = SrcMemSize + DstMemSize + CMDMemSize;
785
+
786
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
787
+ pr_err("RGA Get MMU mem failed\n");
788
+ status = RGA_MALLOC_ERROR;
789
+ break;
790
+ }
791
+
792
+ mutex_lock(&rga_service.lock);
793
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
794
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
795
+ mutex_unlock(&rga_service.lock);
796
+
797
+ pages = rga_mmu_buf.pages;
798
+
799
+ /* map CMD addr */
800
+ for(i=0; i<CMDMemSize; i++) {
801
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));
802
+ }
803
+
804
+ /* map src addr */
805
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
806
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
807
+ if (ret < 0) {
808
+ pr_err("rga map src memory failed\n");
809
+ status = ret;
810
+ break;
811
+ }
812
+ }
813
+ else {
814
+ MMU_p = MMU_Base + CMDMemSize;
815
+
816
+ for(i=0; i<SrcMemSize; i++)
817
+ {
818
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
819
+ }
820
+ }
821
+
822
+ /* map dst addr */
823
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
824
+ ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
825
+ if (ret < 0) {
826
+ pr_err("rga map dst memory failed\n");
827
+ status = ret;
828
+ break;
829
+ }
830
+ }
831
+ else {
832
+ MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
833
+ for(i=0; i<DstMemSize; i++)
834
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
835
+ }
836
+
837
+
838
+ /* zsq
839
+ * Rewrite the buffer addresses in the req struct so the
840
+ * hardware fetches them through the MMU page table instead.
841
+ */
842
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys) >> 2);
843
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
844
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
845
+
846
+ /* record the page table so it can be released when the command completes */
847
+ reg->MMU_base = MMU_Base;
848
+
849
+ /* flush data to DDR */
850
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
851
+
852
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
853
+ reg->MMU_len = AllSize + 16;
854
+
855
+ return status;
856
+
857
+ }
858
+ while(0);
859
+
860
+ return status;
861
+}
862
+
863
+static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
864
+{
865
+ int DstMemSize;
866
+ unsigned long DstStart;
867
+ struct page **pages = NULL;
868
+ uint32_t i;
869
+ uint32_t AllSize;
870
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
871
+ int ret;
872
+ int status;
873
+
874
+ MMU_Base = NULL;
875
+
876
+ do {
877
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
878
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,
879
+ &DstStart);
880
+ if(DstMemSize == 0) {
881
+ return -EINVAL;
882
+ }
883
+
884
+ AllSize = (DstMemSize + 15) & (~15);
885
+
886
+ pages = rga_mmu_buf.pages;
887
+
888
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
889
+ pr_err("RGA Get MMU mem failed\n");
890
+ status = RGA_MALLOC_ERROR;
891
+ break;
892
+ }
893
+
894
+ mutex_lock(&rga_service.lock);
895
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
896
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
897
+ mutex_unlock(&rga_service.lock);
898
+
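+ /* the destination comes either from an imported sg table (rga_MapION) or from user pages (rga_MapUserMemory) */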
899
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
900
+ if (req->sg_dst) {
901
+ ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);
902
+ }
903
+ else {
904
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
905
+ if (ret < 0) {
906
+ pr_err("rga map dst memory failed\n");
907
+ status = ret;
908
+ break;
909
+ }
910
+ }
911
+ }
912
+ else {
913
+ MMU_p = MMU_Base;
914
+ for(i=0; i<DstMemSize; i++)
915
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
916
+ }
917
+
918
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1];
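+ /* duplicate the last entry as a guard, presumably so a fetch just past the end still hits a mapped page */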
919
+
920
+ /* zsq
921
+ * change the buf address in req struct
922
+ */
923
+
924
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
925
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
926
+
927
+ /* record the page table so it can be released when the command completes */
928
+ reg->MMU_base = MMU_Base;
929
+
930
+ /* flush data to DDR */
931
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
932
+
933
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
934
+ reg->MMU_len = AllSize + 16;
935
+
936
+ return 0;
937
+ }
938
+ while(0);
939
+
940
+ return status;
941
+}
942
+
943
+
944
+static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
945
+{
946
+ return 0;
947
+}
948
+
949
+static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
950
+{
951
+ return 0;
952
+}
953
+
954
+
955
+
956
+static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
957
+{
958
+ int SrcMemSize, DstMemSize;
959
+ unsigned long SrcStart, DstStart;
960
+ struct page **pages = NULL;
961
+ uint32_t i;
962
+ uint32_t AllSize;
963
+ uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
964
+ int ret;
965
+ int status;
966
+ uint32_t uv_size, v_size;
967
+
968
+ MMU_Base = NULL;
969
+
970
+ do {
971
+ /* cal src buf mmu info */
972
+ SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
973
+ req->src.format, req->src.vir_w, req->src.vir_h,
974
+ &SrcStart);
975
+ if(SrcMemSize == 0) {
976
+ return -EINVAL;
977
+ }
978
+
979
+ /* cal dst buf mmu info */
980
+ DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
981
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,
982
+ &DstStart);
983
+ if(DstMemSize == 0) {
984
+ return -EINVAL;
985
+ }
986
+
987
+ SrcMemSize = (SrcMemSize + 15) & (~15);
988
+ DstMemSize = (DstMemSize + 15) & (~15);
989
+
990
+ AllSize = SrcMemSize + DstMemSize;
991
+
992
+ pages = rga_mmu_buf.pages;
993
+
994
+ if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
995
+ pr_err("RGA Get MMU mem failed\n");
996
+ status = RGA_MALLOC_ERROR;
997
+ break;
998
+ }
999
+
1000
+ mutex_lock(&rga_service.lock);
1001
+ MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
1002
+ MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
1003
+ mutex_unlock(&rga_service.lock);
1004
+
1005
+ /* map src pages */
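+ /* mmu_flag bit 8 requests MMU translation for the source; bit 10 (checked below) does the same for the destination */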
1006
+ if ((req->mmu_info.mmu_flag >> 8) & 1) {
1007
+ if (req->sg_src) {
1008
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize,req->line_draw_info.flag);
1009
+ }
1010
+ else {
1011
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
1012
+ if (ret < 0) {
1013
+ pr_err("rga map src memory failed\n");
1014
+ status = ret;
1015
+ break;
1016
+ }
1017
+ }
1018
+ }
1019
+ else {
1020
+ MMU_p = MMU_Base;
1021
+
1022
+ for(i=0; i<SrcMemSize; i++)
1023
+ MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
1024
+ }
1025
+
1026
+ if((req->mmu_info.mmu_flag >> 10) & 1) {
1027
+ if (req->sg_dst) {
1028
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
1029
+ }
1030
+ else {
1031
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
1032
+ if (ret < 0) {
1033
+ pr_err("rga map dst memory failed\n");
1034
+ status = ret;
1035
+ break;
1036
+ }
1037
+ }
1038
+ }
1039
+ else
1040
+ {
1041
+ /* kernel space */
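+ /* when the destination is the driver's own pre_scale_buf, its precomputed page entries are copied directly */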
1042
+ MMU_p = MMU_Base + SrcMemSize;
1043
+
1044
+ if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
1045
+ for(i=0; i<DstMemSize; i++)
1046
+ MMU_p[i] = rga_service.pre_scale_buf[i];
1047
+ }
1048
+ else {
1049
+ for(i=0; i<DstMemSize; i++)
1050
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
1051
+ }
1052
+ }
1053
+
1054
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1];
1055
+
1056
+ /* zsq
1057
+ * Rewrite the buffer addresses in the req struct so the
1058
+ * hardware fetches them through the MMU page table instead.
1059
+ */
1060
+
1061
+ req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
1062
+
1063
+ uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1064
+ v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1065
+
1066
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1067
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
1068
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
1069
+
1070
+ uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1071
+ v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1072
+
1073
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
1074
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
1075
+ req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
1076
+
1077
+ /* record the page table so it can be released when the command completes */
1078
+ reg->MMU_base = MMU_Base;
1079
+
1080
+ /* flush data to DDR */
1081
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1082
+
1083
+ rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
1084
+ reg->MMU_len = AllSize + 16;
1085
+
1086
+ return 0;
1087
+ }
1088
+ while(0);
1089
+
1090
+ return status;
1091
+}
1092
+
1093
+
1094
+static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
1095
+{
1096
+ int SrcMemSize, CMDMemSize;
1097
+ unsigned long SrcStart, CMDStart;
1098
+ struct page **pages = NULL;
1099
+ uint32_t i;
1100
+ uint32_t AllSize;
1101
+ uint32_t *MMU_Base, *MMU_p;
1102
+ int ret, status;
1103
+
1104
+ MMU_Base = NULL;
1105
+
1106
+ do {
1107
+ /* cal src buf mmu info */
1108
+ SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
1109
+ if(SrcMemSize == 0) {
1110
+ return -EINVAL;
1111
+ }
1112
+
1113
+ /* cal cmd buf mmu info */
1114
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1115
+ if(CMDMemSize == 0) {
1116
+ return -EINVAL;
1117
+ }
1118
+
1119
+ AllSize = SrcMemSize + CMDMemSize;
1120
+
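+ /* unlike the blit paths above, this path allocates a private page table with kzalloc instead of reserving a slot in rga_mmu_buf */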
1121
+ pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1122
+ if(pages == NULL) {
1123
+ pr_err("RGA MMU malloc pages mem failed\n");
1124
+ status = RGA_MALLOC_ERROR;
1125
+ break;
1126
+ }
1127
+
1128
+ MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
1129
+ if(MMU_Base == NULL) {
1130
+ pr_err("RGA MMU malloc MMU_Base failed\n");
1131
+ status = RGA_MALLOC_ERROR;
1132
+ break;
1133
+ }
1134
+
1135
+ for(i=0; i<CMDMemSize; i++) {
1136
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1137
+ }
1138
+
1139
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1140
+ {
1141
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1142
+ if (ret < 0) {
1143
+ pr_err("rga map src memory failed\n");
1144
+ status = ret;
+ break;
1145
+ }
1146
+ }
1147
+ else
1148
+ {
1149
+ MMU_p = MMU_Base + CMDMemSize;
1150
+
1151
+ for(i=0; i<SrcMemSize; i++)
1152
+ {
1153
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1154
+ }
1155
+ }
1156
+
1157
+ /* zsq
1158
+ * Rewrite the buffer addresses in the req struct so the
1159
+ * hardware fetches them through the MMU page table instead.
1160
+ */
1161
+ req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1162
+
1163
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1164
+
1165
+ /* record the page table so it can be released when the command completes */
1166
+ reg->MMU_base = MMU_Base;
1167
+
1168
+ /* flush data to DDR */
1169
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1170
+
1171
+
1172
+ if (pages != NULL) {
1173
+ /* Free the page table */
1174
+ kfree(pages);
1175
+ }
1176
+
1177
+ return 0;
1178
+ }
1179
+ while(0);
1180
+
1181
+ if (pages != NULL)
1182
+ kfree(pages);
1183
+
1184
+ if (MMU_Base != NULL)
1185
+ kfree(MMU_Base);
1186
+
1187
+ return status;
1188
+}
1189
+
1190
+static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
1191
+{
1192
+ int SrcMemSize, CMDMemSize;
1193
+ unsigned long SrcStart, CMDStart;
1194
+ struct page **pages = NULL;
1195
+ uint32_t i;
1196
+ uint32_t AllSize;
1197
+ uint32_t *MMU_Base, *MMU_p;
1198
+ int ret, status;
1199
+
1200
+ MMU_Base = MMU_p = NULL;
1201
+
1202
+ do
1203
+ {
1204
+
1205
+ /* cal src buf mmu info */
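+ /* the pattern buffer is sized as vir_w * vir_h * 4 bytes */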
1206
+ SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
1207
+ if(SrcMemSize == 0) {
1208
+ return -EINVAL;
1209
+ }
1210
+
1211
+ /* cal cmd buf mmu info */
1212
+ CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1213
+ if(CMDMemSize == 0) {
1214
+ return -EINVAL;
1215
+ }
1216
+
1217
+ AllSize = SrcMemSize + CMDMemSize;
1218
+
1219
+ pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1220
+ if(pages == NULL) {
1221
+ pr_err("RGA MMU malloc pages mem failed\n");
1222
+ status = RGA_MALLOC_ERROR;
1223
+ break;
1224
+ }
1225
+
1226
+ MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
1227
+ if(MMU_Base == NULL) {
1228
+ pr_err("RGA MMU malloc MMU_Base point failed\n");
1229
+ status = RGA_MALLOC_ERROR;
1230
+ break;
1231
+ }
1232
+
1233
+ for(i=0; i<CMDMemSize; i++) {
1234
+ MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1235
+ }
1236
+
1237
+ if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1238
+ {
1239
+ ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1240
+ if (ret < 0) {
1241
+ pr_err("rga map src memory failed\n");
1242
+ status = ret;
1243
+ break;
1244
+ }
1245
+ }
1246
+ else
1247
+ {
1248
+ MMU_p = MMU_Base + CMDMemSize;
1249
+
1250
+ for(i=0; i<SrcMemSize; i++)
1251
+ {
1252
+ MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1253
+ }
1254
+ }
1255
+
1256
+ /* zsq
1257
+ * Rewrite the buffer addresses in the req struct so the
1258
+ * hardware fetches them through the MMU page table instead.
1259
+ */
1260
+ req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1261
+
1262
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1263
+
1264
+ /* record the page table so it can be released when the command completes */
1265
+ reg->MMU_base = MMU_Base;
1266
+
1267
+ /* flush data to DDR */
1268
+ rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1269
+
1270
+ if (pages != NULL) {
1271
+ /* Free the page table */
1272
+ kfree(pages);
1273
+ }
1274
+
1275
+ return 0;
1276
+
1277
+ }
1278
+ while(0);
1279
+
1280
+ if (pages != NULL)
1281
+ kfree(pages);
1282
+
1283
+ if (MMU_Base != NULL)
1284
+ kfree(MMU_Base);
1285
+
1286
+ return status;
1287
+}
1288
+
1289
+int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
1290
+{
1291
+ int ret;
1292
+
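+ /* build the MMU page table appropriate to the requested render mode */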
1293
+ switch (req->render_mode) {
1294
+ case bitblt_mode :
1295
+ ret = rga_mmu_info_BitBlt_mode(reg, req);
1296
+ break;
1297
+ case color_palette_mode :
1298
+ ret = rga_mmu_info_color_palette_mode(reg, req);
1299
+ break;
1300
+ case color_fill_mode :
1301
+ ret = rga_mmu_info_color_fill_mode(reg, req);
1302
+ break;
1303
+ case line_point_drawing_mode :
1304
+ ret = rga_mmu_info_line_point_drawing_mode(reg, req);
1305
+ break;
1306
+ case blur_sharp_filter_mode :
1307
+ ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
1308
+ break;
1309
+ case pre_scaling_mode :
1310
+ ret = rga_mmu_info_pre_scale_mode(reg, req);
1311
+ break;
1312
+ case update_palette_table_mode :
1313
+ ret = rga_mmu_info_update_palette_table_mode(reg, req);
1314
+ break;
1315
+ case update_patten_buff_mode :
1316
+ ret = rga_mmu_info_update_patten_buff_mode(reg, req);
1317
+ break;
1318
+ default :
1319
+ ret = -EINVAL;
1320
+ break;
1321
+ }
1322
+
1323
+ return ret;
1324
+}
1325
+