forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
....@@ -12,115 +12,17 @@
1212 #include <linux/slab.h>
1313 #include <soc/rockchip/pm_domains.h>
1414 #include <soc/rockchip/rockchip_dmc.h>
15
+#include <soc/rockchip/rockchip_iommu.h>
1516
1617 #include "mpp_rkvdec2_link.h"
1718
1819 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
1920
20
-#ifdef CONFIG_PM_DEVFREQ
21
-#include "../../../devfreq/governor.h"
22
-#endif
23
-
21
+#define WORK_TIMEOUT_MS (500)
2422 #define WAIT_TIMEOUT_MS (2000)
23
+#define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
2524
26
-#define RKVDEC_MAX_WRITE_PART 6
27
-#define RKVDEC_MAX_READ_PART 2
28
-
29
-struct rkvdec_link_part {
30
- /* register offset of table buffer */
31
- u32 tb_reg_off;
32
- /* start idx of task register */
33
- u32 reg_start;
34
- /* number of task register */
35
- u32 reg_num;
36
-};
37
-
38
-struct rkvdec_link_status {
39
- u32 dec_num_mask;
40
- u32 err_flag_base;
41
- u32 err_flag_bit;
42
-};
43
-
44
-struct rkvdec_link_info {
45
- dma_addr_t iova;
46
- /* total register for link table buffer */
47
- u32 tb_reg_num;
48
- /* next link table addr in table buffer */
49
- u32 tb_reg_next;
50
- /* current read back addr in table buffer */
51
- u32 tb_reg_r;
52
- /* secondary enable in table buffer */
53
- u32 tb_reg_second_en;
54
- u32 part_w_num;
55
- u32 part_r_num;
56
-
57
- struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
58
- struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
59
-
60
- /* interrupt read back in table buffer */
61
- u32 tb_reg_int;
62
- bool hack_setup;
63
- struct rkvdec_link_status reg_status;
64
-};
65
-
66
-struct rkvdec_link_info rkvdec_link_rk3568_hw_info = {
67
- .tb_reg_num = 202,
68
- .tb_reg_next = 0,
69
- .tb_reg_r = 1,
70
- .tb_reg_second_en = 8,
71
-
72
- .part_w_num = 6,
73
- .part_r_num = 2,
74
- .part_w[0] = {
75
- .tb_reg_off = 4,
76
- .reg_start = 8,
77
- .reg_num = 20,
78
- },
79
- .part_w[1] = {
80
- .tb_reg_off = 24,
81
- .reg_start = 64,
82
- .reg_num = 52,
83
- },
84
- .part_w[2] = {
85
- .tb_reg_off = 76,
86
- .reg_start = 128,
87
- .reg_num = 16,
88
- },
89
- .part_w[3] = {
90
- .tb_reg_off = 92,
91
- .reg_start = 160,
92
- .reg_num = 40,
93
- },
94
- .part_w[4] = {
95
- .tb_reg_off = 132,
96
- .reg_start = 224,
97
- .reg_num = 16,
98
- },
99
- .part_w[5] = {
100
- .tb_reg_off = 148,
101
- .reg_start = 256,
102
- .reg_num = 16,
103
- },
104
- .part_r[0] = {
105
- .tb_reg_off = 164,
106
- .reg_start = 224,
107
- .reg_num = 10,
108
- },
109
- .part_r[1] = {
110
- .tb_reg_off = 174,
111
- .reg_start = 258,
112
- .reg_num = 28,
113
- },
114
- .tb_reg_int = 164,
115
- .hack_setup = 1,
116
- .reg_status = {
117
- .dec_num_mask = 0x3fffffff,
118
- .err_flag_base = 0x010,
119
- .err_flag_bit = BIT(31),
120
- },
121
-};
122
-
123
-/* vdpu382 link hw info */
25
+/* vdpu381 link hw info for rk3588 */
12426 struct rkvdec_link_info rkvdec_link_v2_hw_info = {
12527 .tb_reg_num = 218,
12628 .tb_reg_next = 0,
....@@ -170,13 +72,139 @@
17072 .reg_num = 28,
17173 },
17274 .tb_reg_int = 180,
75
+ .tb_reg_cycle = 195,
17376 .hack_setup = 0,
77
+ .reg_status = {
78
+ .dec_num_mask = 0x3fffffff,
79
+ .err_flag_base = 0x010,
80
+ .err_flag_bit = BIT(31),
81
+ },
82
+};
83
+
84
+/* vdpu34x link hw info for rk356x */
85
+struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
86
+ .tb_reg_num = 202,
87
+ .tb_reg_next = 0,
88
+ .tb_reg_r = 1,
89
+ .tb_reg_second_en = 8,
90
+
91
+ .part_w_num = 6,
92
+ .part_r_num = 2,
93
+ .part_w[0] = {
94
+ .tb_reg_off = 4,
95
+ .reg_start = 8,
96
+ .reg_num = 20,
97
+ },
98
+ .part_w[1] = {
99
+ .tb_reg_off = 24,
100
+ .reg_start = 64,
101
+ .reg_num = 52,
102
+ },
103
+ .part_w[2] = {
104
+ .tb_reg_off = 76,
105
+ .reg_start = 128,
106
+ .reg_num = 16,
107
+ },
108
+ .part_w[3] = {
109
+ .tb_reg_off = 92,
110
+ .reg_start = 160,
111
+ .reg_num = 40,
112
+ },
113
+ .part_w[4] = {
114
+ .tb_reg_off = 132,
115
+ .reg_start = 224,
116
+ .reg_num = 16,
117
+ },
118
+ .part_w[5] = {
119
+ .tb_reg_off = 148,
120
+ .reg_start = 256,
121
+ .reg_num = 16,
122
+ },
123
+ .part_r[0] = {
124
+ .tb_reg_off = 164,
125
+ .reg_start = 224,
126
+ .reg_num = 10,
127
+ },
128
+ .part_r[1] = {
129
+ .tb_reg_off = 174,
130
+ .reg_start = 258,
131
+ .reg_num = 28,
132
+ },
133
+ .tb_reg_int = 164,
134
+ .tb_reg_cycle = 179,
135
+ .hack_setup = 1,
136
+ .reg_status = {
137
+ .dec_num_mask = 0x3fffffff,
138
+ .err_flag_base = 0x010,
139
+ .err_flag_bit = BIT(31),
140
+ },
141
+};
142
+
143
+/* vdpu382 link hw info */
144
+struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
145
+ .tb_reg_num = 222,
146
+ .tb_reg_next = 0,
147
+ .tb_reg_r = 1,
148
+ .tb_reg_second_en = 8,
149
+
150
+ .part_w_num = 6,
151
+ .part_r_num = 2,
152
+ .part_w[0] = {
153
+ .tb_reg_off = 4,
154
+ .reg_start = 8,
155
+ .reg_num = 28,
156
+ },
157
+ .part_w[1] = {
158
+ .tb_reg_off = 32,
159
+ .reg_start = 64,
160
+ .reg_num = 52,
161
+ },
162
+ .part_w[2] = {
163
+ .tb_reg_off = 84,
164
+ .reg_start = 128,
165
+ .reg_num = 16,
166
+ },
167
+ .part_w[3] = {
168
+ .tb_reg_off = 100,
169
+ .reg_start = 160,
170
+ .reg_num = 48,
171
+ },
172
+ .part_w[4] = {
173
+ .tb_reg_off = 148,
174
+ .reg_start = 224,
175
+ .reg_num = 16,
176
+ },
177
+ .part_w[5] = {
178
+ .tb_reg_off = 164,
179
+ .reg_start = 256,
180
+ .reg_num = 16,
181
+ },
182
+ .part_r[0] = {
183
+ .tb_reg_off = 180,
184
+ .reg_start = 224,
185
+ .reg_num = 12,
186
+ },
187
+ .part_r[1] = {
188
+ .tb_reg_off = 192,
189
+ .reg_start = 258,
190
+ .reg_num = 30,
191
+ },
192
+ .tb_reg_int = 180,
193
+ .hack_setup = 0,
194
+ .tb_reg_cycle = 197,
174195 .reg_status = {
175196 .dec_num_mask = 0x000fffff,
176197 .err_flag_base = 0x024,
177198 .err_flag_bit = BIT(8),
178199 },
179200 };
201
+
202
+static void rkvdec2_link_free_task(struct kref *ref);
203
+static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
204
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
205
+ struct device *iommu_dev,
206
+ unsigned long iova,
207
+ int status, void *arg);
180208
181209 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
182210 {
....@@ -228,7 +256,7 @@
228256 u32 *reg = NULL;
229257 u32 i, j;
230258
231
- for (i = 0; i < dev->task_size; i++) {
259
+ for (i = 0; i < dev->task_capacity; i++) {
232260 reg = table_base + i * reg_count;
233261
234262 mpp_err("slot %d link config iova %08x:\n", i,
....@@ -275,9 +303,8 @@
275303 {
276304 mpp_err("dump link counter from %s\n", func);
277305
278
- mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
279
- dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
280
- dev->task_to_run, dev->task_decoded, dev->task_total);
306
+ mpp_err("task pending %d running %d\n",
307
+ atomic_read(&dev->task_pending), dev->task_running);
281308 }
282309
283310 int rkvdec_link_dump(struct mpp_dev *mpp)
....@@ -290,160 +317,6 @@
290317 rkvdec_link_counter(__func__, dev);
291318 rkvdec_core_reg_dump(__func__, dev);
292319 rkvdec_link_node_dump(__func__, dev);
293
-
294
- return 0;
295
-}
296
-
297
-static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
298
-{
299
- int idx = dev->task_write < dev->task_size ? dev->task_write :
300
- dev->task_write - dev->task_size;
301
-
302
- return idx;
303
-}
304
-static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
305
-{
306
- int task_write = rkvdec_link_get_task_write(dev);
307
-
308
- dev->task_write++;
309
- if (dev->task_write >= dev->task_size * 2)
310
- dev->task_write = 0;
311
-
312
- return task_write;
313
-}
314
-static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
315
-{
316
- int idx = dev->task_read < dev->task_size ? dev->task_read :
317
- dev->task_read - dev->task_size;
318
-
319
- return idx;
320
-}
321
-static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
322
-{
323
- int task_read = rkvdec_link_get_task_read(dev);
324
-
325
- dev->task_read++;
326
- if (dev->task_read >= dev->task_size * 2)
327
- dev->task_read = 0;
328
-
329
- return task_read;
330
-}
331
-static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
332
-{
333
- int len;
334
-
335
- if (dev->task_send <= dev->task_recv)
336
- len = dev->task_send + dev->task_size - dev->task_recv;
337
- else
338
- len = dev->task_send - dev->task_recv - dev->task_size;
339
-
340
- return len;
341
-}
342
-static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
343
-{
344
- int idx = dev->task_send < dev->task_size ? dev->task_send :
345
- dev->task_send - dev->task_size;
346
-
347
- return idx;
348
-}
349
-static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
350
-{
351
- int task_send = rkvdec_link_get_task_send(dev);
352
-
353
- dev->task_send++;
354
- if (dev->task_send >= dev->task_size * 2)
355
- dev->task_send = 0;
356
-
357
- return task_send;
358
-}
359
-static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
360
-{
361
- int task_recv = dev->task_recv;
362
-
363
- dev->task_recv++;
364
- if (dev->task_recv >= dev->task_size * 2)
365
- dev->task_recv = 0;
366
-
367
- return task_recv;
368
-}
369
-
370
-static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
371
-{
372
- int next = -1;
373
-
374
- if (dev->task_write == dev->task_read)
375
- return next;
376
-
377
- next = rkvdec_link_get_task_write(dev);
378
-
379
- return next;
380
-}
381
-
382
-static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
383
- struct mpp_task *mpp_task)
384
-{
385
- u32 i, off, s, n;
386
- struct rkvdec_link_part *part;
387
- struct rkvdec_link_info *info;
388
- struct mpp_dma_buffer *table;
389
- struct rkvdec2_task *task;
390
- int slot_idx;
391
- u32 *tb_reg;
392
-
393
- if (idx < 0 || idx >= dev->task_size) {
394
- mpp_err("send invalid task index %d\n", idx);
395
- return -1;
396
- }
397
-
398
- info = dev->info;
399
- part = info->part_w;
400
- table = dev->table;
401
- task = to_rkvdec2_task(mpp_task);
402
-
403
- slot_idx = rkvdec_link_inc_task_write(dev);
404
- if (idx != slot_idx)
405
- dev_info(dev->dev, "slot index mismatch %d vs %d\n",
406
- idx, slot_idx);
407
-
408
- if (task->need_hack) {
409
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
410
-
411
- rkvdec2_3568_hack_fix_link(tb_reg + 4);
412
-
413
- /* setup error mode flag */
414
- dev->tasks_hw[slot_idx] = NULL;
415
- dev->task_to_run++;
416
- dev->task_prepared++;
417
- slot_idx = rkvdec_link_inc_task_write(dev);
418
- }
419
-
420
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
421
-
422
- for (i = 0; i < info->part_w_num; i++) {
423
- off = part[i].tb_reg_off;
424
- s = part[i].reg_start;
425
- n = part[i].reg_num;
426
- memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
427
- }
428
-
429
- /* setup error mode flag */
430
- tb_reg[9] |= BIT(18) | BIT(9);
431
- tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
432
-
433
- /* memset read registers */
434
- part = info->part_r;
435
- for (i = 0; i < info->part_r_num; i++) {
436
- off = part[i].tb_reg_off;
437
- n = part[i].reg_num;
438
- memset(&tb_reg[off], 0, n * sizeof(u32));
439
- }
440
-
441
- dev->tasks_hw[slot_idx] = mpp_task;
442
- task->slot_idx = slot_idx;
443
- dev->task_to_run++;
444
- dev->task_prepared++;
445
- mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
446
- mpp_task->task_index);
447320
448321 return 0;
449322 }
....@@ -467,34 +340,20 @@
467340 mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
468341 }
469342
470
-static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
471
- struct mpp_task *mpp_task,
472
- int slot_idx, u32 task_to_run,
473
- int resend)
343
+static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
344
+ struct mpp_task *mpp_task)
474345 {
475
- void __iomem *reg_base = dev->reg_base;
476
- struct mpp_dma_buffer *table = dev->table;
477
- u32 task_total = dev->task_total;
478
- u32 mode_start = 0;
479
- u32 val;
346
+ void __iomem *reg_base = link_dec->reg_base;
347
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
348
+ struct mpp_dma_buffer *table = task->table;
349
+ u32 link_en = 0;
350
+ u32 frame_num = 1;
351
+ u32 link_mode;
352
+ u32 timing_en = link_dec->mpp->srv->timing_en;
480353
481
- /* write address */
482
- if (!task_to_run || task_to_run > dev->task_size ||
483
- slot_idx < 0 || slot_idx >= dev->task_size) {
484
- mpp_err("invalid task send cfg at %d count %d\n",
485
- slot_idx, task_to_run);
486
- rkvdec_link_counter("error on send", dev);
487
- return 0;
488
- }
489
-
490
- val = task_to_run;
491
- if (!task_total || resend)
492
- mode_start = 1;
493
-
494
- if (mode_start) {
495
- u32 iova = table->iova + slot_idx * dev->link_node_size;
496
-
497
- rkvdec2_clear_cache(dev->mpp);
354
+ link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
355
+ if (!link_en) {
356
+ rkvdec2_clear_cache(link_dec->mpp);
498357 /* cleanup counter in hardware */
499358 writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
500359 /* start config before all registers are set */
....@@ -504,54 +363,31 @@
504363 wmb();
505364 /* clear counter and enable link mode hardware */
506365 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
507
-
508
- dev->task_total = 0;
509
- dev->task_decoded = 0;
510
-
511
- writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
512
- } else {
513
- val |= RKVDEC_LINK_BIT_ADD_MODE;
514
- }
515
-
516
- if (!resend) {
517
- u32 i;
518
-
519
- for (i = 0; i < task_to_run; i++) {
520
- int next_idx = rkvdec_link_inc_task_send(dev);
521
- struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
522
-
523
- if (!task_ddr)
524
- continue;
525
-
526
- set_bit(TASK_STATE_START, &task_ddr->state);
527
- schedule_delayed_work(&task_ddr->timeout_work,
528
- msecs_to_jiffies(200));
529
- mpp_time_record(task_ddr);
530
- }
531
- } else {
532
- if (task_total)
533
- dev_info(dev->dev, "resend with total %d\n", task_total);
534
- }
366
+ writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
367
+ link_mode = frame_num;
368
+ } else
369
+ link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
535370
536371 /* set link mode */
537
- writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
372
+ writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);
538373
539374 /* start config before all registers are set */
540375 wmb();
541376
377
+ mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
378
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
379
+
380
+ link_dec->task_running++;
542381 /* configure done */
543382 writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
544
-
545
- mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
546
- task_to_run, mode_start ? "start" : "add");
547
- if (mode_start) {
383
+ if (!link_en) {
548384 /* start hardware before all registers are set */
549385 wmb();
550386 /* clear counter and enable link mode hardware */
551387 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
552388 }
389
+ mpp_task_run_end(mpp_task, timing_en);
553390
554
- dev->task_total += task_to_run;
555391 return 0;
556392 }
557393
....@@ -563,8 +399,7 @@
563399 struct mpp_dma_buffer *table = link_dec->table;
564400 struct rkvdec_link_info *info = link_dec->info;
565401 struct rkvdec_link_part *part = info->part_r;
566
- int slot_idx = task->slot_idx;
567
- u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
402
+ u32 *tb_reg = (u32 *)table->vaddr;
568403 u32 off, s, n;
569404 u32 i;
570405
....@@ -584,154 +419,71 @@
584419 return 0;
585420 }
586421
587
-static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
588
- struct rkvdec_link_dev *link_dec,
589
- int count)
590
-{
591
- struct rkvdec_link_info *info = link_dec->info;
592
- u32 *table_base = (u32 *)link_dec->table->vaddr;
593
- int i;
594
-
595
- for (i = 0; i < count; i++) {
596
- int idx = rkvdec_link_get_task_read(link_dec);
597
- struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
598
- struct rkvdec2_task *task = NULL;
599
- u32 *regs = NULL;
600
- u32 irq_status = 0;
601
-
602
- if (!mpp_task) {
603
- regs = table_base + idx * link_dec->link_reg_count;
604
- mpp_dbg_link_flow("slot %d read task stuff\n", idx);
605
-
606
- link_dec->stuff_total++;
607
- if (link_dec->statistic_count &&
608
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
609
- link_dec->stuff_cycle_sum +=
610
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
611
- link_dec->stuff_cnt++;
612
- if (link_dec->stuff_cnt >=
613
- link_dec->statistic_count) {
614
- dev_info(
615
- link_dec->dev, "hw cycle %u\n",
616
- (u32)(link_dec->stuff_cycle_sum /
617
- link_dec->statistic_count));
618
- link_dec->stuff_cycle_sum = 0;
619
- link_dec->stuff_cnt = 0;
620
- }
621
- }
622
-
623
- if (link_dec->error && (i == (count - 1))) {
624
- link_dec->stuff_err++;
625
-
626
- irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
627
- dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
628
- irq_status, link_dec->stuff_err,
629
- link_dec->stuff_total);
630
-
631
- if (link_dec->stuff_on_error) {
632
- dev_info(link_dec->dev, "stuff task error again %u/%u\n",
633
- link_dec->stuff_err,
634
- link_dec->stuff_total);
635
- }
636
-
637
- link_dec->stuff_on_error = 1;
638
- /* resend task */
639
- link_dec->decoded--;
640
- } else {
641
- link_dec->stuff_on_error = 0;
642
- rkvdec_link_inc_task_recv(link_dec);
643
- rkvdec_link_inc_task_read(link_dec);
644
- link_dec->task_running--;
645
- link_dec->task_prepared--;
646
- }
647
-
648
- continue;
649
- }
650
-
651
- mpp_time_diff(mpp_task);
652
- task = to_rkvdec2_task(mpp_task);
653
- regs = table_base + idx * link_dec->link_reg_count;
654
- irq_status = regs[info->tb_reg_int];
655
- mpp_dbg_link_flow("slot %d rd task %d\n", idx,
656
- mpp_task->task_index);
657
-
658
- task->irq_status = irq_status ? irq_status : mpp->irq_status;
659
-
660
- cancel_delayed_work_sync(&mpp_task->timeout_work);
661
- set_bit(TASK_STATE_HANDLE, &mpp_task->state);
662
-
663
- if (link_dec->statistic_count &&
664
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
665
- link_dec->task_cycle_sum +=
666
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
667
- link_dec->task_cnt++;
668
- if (link_dec->task_cnt >= link_dec->statistic_count) {
669
- dev_info(link_dec->dev, "hw cycle %u\n",
670
- (u32)(link_dec->task_cycle_sum /
671
- link_dec->statistic_count));
672
- link_dec->task_cycle_sum = 0;
673
- link_dec->task_cnt = 0;
674
- }
675
- }
676
-
677
- rkvdec2_link_finish(mpp, mpp_task);
678
-
679
- set_bit(TASK_STATE_FINISH, &mpp_task->state);
680
-
681
- list_del_init(&mpp_task->queue_link);
682
- link_dec->task_running--;
683
- link_dec->task_prepared--;
684
-
685
- rkvdec_link_inc_task_recv(link_dec);
686
- rkvdec_link_inc_task_read(link_dec);
687
-
688
- if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
689
- set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
690
-
691
- set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
692
- /* Wake up the GET thread */
693
- wake_up(&task->wait);
694
- }
695
-
696
- return 0;
697
-}
698
-
699422 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
700423 struct mpp_task *mpp_task)
701424 {
702
- struct mpp_task *out_task = NULL;
703425 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
704426 struct rkvdec_link_dev *link_dec = dec->link_dec;
705
- int ret = 0;
706
- int slot_idx;
427
+ struct mpp_dma_buffer *table = NULL;
428
+ struct rkvdec_link_part *part;
429
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
430
+ struct rkvdec_link_info *info = link_dec->info;
431
+ u32 i, off, s, n;
432
+ u32 *tb_reg;
707433
708434 mpp_debug_enter();
709435
710
- slot_idx = rkvdec_link_get_next_slot(link_dec);
711
- if (slot_idx < 0) {
712
- mpp_err("capacity %d running %d\n",
713
- mpp->task_capacity, link_dec->task_running);
714
- dev_err(link_dec->dev, "no slot to write on get next slot\n");
715
- goto done;
436
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
437
+ dev_err(mpp->dev, "task %d has prepared\n", mpp_task->task_index);
438
+ return mpp_task;
716439 }
717440
718
- ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
719
- if (ret >= 0)
720
- out_task = mpp_task;
721
- else
722
- dev_err(mpp->dev, "no slot to write\n");
441
+ table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);
723442
724
-done:
443
+ if (!table)
444
+ return NULL;
445
+
446
+ /* fill regs value */
447
+ tb_reg = (u32 *)table->vaddr;
448
+ part = info->part_w;
449
+ for (i = 0; i < info->part_w_num; i++) {
450
+ off = part[i].tb_reg_off;
451
+ s = part[i].reg_start;
452
+ n = part[i].reg_num;
453
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
454
+ }
455
+
456
+ /* setup error mode flag */
457
+ tb_reg[9] |= BIT(18) | BIT(9);
458
+ tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
459
+
460
+ /* memset read registers */
461
+ part = info->part_r;
462
+ for (i = 0; i < info->part_r_num; i++) {
463
+ off = part[i].tb_reg_off;
464
+ n = part[i].reg_num;
465
+ memset(&tb_reg[off], 0, n * sizeof(u32));
466
+ }
467
+
468
+ list_move_tail(&table->link, &link_dec->used_list);
469
+ task->table = table;
470
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
471
+
472
+ mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
473
+ mpp_task->session->index, mpp_task->task_index,
474
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
725475 mpp_debug_leave();
726476
727
- return out_task;
477
+ return mpp_task;
728478 }
729479
730480 static int rkvdec2_link_reset(struct mpp_dev *mpp)
731481 {
732
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
733482
734483 dev_info(mpp->dev, "resetting...\n");
484
+
485
+ disable_irq(mpp->irq);
486
+ mpp_iommu_disable_irq(mpp->iommu_info);
735487
736488 /* FIXME lock resource lock of the other devices in combo */
737489 mpp_iommu_down_write(mpp->iommu_info);
....@@ -740,11 +492,8 @@
740492
741493 rockchip_save_qos(mpp->dev);
742494
743
- mutex_lock(&dec->sip_reset_lock);
744
- rockchip_dmcfreq_lock();
745
- sip_smc_vpu_reset(0, 0, 0);
746
- rockchip_dmcfreq_unlock();
747
- mutex_unlock(&dec->sip_reset_lock);
495
+ if (mpp->hw_ops->reset)
496
+ mpp->hw_ops->reset(mpp);
748497
749498 rockchip_restore_qos(mpp->dev);
750499
....@@ -757,6 +506,8 @@
757506 mpp_reset_up_write(mpp->reset_group);
758507 mpp_iommu_up_write(mpp->iommu_info);
759508
509
+ enable_irq(mpp->irq);
510
+ mpp_iommu_enable_irq(mpp->iommu_info);
760511 dev_info(mpp->dev, "reset done\n");
761512
762513 return 0;
....@@ -775,9 +526,6 @@
775526
776527 irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
777528
778
- mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", irq_status);
779
- mpp_dbg_link_flow("link irq %08x\n", irq_status);
780
-
781529 if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
782530 u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
783531
....@@ -791,96 +539,14 @@
791539
792540 link_dec->irq_status = irq_status;
793541 mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
794
- mpp_dbg_link_flow("core irq %08x\n", mpp->irq_status);
795542
796543 writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
797544 }
798545
546
+ mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
547
+ irq_status, mpp->irq_status);
548
+
799549 return 0;
800
-}
801
-
802
-static int rkvdec2_link_isr(struct mpp_dev *mpp)
803
-{
804
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
805
- struct rkvdec_link_dev *link_dec = dec->link_dec;
806
- struct rkvdec_link_info *link_info = link_dec->info;
807
- /* keep irq_status */
808
- u32 irq_status = link_dec->irq_status;
809
- u32 prev_dec_num;
810
- int count = 0;
811
- u32 len = 0;
812
- u32 need_reset = atomic_read(&mpp->reset_request);
813
- u32 task_timeout = link_dec->task_on_timeout;
814
-
815
- mpp_debug_enter();
816
-
817
- disable_irq(mpp->irq);
818
- rkvdec_link_status_update(link_dec);
819
- link_dec->irq_status = irq_status;
820
- prev_dec_num = link_dec->task_decoded;
821
-
822
- if (!link_dec->enabled || task_timeout) {
823
- u32 val;
824
-
825
- if (task_timeout)
826
- rkvdec_link_reg_dump("timeout", link_dec);
827
-
828
- val = mpp_read(mpp, 224 * 4);
829
- if (link_info->hack_setup && !(val & BIT(2))) {
830
- /* only for rk356x */
831
- dev_info(mpp->dev, "frame not complete\n");
832
- link_dec->decoded++;
833
- }
834
- }
835
- count = (int)link_dec->decoded - (int)prev_dec_num;
836
-
837
- /* handle counter wrap */
838
- if (link_dec->enabled && !count && !need_reset) {
839
- /* process extra isr when task is processed */
840
- enable_irq(mpp->irq);
841
- goto done;
842
- }
843
-
844
- /* get previous ready task */
845
- if (count) {
846
- rkvdec_link_isr_recv_task(mpp, link_dec, count);
847
- link_dec->task_decoded = link_dec->decoded;
848
- }
849
-
850
- if (!link_dec->enabled || need_reset)
851
- goto do_reset;
852
-
853
- enable_irq(mpp->irq);
854
- goto done;
855
-
856
-do_reset:
857
- /* NOTE: irq may run with reset */
858
- atomic_inc(&mpp->reset_request);
859
- rkvdec2_link_reset(mpp);
860
- link_dec->task_decoded = 0;
861
- link_dec->task_total = 0;
862
- enable_irq(mpp->irq);
863
-
864
- if (link_dec->total == link_dec->decoded)
865
- goto done;
866
-
867
- len = rkvdec_link_get_task_hw_queue_length(link_dec);
868
- if (len > link_dec->task_size)
869
- rkvdec_link_counter("invalid len", link_dec);
870
-
871
- if (len) {
872
- int slot_idx = rkvdec_link_get_task_read(link_dec);
873
- struct mpp_task *mpp_task = NULL;
874
-
875
- mpp_task = link_dec->tasks_hw[slot_idx];
876
- rkvdec_link_send_task_to_hw(link_dec, mpp_task,
877
- slot_idx, len, 1);
878
- }
879
-
880
-done:
881
- mpp_debug_leave();
882
-
883
- return IRQ_HANDLED;
884550 }
885551
886552 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
....@@ -933,12 +599,6 @@
933599 }
934600
935601 link_dec->table = table;
936
- link_dec->task_size = task_capacity;
937
- link_dec->task_count = 0;
938
- link_dec->task_write = 0;
939
- link_dec->task_read = link_dec->task_size;
940
- link_dec->task_send = 0;
941
- link_dec->task_recv = link_dec->task_size;
942602
943603 return 0;
944604 err_free_node:
....@@ -977,18 +637,13 @@
977637 struct rkvdec_link_dev *link_dec = NULL;
978638 struct device *dev = &pdev->dev;
979639 struct mpp_dev *mpp = &dec->mpp;
640
+ struct mpp_dma_buffer *table;
641
+ int i;
980642
981643 mpp_debug_enter();
982644
983645 link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
984646 if (!link_dec) {
985
- ret = -ENOMEM;
986
- goto done;
987
- }
988
-
989
- link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
990
- mpp->task_capacity, GFP_KERNEL);
991
- if (!link_dec->tasks_hw) {
992647 ret = -ENOMEM;
993648 goto done;
994649 }
....@@ -1014,12 +669,33 @@
1014669 if (ret)
1015670 goto done;
1016671
1017
- if (link_dec->info->hack_setup)
672
+ /* alloc table pointer array */
673
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
674
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
675
+ if (!table)
676
+ return -ENOMEM;
677
+
678
+ /* init table array */
679
+ link_dec->table_array = table;
680
+ INIT_LIST_HEAD(&link_dec->used_list);
681
+ INIT_LIST_HEAD(&link_dec->unused_list);
682
+ for (i = 0; i < mpp->task_capacity; i++) {
683
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
684
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
685
+ table[i].size = link_dec->link_node_size;
686
+ INIT_LIST_HEAD(&table[i].link);
687
+ list_add_tail(&table[i].link, &link_dec->unused_list);
688
+ }
689
+
690
+ if (dec->fix)
1018691 rkvdec2_link_hack_data_setup(dec->fix);
692
+
693
+ mpp->fault_handler = rkvdec2_link_iommu_fault_handle;
1019694
1020695 link_dec->mpp = mpp;
1021696 link_dec->dev = dev;
1022697 atomic_set(&link_dec->task_timeout, 0);
698
+ atomic_set(&link_dec->task_pending, 0);
1023699 atomic_set(&link_dec->power_enabled, 0);
1024700 link_dec->irq_enabled = 1;
1025701
....@@ -1033,11 +709,6 @@
1033709 devm_iounmap(dev, link_dec->reg_base);
1034710 link_dec->reg_base = NULL;
1035711 }
1036
- if (link_dec->tasks_hw) {
1037
- devm_kfree(dev, link_dec->tasks_hw);
1038
- link_dec->tasks_hw = NULL;
1039
- }
1040
-
1041712 devm_kfree(dev, link_dec);
1042713 link_dec = NULL;
1043714 }
....@@ -1055,13 +726,13 @@
1055726 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
1056727
1057728 if (!task->session) {
1058
- mpp_err("task %d task->session is null.\n", task->task_index);
729
+ mpp_err("task %d task->session is null.\n", task->task_id);
1059730 return;
1060731 }
1061732 session = task->session;
1062733
1063734 mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
1064
- session->index, task->task_index, task->state);
735
+ session->index, task->task_id, task->state);
1065736 if (!session->mpp) {
1066737 mpp_err("session %d session->mpp is null.\n", session->index);
1067738 return;
....@@ -1080,30 +751,16 @@
1080751 kthread_queue_work(&mpp->queue->worker, &mpp->work);
1081752 }
1082753
1083
-static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
1084
-{
1085
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1086
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1087
-
1088
- atomic_inc(&link_dec->task_timeout);
1089
- rkvdec2_link_trigger_work(mpp);
1090
-}
1091
-
1092
-static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1093
-{
1094
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1095
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1096
-
1097
- link_dec->task_irq++;
1098
- rkvdec2_link_trigger_work(mpp);
1099
-}
1100
-
1101
-static void rkvdec2_link_power_on(struct mpp_dev *mpp)
754
+static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1102755 {
1103756 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1104757 struct rkvdec_link_dev *link_dec = dec->link_dec;
1105758
1106759 if (!atomic_xchg(&link_dec->power_enabled, 1)) {
760
+ if (mpp_iommu_attach(mpp->iommu_info)) {
761
+ dev_err(mpp->dev, "mpp_iommu_attach failed\n");
762
+ return -ENODATA;
763
+ }
1107764 pm_runtime_get_sync(mpp->dev);
1108765 pm_stay_awake(mpp->dev);
1109766
....@@ -1112,31 +769,17 @@
1112769
1113770 if (!link_dec->irq_enabled) {
1114771 enable_irq(mpp->irq);
772
+ mpp_iommu_enable_irq(mpp->iommu_info);
1115773 link_dec->irq_enabled = 1;
1116774 }
1117775
1118776 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
1119777 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
1120778 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
1121
-
1122
-#ifdef CONFIG_PM_DEVFREQ
1123
- if (dec->devfreq) {
1124
- unsigned long core_rate_hz;
1125
-
1126
- mutex_lock(&dec->devfreq->lock);
1127
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1128
- CLK_MODE_ADVANCED);
1129
- if (dec->core_rate_hz != core_rate_hz) {
1130
- dec->core_rate_hz = core_rate_hz;
1131
- update_devfreq(dec->devfreq);
1132
- }
1133
- mutex_unlock(&dec->devfreq->lock);
1134
-
1135
- return;
1136
- }
1137
-#endif
1138
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_ADVANCED);
779
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
780
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1139781 }
782
+ return 0;
1140783 }
1141784
1142785 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
....@@ -1146,6 +789,7 @@
1146789
1147790 if (atomic_xchg(&link_dec->power_enabled, 0)) {
1148791 disable_irq(mpp->irq);
792
+ mpp_iommu_disable_irq(mpp->iommu_info);
1149793 link_dec->irq_enabled = 0;
1150794
1151795 if (mpp->hw_ops->clk_off)
....@@ -1154,175 +798,281 @@
1154798 pm_relax(mpp->dev);
1155799 pm_runtime_put_sync_suspend(mpp->dev);
1156800
1157
- link_dec->task_decoded = 0;
1158
- link_dec->task_total = 0;
1159
-
1160801 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1161802 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1162803 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1163
-
1164
-#ifdef CONFIG_PM_DEVFREQ
1165
- if (dec->devfreq) {
1166
- unsigned long core_rate_hz;
1167
-
1168
- mutex_lock(&dec->devfreq->lock);
1169
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1170
- CLK_MODE_NORMAL);
1171
- if (dec->core_rate_hz != core_rate_hz) {
1172
- dec->core_rate_hz = core_rate_hz;
1173
- update_devfreq(dec->devfreq);
1174
- }
1175
- mutex_unlock(&dec->devfreq->lock);
1176
-
1177
- return;
1178
- }
1179
-#endif
1180
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_NORMAL);
804
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
805
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1181806 }
1182807 }
1183808
1184809 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1185810 {
1186811 struct mpp_dev *mpp;
812
+ struct rkvdec2_dev *dec;
1187813 struct mpp_session *session;
1188814 struct mpp_task *task = container_of(to_delayed_work(work_s),
1189815 struct mpp_task, timeout_work);
1190816
1191817 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1192818 mpp_err("task %d state %lx has been handled\n",
1193
- task->task_index, task->state);
819
+ task->task_id, task->state);
1194820 return;
1195821 }
1196822
1197823 if (!task->session) {
1198
- mpp_err("task %d session is null.\n", task->task_index);
824
+ mpp_err("task %d session is null.\n", task->task_id);
1199825 return;
1200826 }
1201827 session = task->session;
1202828
1203829 if (!session->mpp) {
1204830 mpp_err("task %d:%d mpp is null.\n", session->index,
1205
- task->task_index);
831
+ task->task_id);
1206832 return;
1207833 }
1208834 mpp = session->mpp;
1209
- rkvdec2_link_trigger_timeout(mpp);
835
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
836
+
837
+ dec = to_rkvdec2_dev(mpp);
838
+ atomic_inc(&dec->link_dec->task_timeout);
839
+
840
+ dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
841
+ session->index, task->task_index, task->state,
842
+ atomic_read(&dec->link_dec->task_timeout));
843
+
844
+ rkvdec2_link_trigger_work(mpp);
1210845 }
1211846
1212
-static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
847
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
848
+ struct device *iommu_dev,
849
+ unsigned long iova,
850
+ int status, void *arg)
1213851 {
1214
- struct mpp_task *task, *n;
852
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
853
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
854
+ struct mpp_task *mpp_task = NULL, *n;
855
+ struct mpp_taskqueue *queue;
1215856
1216
- mutex_lock(&queue->pending_lock);
1217
- /* Check and pop all timeout task */
1218
- list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1219
- struct mpp_session *session = task->session;
857
+ dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
858
+ iova, status, arg);
1220859
1221
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1222
- mutex_lock(&session->pending_lock);
1223
- /* wait and signal */
1224
- list_del_init(&task->queue_link);
1225
- mutex_unlock(&session->pending_lock);
1226
- kref_put(&task->ref, rkvdec2_link_free_task);
860
+ if (!mpp) {
861
+ dev_err(iommu_dev, "pagefault without device to handle\n");
862
+ return 0;
863
+ }
864
+ queue = mpp->queue;
865
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
866
+ struct rkvdec_link_info *info = dec->link_dec->info;
867
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
868
+ u32 *tb_reg = (u32 *)task->table->vaddr;
869
+ u32 irq_status = tb_reg[info->tb_reg_int];
870
+
871
+ if (!irq_status) {
872
+ mpp_task_dump_mem_region(mpp, mpp_task);
873
+ break;
1227874 }
1228875 }
1229
- mutex_unlock(&queue->pending_lock);
876
+
877
+ mpp_task_dump_hw_reg(mpp);
878
+ /*
879
+ * Mask the iommu irq so that the iommu does not repeatedly trigger pagefaults
880
+ * until the faulting task is finished by the hw timeout.
881
+ */
882
+ rockchip_iommu_mask_irq(mpp->dev);
883
+ dec->mmu_fault = 1;
884
+
885
+ return 0;
886
+}
887
+
888
+static void rkvdec2_link_resend(struct mpp_dev *mpp)
889
+{
890
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
891
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
892
+ struct mpp_taskqueue *queue = mpp->queue;
893
+ struct mpp_task *mpp_task, *n;
894
+
895
+ link_dec->task_running = 0;
896
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
897
+ dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
898
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
899
+ clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
900
+ clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
901
+ rkvdec2_link_enqueue(link_dec, mpp_task);
902
+ }
1230903 }
1231904
1232905 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1233906 {
1234907 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1235908 struct rkvdec_link_dev *link_dec = dec->link_dec;
1236
- struct mpp_task *task;
1237909 struct mpp_taskqueue *queue = mpp->queue;
1238
- int task_irq = link_dec->task_irq;
1239
- int task_irq_prev = link_dec->task_irq_prev;
1240
- int task_timeout = atomic_read(&link_dec->task_timeout);
910
+ struct mpp_task *mpp_task = NULL, *n;
911
+ struct rkvdec_link_info *info = link_dec->info;
912
+ u32 reset_flag = 0;
913
+ u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
914
+ u32 link_en = atomic_read(&link_dec->power_enabled) ?
915
+ readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
916
+ u32 force_dequeue = iommu_fault || !link_en;
917
+ u32 dequeue_cnt = 0;
1241918
1242
- if (!link_dec->task_running)
1243
- goto done;
919
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
920
+ /*
921
+ * Because multiple tasks are enqueued at the same time, their soft
922
+ * timeouts may also fire at the same time, but in reality only the
923
+ * first task has actually timed out because the hardware is stuck,
924
+ * so only process the first task.
925
+ */
926
+ u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
927
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
928
+ u32 *tb_reg = (u32 *)task->table->vaddr;
929
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
930
+ u32 irq_status = tb_reg[info->tb_reg_int];
931
+ u32 task_done = irq_status || timeout_flag || abort_flag;
1244932
1245
- if (task_timeout != link_dec->task_timeout_prev) {
1246
- dev_info(link_dec->dev, "process task timeout\n");
1247
- atomic_inc(&mpp->reset_request);
1248
- link_dec->task_on_timeout =
1249
- task_timeout - link_dec->task_timeout_prev;
1250
- goto proc;
933
+ /*
934
+ * There are some cases in which the hw cannot write regs back to ddr:
935
+ * 1. iommu pagefault
936
+ * 2. link stop (link_en == 0) caused by an error task; this is a rk356x issue.
937
+ * In these cases one task must be force dequeued.
938
+ */
939
+ if (force_dequeue)
940
+ task_done = 1;
941
+
942
+ if (!task_done)
943
+ break;
944
+
945
+ dequeue_cnt++;
946
+ /* check hack task, only for rk356x */
947
+ if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
948
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
949
+ list_move_tail(&task->table->link, &link_dec->unused_list);
950
+ list_del_init(&mpp_task->queue_link);
951
+ link_dec->task_running--;
952
+ link_dec->hack_task_running--;
953
+ kfree(task);
954
+ mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
955
+ link_dec->hack_task_running, irq_status,
956
+ timeout_flag, abort_flag);
957
+ continue;
958
+ }
959
+
960
+ /*
961
+ * If a timeout/abort/force dequeue is found, reset and stop the hw first.
962
+ */
963
+ if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
964
+ dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
965
+ mpp_task->session->index, mpp_task->task_index,
966
+ timeout_flag, abort_flag, force_dequeue);
967
+ rkvdec2_link_reset(mpp);
968
+ reset_flag = 1;
969
+ dec->mmu_fault = 0;
970
+ mpp->irq_status = 0;
971
+ force_dequeue = 0;
972
+ }
973
+
974
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
975
+
976
+ task->irq_status = irq_status;
977
+ mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
978
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
979
+ rkvdec2_link_finish(mpp, mpp_task);
980
+
981
+ list_move_tail(&task->table->link, &link_dec->unused_list);
982
+ list_del_init(&mpp_task->queue_link);
983
+
984
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
985
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
986
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
987
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
988
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
989
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
990
+
991
+ wake_up(&mpp_task->wait);
992
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
993
+ link_dec->task_running--;
994
+
995
+ mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
996
+ mpp_task->session->index, mpp_task->task_index,
997
+ irq_status, timeout_flag, abort_flag);
998
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
999
+ dev_err(mpp->dev,
1000
+ "session %d task %d irq_status %#08x timeout %u abort %u\n",
1001
+ mpp_task->session->index, mpp_task->task_index,
1002
+ irq_status, timeout_flag, abort_flag);
1003
+ if (!reset_flag)
1004
+ atomic_inc(&mpp->reset_request);
1005
+ }
12511006 }
12521007
1253
- if (task_irq == task_irq_prev)
1254
- goto done;
1255
-
1256
- if (!atomic_read(&link_dec->power_enabled)) {
1257
- dev_info(link_dec->dev, "dequeue on power off\n");
1258
- goto done;
1259
- }
1260
-
1261
-proc:
1262
- task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1263
- queue_link);
1264
- if (!task) {
1265
- mpp_err("can found task on trydequeue with %d running task\n",
1266
- link_dec->task_running);
1267
- goto done;
1268
- }
1269
-
1270
- /* Check and process all finished task */
1271
- rkvdec2_link_isr(mpp);
1272
-
1273
-done:
1274
- link_dec->task_irq_prev = task_irq;
1275
- link_dec->task_timeout_prev = task_timeout;
1276
- link_dec->task_on_timeout = 0;
1277
-
1278
- mpp_taskqueue_scan_pending_abort_task(queue);
1279
-
1280
- /* TODO: if reset is needed do reset here */
1008
+ /* resend running task after reset */
1009
+ if (reset_flag && !list_empty(&queue->running_list))
1010
+ rkvdec2_link_resend(mpp);
12811011 }
12821012
1283
-static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1013
+static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
12841014 {
12851015 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
12861016 struct rkvdec_link_dev *link_dec = dec->link_dec;
1287
- u32 task_to_run = 0;
1288
- int slot_idx = 0;
1289
- int ret;
1017
+ struct mpp_taskqueue *queue = mpp->queue;
1018
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
12901019
12911020 mpp_debug_enter();
12921021
1293
- /*
1294
- * for iommu share hardware, should attach to ensure
1295
- * working in current device
1296
- */
1297
- ret = mpp_iommu_attach(mpp->iommu_info);
1298
- if (ret) {
1299
- dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1300
- return -ENODATA;
1301
- }
1302
-
13031022 rkvdec2_link_power_on(mpp);
1304
- mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1305
- task->session->pid, dev_name(mpp->dev));
13061023
1307
- /* prepare the task for running */
1308
- if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1309
- mpp_err("task %d has been prepare twice\n", task->task_index);
1024
+ /* hack for rk356x */
1025
+ if (task->need_hack) {
1026
+ u32 *tb_reg;
1027
+ struct mpp_dma_buffer *table;
1028
+ struct rkvdec2_task *hack_task;
1029
+ struct rkvdec_link_info *info = link_dec->info;
13101030
1311
- rkvdec2_link_prepare(mpp, task);
1031
+ /* 2 unused slots must be reserved for a task that needs the hack */
1032
+ if (link_dec->task_running > (link_dec->task_capacity - 2))
1033
+ return -EBUSY;
13121034
1313
- task_to_run = link_dec->task_to_run;
1314
- if (!task_to_run) {
1315
- dev_err(link_dec->dev, "nothing to run\n");
1316
- goto done;
1035
+ table = list_first_entry_or_null(&link_dec->unused_list,
1036
+ struct mpp_dma_buffer,
1037
+ link);
1038
+ if (!table)
1039
+ return -EBUSY;
1040
+
1041
+ hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
1042
+
1043
+ if (!hack_task)
1044
+ return -ENOMEM;
1045
+
1046
+ mpp_task_init(mpp_task->session, &hack_task->mpp_task);
1047
+ INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
1048
+ rkvdec2_link_timeout_proc);
1049
+
1050
+ tb_reg = (u32 *)table->vaddr;
1051
+ memset(tb_reg + info->part_r[0].tb_reg_off, 0, info->part_r[0].reg_num);
1052
+ rkvdec2_3568_hack_fix_link(tb_reg + 4);
1053
+ list_move_tail(&table->link, &link_dec->used_list);
1054
+ hack_task->table = table;
1055
+ hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
1056
+ rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
1057
+ mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
1058
+ link_dec->hack_task_running++;
1059
+ mpp_dbg_link("hack task send to hw, hack running %d\n",
1060
+ link_dec->hack_task_running);
13171061 }
13181062
1319
- mpp_reset_down_read(mpp->reset_group);
1320
- link_dec->task_to_run = 0;
1321
- slot_idx = rkvdec_link_get_task_send(link_dec);
1322
- link_dec->task_running += task_to_run;
1323
- rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1063
+ /* process a normal task */
1064
+ if (!rkvdec2_link_prepare(mpp, mpp_task))
1065
+ return -EBUSY;
13241066
1325
-done:
1067
+ rkvdec2_link_enqueue(link_dec, mpp_task);
1068
+
1069
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1070
+ atomic_dec(&link_dec->task_pending);
1071
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1072
+
1073
+ mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
1074
+ mpp_task->session->index, mpp_task->task_index,
1075
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
13261076 mpp_debug_leave();
13271077
13281078 return 0;
....@@ -1334,7 +1084,7 @@
13341084 int ret = rkvdec2_link_irq(mpp);
13351085
13361086 if (!ret)
1337
- rkvdec2_link_trigger_irq(mpp);
1087
+ rkvdec2_link_trigger_work(mpp);
13381088
13391089 return IRQ_HANDLED;
13401090 }
....@@ -1372,7 +1122,6 @@
13721122 struct mpp_task *task)
13731123 {
13741124 set_bit(TASK_STATE_DONE, &task->state);
1375
- kref_put(&task->ref, rkvdec2_link_free_task);
13761125
13771126 return 0;
13781127 }
....@@ -1381,10 +1130,10 @@
13811130 struct mpp_task_msgs *msgs)
13821131 {
13831132 struct mpp_task *task = NULL;
1384
- struct rkvdec2_task *dec_task = NULL;
13851133 struct mpp_dev *mpp = session->mpp;
1386
- u32 fmt;
13871134 struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
1135
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1136
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
13881137
13891138 task = rkvdec2_alloc_task(session, msgs);
13901139 if (!task) {
....@@ -1393,6 +1142,9 @@
13931142 }
13941143
13951144 if (link_info->hack_setup) {
1145
+ u32 fmt;
1146
+ struct rkvdec2_task *dec_task = NULL;
1147
+
13961148 dec_task = to_rkvdec2_task(task);
13971149 fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
13981150 dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
....@@ -1401,6 +1153,7 @@
14011153 kref_init(&task->ref);
14021154 atomic_set(&task->abort_request, 0);
14031155 task->task_index = atomic_fetch_inc(&mpp->task_index);
1156
+ task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
14041157 INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
14051158
14061159 atomic_inc(&session->task_count);
....@@ -1414,6 +1167,7 @@
14141167 mutex_lock(&mpp->queue->pending_lock);
14151168 list_add_tail(&task->queue_link, &mpp->queue->pending_list);
14161169 mutex_unlock(&mpp->queue->pending_lock);
1170
+ atomic_inc(&link_dec->task_pending);
14171171
14181172 /* push current task to queue */
14191173 atomic_inc(&mpp->task_count);
....@@ -1430,7 +1184,6 @@
14301184 {
14311185 struct mpp_dev *mpp = session->mpp;
14321186 struct mpp_task *mpp_task;
1433
- struct rkvdec2_task *task;
14341187 int ret;
14351188
14361189 mpp_task = mpp_session_get_pending_task(session);
....@@ -1439,16 +1192,15 @@
14391192 return -EIO;
14401193 }
14411194
1442
- task = to_rkvdec2_task(mpp_task);
1443
- ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
1195
+ ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
14441196 msecs_to_jiffies(WAIT_TIMEOUT_MS));
14451197 if (ret) {
14461198 ret = rkvdec2_result(mpp, mpp_task, msgs);
14471199
14481200 mpp_session_pop_done(session, mpp_task);
14491201 } else {
1450
- mpp_err("task %d:%d statue %lx timeout -> abort\n",
1451
- session->index, mpp_task->task_index, mpp_task->state);
1202
+ mpp_err("task %d:%d state %lx timeout -> abort\n",
1203
+ session->index, mpp_task->task_id, mpp_task->state);
14521204
14531205 atomic_inc(&mpp_task->abort_request);
14541206 set_bit(TASK_STATE_ABORT, &mpp_task->state);
....@@ -1461,32 +1213,25 @@
14611213 void rkvdec2_link_worker(struct kthread_work *work_s)
14621214 {
14631215 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1464
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1465
- struct rkvdec_link_dev *link_dec = dec->link_dec;
14661216 struct mpp_task *task;
14671217 struct mpp_taskqueue *queue = mpp->queue;
1218
+ u32 all_done;
14681219
14691220 mpp_debug_enter();
14701221
1471
- /*
1472
- * process timeout and finished task.
1473
- */
1222
+ /* dequeue running task */
14741223 rkvdec2_link_try_dequeue(mpp);
14751224
1476
-again:
1225
+ /* process reset */
14771226 if (atomic_read(&mpp->reset_request)) {
1478
- if (link_dec->task_running || link_dec->task_prepared)
1479
- goto done;
1480
-
1481
- disable_irq(mpp->irq);
14821227 rkvdec2_link_reset(mpp);
1483
- link_dec->task_decoded = 0;
1484
- link_dec->task_total = 0;
1485
- enable_irq(mpp->irq);
1228
+ /* resend running task after reset */
1229
+ if (!list_empty(&queue->running_list))
1230
+ rkvdec2_link_resend(mpp);
14861231 }
1487
- /*
1488
- * process pending queue to find the task to accept.
1489
- */
1232
+
1233
+again:
1234
+ /* get pending task to process */
14901235 mutex_lock(&queue->pending_lock);
14911236 task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
14921237 queue_link);
....@@ -1494,80 +1239,37 @@
14941239 if (!task)
14951240 goto done;
14961241
1497
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1498
- struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
1499
-
1242
+ /* check abort task */
1243
+ if (atomic_read(&task->abort_request)) {
15001244 mutex_lock(&queue->pending_lock);
15011245 list_del_init(&task->queue_link);
15021246
1503
- kref_get(&task->ref);
15041247 set_bit(TASK_STATE_ABORT_READY, &task->state);
15051248 set_bit(TASK_STATE_PROC_DONE, &task->state);
15061249
15071250 mutex_unlock(&queue->pending_lock);
1508
- wake_up(&dec_task->wait);
1251
+ wake_up(&task->wait);
15091252 kref_put(&task->ref, rkvdec2_link_free_task);
15101253 goto again;
15111254 }
15121255
1513
- /*
1514
- * if target device can accept more task send the task to run.
1515
- */
1516
- if (link_dec->task_running >= link_dec->task_capacity - 2)
1517
- goto done;
1518
-
1519
- if (mpp_task_queue(mpp, task)) {
1520
- /* failed to run */
1521
- mpp_err("%p failed to process task %p:%d\n",
1522
- mpp, task, task->task_index);
1523
- } else {
1524
- mutex_lock(&queue->pending_lock);
1525
- set_bit(TASK_STATE_RUNNING, &task->state);
1526
- list_move_tail(&task->queue_link, &queue->running_list);
1527
- mutex_unlock(&queue->pending_lock);
1256
+ /* queue task to hw */
1257
+ if (!mpp_task_queue(mpp, task))
15281258 goto again;
1529
- }
1259
+
15301260 done:
1261
+
1262
+ /* if there is no task in the pending or running list, power off the device */
1263
+ mutex_lock(&queue->pending_lock);
1264
+ all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
1265
+ mutex_unlock(&queue->pending_lock);
1266
+
1267
+ if (all_done)
1268
+ rkvdec2_link_power_off(mpp);
1269
+
1270
+ mpp_session_cleanup_detach(queue, work_s);
1271
+
15311272 mpp_debug_leave();
1532
-
1533
- if (link_dec->task_irq != link_dec->task_irq_prev ||
1534
- atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1535
- rkvdec2_link_trigger_work(mpp);
1536
-
1537
- /* if no task for running power off device */
1538
- {
1539
- u32 all_done = 0;
1540
-
1541
- mutex_lock(&queue->pending_lock);
1542
- all_done = list_empty(&queue->pending_list);
1543
- mutex_unlock(&queue->pending_lock);
1544
-
1545
- if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1546
- rkvdec2_link_power_off(mpp);
1547
- }
1548
-
1549
- mutex_lock(&queue->session_lock);
1550
- while (queue->detach_count) {
1551
- struct mpp_session *session = NULL;
1552
-
1553
- session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
1554
- session_link);
1555
- if (session) {
1556
- list_del_init(&session->session_link);
1557
- queue->detach_count--;
1558
- }
1559
-
1560
- mutex_unlock(&queue->session_lock);
1561
-
1562
- if (session) {
1563
- mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1564
- queue->detach_count);
1565
- mpp_session_deinit(session);
1566
- }
1567
-
1568
- mutex_lock(&queue->session_lock);
1569
- }
1570
- mutex_unlock(&queue->session_lock);
15711273 }
15721274
15731275 void rkvdec2_link_session_deinit(struct mpp_session *session)
....@@ -1580,9 +1282,9 @@
15801282
15811283 if (session->dma) {
15821284 mpp_dbg_session("session %d destroy dma\n", session->index);
1583
- mpp_iommu_down_read(mpp->iommu_info);
1285
+ mpp_iommu_down_write(mpp->iommu_info);
15841286 mpp_dma_session_destroy(session->dma);
1585
- mpp_iommu_up_read(mpp->iommu_info);
1287
+ mpp_iommu_up_write(mpp->iommu_info);
15861288 session->dma = NULL;
15871289 }
15881290 if (session->srv) {
....@@ -1598,3 +1300,1258 @@
15981300
15991301 mpp_debug_leave();
16001302 }
1303
+
1304
+#define RKVDEC2_1080P_PIXELS (1920*1080)
1305
+#define RKVDEC2_4K_PIXELS (4096*2304)
1306
+#define RKVDEC2_8K_PIXELS (7680*4320)
1307
+#define RKVDEC2_CCU_TIMEOUT_20MS (0xefffff)
1308
+#define RKVDEC2_CCU_TIMEOUT_50MS (0x2cfffff)
1309
+#define RKVDEC2_CCU_TIMEOUT_100MS (0x4ffffff)
1310
+
1311
+static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
1312
+{
1313
+ u32 pixels = task->pixels;
1314
+
1315
+ if (pixels < RKVDEC2_1080P_PIXELS)
1316
+ return RKVDEC2_CCU_TIMEOUT_20MS;
1317
+ else if (pixels < RKVDEC2_4K_PIXELS)
1318
+ return RKVDEC2_CCU_TIMEOUT_50MS;
1319
+ else
1320
+ return RKVDEC2_CCU_TIMEOUT_100MS;
1321
+}
1322
+
1323
+int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1324
+{
1325
+ int ret;
1326
+ struct device_node *np;
1327
+ struct platform_device *pdev;
1328
+ struct rkvdec2_ccu *ccu;
1329
+
1330
+ mpp_debug_enter();
1331
+
1332
+ np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1333
+ if (!np || !of_device_is_available(np))
1334
+ return -ENODEV;
1335
+
1336
+ pdev = of_find_device_by_node(np);
1337
+ of_node_put(np);
1338
+ if (!pdev)
1339
+ return -ENODEV;
1340
+
1341
+ ccu = platform_get_drvdata(pdev);
1342
+ if (!ccu)
1343
+ return -ENOMEM;
1344
+
1345
+ ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1346
+ if (ret)
1347
+ return ret;
1348
+ dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1349
+
1350
+ /* if this is not the main core, attach the main core's domain to the current device */
1351
+ if (dec->mpp.core_id != 0) {
1352
+ struct mpp_taskqueue *queue;
1353
+ struct mpp_iommu_info *ccu_info, *cur_info;
1354
+
1355
+ queue = dec->mpp.queue;
1356
+ /* set the ccu-domain for current device */
1357
+ ccu_info = queue->cores[0]->iommu_info;
1358
+ cur_info = dec->mpp.iommu_info;
1359
+ cur_info->domain = ccu_info->domain;
1360
+ mpp_iommu_attach(cur_info);
1361
+ }
1362
+
1363
+ dec->ccu = ccu;
1364
+
1365
+ dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1366
+ mpp_debug_leave();
1367
+
1368
+ return 0;
1369
+}
1370
+
1371
+static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
1372
+{
1373
+ struct mpp_dev *mpp;
1374
+ struct mpp_task *task = container_of(to_delayed_work(work_s),
1375
+ struct mpp_task, timeout_work);
1376
+
1377
+ if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1378
+ mpp_err("task %d state %lx has been handled\n",
1379
+ task->task_id, task->state);
1380
+ return;
1381
+ }
1382
+
1383
+ if (!task->session) {
1384
+ mpp_err("task %d session is null.\n", task->task_id);
1385
+ return;
1386
+ }
1387
+ mpp = mpp_get_task_used_device(task, task->session);
1388
+ mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
1389
+ task->task_index, task->state);
1390
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
1391
+ atomic_inc(&mpp->reset_request);
1392
+ atomic_inc(&mpp->queue->reset_request);
1393
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1394
+}
1395
+
1396
+int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1397
+{
1398
+ struct resource *res;
1399
+ struct rkvdec_link_dev *link_dec;
1400
+ struct device *dev = &pdev->dev;
1401
+
1402
+ mpp_debug_enter();
1403
+
1404
+ /* link structure */
1405
+ link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1406
+ if (!link_dec)
1407
+ return -ENOMEM;
1408
+
1409
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1410
+ if (!res)
1411
+ return -ENOMEM;
1412
+
1413
+ link_dec->info = dec->mpp.var->hw_info->link_info;
1414
+ link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1415
+ if (!link_dec->reg_base) {
1416
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
1417
+ return -ENOMEM;
1418
+ }
1419
+
1420
+ dec->link_dec = link_dec;
1421
+
1422
+ mpp_debug_leave();
1423
+
1424
+ return 0;
1425
+}
1426
+
1427
+static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1428
+ struct rkvdec2_ccu *ccu)
1429
+{
1430
+ if (!atomic_xchg(&ccu->power_enabled, 1)) {
1431
+ u32 i;
1432
+ struct mpp_dev *mpp;
1433
+
1434
+ /* ccu pd and clk on */
1435
+ pm_runtime_get_sync(ccu->dev);
1436
+ pm_stay_awake(ccu->dev);
1437
+ mpp_clk_safe_enable(ccu->aclk_info.clk);
1438
+ /* core pd and clk on */
1439
+ for (i = 0; i < queue->core_count; i++) {
1440
+ struct rkvdec2_dev *dec;
1441
+
1442
+ mpp = queue->cores[i];
1443
+ dec = to_rkvdec2_dev(mpp);
1444
+ pm_runtime_get_sync(mpp->dev);
1445
+ pm_stay_awake(mpp->dev);
1446
+ if (mpp->hw_ops->clk_on)
1447
+ mpp->hw_ops->clk_on(mpp);
1448
+
1449
+ mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1450
+ mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1451
+ mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1452
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
1453
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1454
+ }
1455
+ mpp_debug(DEBUG_CCU, "power on\n");
1456
+ }
1457
+
1458
+ return 0;
1459
+}
1460
+
1461
+static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1462
+ struct rkvdec2_ccu *ccu)
1463
+{
1464
+ if (atomic_xchg(&ccu->power_enabled, 0)) {
1465
+ u32 i;
1466
+ struct mpp_dev *mpp;
1467
+
1468
+ /* ccu pd and clk off */
1469
+ mpp_clk_safe_disable(ccu->aclk_info.clk);
1470
+ pm_relax(ccu->dev);
1471
+ pm_runtime_mark_last_busy(ccu->dev);
1472
+ pm_runtime_put_autosuspend(ccu->dev);
1473
+ /* core pd and clk off */
1474
+ for (i = 0; i < queue->core_count; i++) {
1475
+ mpp = queue->cores[i];
1476
+
1477
+ if (mpp->hw_ops->clk_off)
1478
+ mpp->hw_ops->clk_off(mpp);
1479
+ pm_relax(mpp->dev);
1480
+ pm_runtime_mark_last_busy(mpp->dev);
1481
+ pm_runtime_put_autosuspend(mpp->dev);
1482
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1483
+ }
1484
+ mpp_debug(DEBUG_CCU, "power off\n");
1485
+ }
1486
+
1487
+ return 0;
1488
+}
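+
+/*
+ * Power handling is a single on/off flag rather than a refcount: the
+ * atomic_xchg() on ccu->power_enabled makes rkvdec2_ccu_power_on()/_off()
+ * idempotent, so the worker can call them opportunistically (on before
+ * touching the hardware, off once both the running and pending lists are
+ * empty).
+ */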
1489
+
1490
+static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1491
+{
1492
+ struct mpp_task *mpp_task = NULL, *n;
1493
+
1494
+ mpp_debug_enter();
1495
+
1496
+ list_for_each_entry_safe(mpp_task, n,
1497
+ &queue->running_list,
1498
+ queue_link) {
1499
+ struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1500
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1501
+ u32 irq_status = mpp->irq_status;
1502
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1503
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1504
+ u32 timing_en = mpp->srv->timing_en;
1505
+
1506
+ if (irq_status || timeout_flag || abort_flag) {
1507
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1508
+
1509
+ if (timing_en) {
1510
+ mpp_task->on_irq = ktime_get();
1511
+ set_bit(TASK_TIMING_IRQ, &mpp_task->state);
1512
+
1513
+ mpp_task->on_cancel_timeout = mpp_task->on_irq;
1514
+ set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
1515
+
1516
+ mpp_task->on_isr = mpp_task->on_irq;
1517
+ set_bit(TASK_TIMING_ISR, &mpp_task->state);
1518
+ }
1519
+
1520
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1521
+ cancel_delayed_work(&mpp_task->timeout_work);
1522
+ mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
1523
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
1524
+ task->irq_status = irq_status;
1525
+ mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1526
+ irq_status, timeout_flag, abort_flag);
1527
+ if (irq_status && mpp->dev_ops->finish)
1528
+ mpp->dev_ops->finish(mpp, mpp_task);
1529
+ else
1530
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1531
+
1532
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
1533
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
1534
+
1535
+ set_bit(mpp->core_id, &queue->core_idle);
1536
+ mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1537
+ /* Wake up the GET thread */
1538
+ wake_up(&mpp_task->wait);
1539
+ /* free task */
1540
+ list_del_init(&mpp_task->queue_link);
1541
+ kref_put(&mpp_task->ref, mpp_free_task);
1542
+ } else {
1543
+			/* NOTE: stop at the first unfinished task */
1544
+ break;
1545
+ }
1546
+ }
1547
+
1548
+ mpp_debug_leave();
1549
+ return 0;
1550
+}
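+
+/*
+ * The dequeue loop above stops at the first task that is neither finished
+ * (irq_status set), timed out nor aborted; later entries on the running
+ * list are assumed to still be owned by the hardware.
+ */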
1551
+
1552
+static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1553
+ struct rkvdec2_ccu *ccu)
1554
+{
1555
+ int i;
1556
+
1557
+ for (i = queue->core_count - 1; i >= 0; i--) {
1558
+ u32 val;
1559
+
1560
+ struct mpp_dev *mpp = queue->cores[i];
1561
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1562
+
1563
+ if (mpp->disable)
1564
+ continue;
1565
+
1566
+ dev_info(mpp->dev, "resetting...\n");
1567
+ disable_hardirq(mpp->irq);
1568
+
1569
+		/* force idle, disconnect core and ccu */
1570
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1571
+
1572
+ /* soft reset */
1573
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1574
+ udelay(5);
1575
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1576
+ if (!(val & RKVDEC_SOFT_RESET_READY))
1577
+ mpp_err("soft reset fail, int %08x\n", val);
1578
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1579
+
1580
+ /* check bus idle */
1581
+ val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1582
+ if (!(val & RKVDEC_BIT_BUS_IDLE))
1583
+ mpp_err("bus busy\n");
1584
+
1585
+ if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1586
+ /* sip reset */
1587
+ rockchip_dmcfreq_lock();
1588
+ sip_smc_vpu_reset(i, 0, 0);
1589
+ rockchip_dmcfreq_unlock();
1590
+ } else {
1591
+ rkvdec2_reset(mpp);
1592
+ }
1593
+ /* clear error mask */
1594
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1595
+ ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1596
+ /* connect core and ccu */
1597
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1598
+ ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1599
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1600
+ atomic_set(&mpp->reset_request, 0);
1601
+
1602
+ enable_irq(mpp->irq);
1603
+ dev_info(mpp->dev, "reset done\n");
1604
+ }
1605
+ atomic_set(&queue->reset_request, 0);
1606
+
1607
+ return 0;
1608
+}
1609
+
1610
+void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1611
+ struct mpp_task_msgs *msgs)
1612
+{
1613
+ int ret;
1614
+ struct rkvdec2_task *task;
1615
+
1616
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
1617
+ if (!task)
1618
+ return NULL;
1619
+
1620
+ ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1621
+ if (ret) {
1622
+ kfree(task);
1623
+ return NULL;
1624
+ }
1625
+
1626
+ return &task->mpp_task;
1627
+}
1628
+
1629
+static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
1630
+{
1631
+ u32 i = 0;
1632
+
1633
+ for (i = 0; i < mpp->queue->core_count; i++) {
1634
+ struct mpp_dev *core = mpp->queue->cores[i];
1635
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
1636
+ void __iomem *mmu_base = dec->mmu_base;
1637
+ u32 mmu0_st;
1638
+ u32 mmu1_st;
1639
+ u32 mmu0_pta;
1640
+ u32 mmu1_pta;
1641
+
1642
+ if (!mmu_base)
1643
+ return;
1644
+
1645
+ #define FAULT_STATUS 0x7e2
1646
+ rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
1647
+
1648
+ mmu0_st = readl(mmu_base + 0x4);
1649
+ mmu1_st = readl(mmu_base + 0x44);
1650
+ mmu0_pta = readl(mmu_base + 0xc);
1651
+ mmu1_pta = readl(mmu_base + 0x4c);
1652
+
1653
+ dec->mmu0_st = mmu0_st;
1654
+ dec->mmu1_st = mmu1_st;
1655
+ dec->mmu0_pta = mmu0_pta;
1656
+ dec->mmu1_pta = mmu1_pta;
1657
+
1658
+		pr_err("core %d mmu0 %08x %08x mmu1 %08x %08x\n",
1659
+ core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
1660
+ if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
1661
+ mmu0_pta || mmu1_pta) {
1662
+ dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
1663
+ dec->mmu_fault = 1;
1664
+ pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
1665
+ rockchip_iommu_mask_irq(core->dev);
1666
+ } else {
1667
+ dec->mmu_fault = 0;
1668
+ dec->fault_iova = 0;
1669
+ }
1670
+ }
1671
+}
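+
+/*
+ * The raw offsets read above (0x4/0xc and 0x44/0x4c) appear to correspond
+ * to the status and page-fault-address registers of the two Rockchip IOMMU
+ * instances in front of the decoder, and FAULT_STATUS masks the fault bits
+ * of the status register. Any hit masks the IOMMU IRQ and records the
+ * faulting iova so the worker can drop the offending task.
+ */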
1672
+
1673
+int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1674
+ struct device *iommu_dev,
1675
+ unsigned long iova, int status, void *arg)
1676
+{
1677
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1678
+
1679
+ mpp_debug_enter();
1680
+
1681
+ rkvdec2_ccu_check_pagefault_info(mpp);
1682
+
1683
+ mpp->queue->iommu_fault = 1;
1684
+ atomic_inc(&mpp->queue->reset_request);
1685
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1686
+
1687
+ mpp_debug_leave();
1688
+
1689
+ return 0;
1690
+}
1691
+
1692
+irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1693
+{
1694
+ struct mpp_dev *mpp = param;
1695
+ u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1696
+
1697
+ if (irq_status & RKVDEC_IRQ_RAW) {
1698
+ mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1699
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
1700
+ atomic_inc(&mpp->reset_request);
1701
+ atomic_inc(&mpp->queue->reset_request);
1702
+ }
1703
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1704
+ mpp->irq_status = irq_status;
1705
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1706
+ return IRQ_HANDLED;
1707
+ }
1708
+ return IRQ_NONE;
1709
+}
1710
+
1711
+static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1712
+{
1713
+ u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1714
+
1715
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1716
+
1717
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1718
+
1719
+ return 0;
1720
+}
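+
+/*
+ * rkvdec2_set_core_info() stashes the session index in the film-grain index
+ * field (RKVDEC_REG_FILM_IDX_MASK) of the core control register, presumably
+ * so a hardware descriptor can later be mapped back to the session that
+ * issued it.
+ */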
1721
+
1722
+static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1723
+{
1724
+ u32 i, reg_en, reg;
1725
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1726
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1727
+ u32 timing_en = mpp->srv->timing_en;
1728
+
1729
+ mpp_debug_enter();
1730
+
1731
+ /* set reg for link */
1732
+ reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1733
+ writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1734
+
1735
+ /* set reg for ccu */
1736
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1737
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1738
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1739
+
1740
+ /* set cache size */
1741
+ reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1742
+ RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1743
+ if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1744
+ reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1745
+
1746
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1747
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1748
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1749
+ /* clear cache */
1750
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1751
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1752
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1753
+
1754
+ mpp_iommu_flush_tlb(mpp->iommu_info);
1755
+ /* disable multicore pu/colmv offset req timeout reset */
1756
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
1757
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
1758
+ /* set registers for hardware */
1759
+ reg_en = mpp_task->hw_info->reg_en;
1760
+ for (i = 0; i < task->w_req_cnt; i++) {
1761
+ int s, e;
1762
+ struct mpp_request *req = &task->w_reqs[i];
1763
+
1764
+ s = req->offset / sizeof(u32);
1765
+ e = s + req->size / sizeof(u32);
1766
+ mpp_write_req(mpp, task->reg, s, e, reg_en);
1767
+ }
1768
+ /* init current task */
1769
+ mpp->cur_task = mpp_task;
1770
+
1771
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
1772
+
1773
+ mpp->irq_status = 0;
1774
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1775
+	/* Flush the register writes before starting the device */
1776
+ wmb();
1777
+ mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1778
+
1779
+ mpp_task_run_end(mpp_task, timing_en);
1780
+
1781
+ mpp_debug_leave();
1782
+
1783
+ return 0;
1784
+}
1785
+
1786
+static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1787
+ struct mpp_task *mpp_task)
1788
+{
1789
+ u32 i = 0;
1790
+ struct rkvdec2_dev *dec = NULL;
1791
+
1792
+ for (i = 0; i < queue->core_count; i++) {
1793
+ struct mpp_dev *mpp = queue->cores[i];
1794
+ struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);
1795
+
1796
+ if (mpp->disable)
1797
+ continue;
1798
+
1799
+ if (test_bit(i, &queue->core_idle)) {
1800
+ if (!dec) {
1801
+ dec = core;
1802
+ continue;
1803
+ }
1804
+			/* prefer the core that has been given the fewest tasks */
1805
+ if (core->task_index < dec->task_index)
1806
+ dec = core;
1807
+ }
1808
+ }
1809
+	/* an idle core was found, claim it for this task */
1810
+ if (dec) {
1811
+ mpp_task->mpp = &dec->mpp;
1812
+ mpp_task->core_id = dec->mpp.core_id;
1813
+ clear_bit(mpp_task->core_id, &queue->core_idle);
1814
+ dec->task_index++;
1815
+ atomic_inc(&dec->mpp.task_count);
1816
+ mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1817
+ return mpp_task->mpp;
1818
+ }
1819
+
1820
+ return NULL;
1821
+}
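+
+/*
+ * Core selection is a simple load balancer: among the idle, non-disabled
+ * cores it picks the one with the smallest task_index (the one that has
+ * been handed the fewest tasks so far), claims it by clearing its
+ * core_idle bit and bumps its counters.
+ */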
1822
+
1823
+static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1824
+{
1825
+ struct mpp_dev *mpp;
1826
+ bool flag = false;
1827
+ u32 i = 0;
1828
+
1829
+ for (i = 0; i < queue->core_count; i++) {
1830
+ mpp = queue->cores[i];
1831
+ if (mpp->disable)
1832
+ continue;
1833
+ if (!test_bit(i, &queue->core_idle)) {
1834
+ flag = true;
1835
+ break;
1836
+ }
1837
+ }
1838
+
1839
+ return flag;
1840
+}
1841
+
1842
+static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
1843
+ struct mpp_taskqueue *queue)
1844
+{
1845
+ mutex_lock(&queue->session_lock);
1846
+ while (atomic_read(&queue->detach_count)) {
1847
+ struct mpp_session *session = NULL;
1848
+
1849
+ session = list_first_entry_or_null(&queue->session_detach,
1850
+ struct mpp_session,
1851
+ session_link);
1852
+ if (session) {
1853
+ list_del_init(&session->session_link);
1854
+ atomic_dec(&queue->detach_count);
1855
+ }
1856
+
1857
+ mutex_unlock(&queue->session_lock);
1858
+
1859
+ if (session) {
1860
+ mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1861
+ atomic_read(&queue->detach_count));
1862
+ mpp_session_deinit(session);
1863
+ }
1864
+
1865
+ mutex_lock(&queue->session_lock);
1866
+ }
1867
+ mutex_unlock(&queue->session_lock);
1868
+
1869
+ return 0;
1870
+}
1871
+
1872
+void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1873
+{
1874
+ struct mpp_task *mpp_task;
1875
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1876
+ struct mpp_taskqueue *queue = mpp->queue;
1877
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1878
+ u32 timing_en = mpp->srv->timing_en;
1879
+
1880
+ mpp_debug_enter();
1881
+
1882
+ /* 1. process all finished task in running list */
1883
+ rkvdec2_soft_ccu_dequeue(queue);
1884
+
1885
+ /* 2. process reset request */
1886
+ if (atomic_read(&queue->reset_request)) {
1887
+ if (!rkvdec2_core_working(queue)) {
1888
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1889
+ rkvdec2_soft_ccu_reset(queue, dec->ccu);
1890
+ }
1891
+ }
1892
+
1893
+ /* 3. process pending task */
1894
+ while (1) {
1895
+ if (atomic_read(&queue->reset_request))
1896
+ break;
1897
+		/* get one task from the pending list */
1898
+ mutex_lock(&queue->pending_lock);
1899
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
1900
+ struct mpp_task, queue_link);
1901
+ mutex_unlock(&queue->pending_lock);
1902
+ if (!mpp_task)
1903
+ break;
1904
+
1905
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1906
+ mutex_lock(&queue->pending_lock);
1907
+ list_del_init(&mpp_task->queue_link);
1908
+
1909
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
1910
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
1911
+
1912
+ mutex_unlock(&queue->pending_lock);
1913
+ wake_up(&mpp_task->wait);
1914
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
1915
+ continue;
1916
+ }
1917
+ /* find one core is idle */
1918
+ mpp = rkvdec2_get_idle_core(queue, mpp_task);
1919
+ if (!mpp)
1920
+ break;
1921
+
1922
+ if (timing_en) {
1923
+ mpp_task->on_run = ktime_get();
1924
+ set_bit(TASK_TIMING_RUN, &mpp_task->state);
1925
+ }
1926
+
1927
+ /* set session index */
1928
+ rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1929
+ /* set rcb buffer */
1930
+ mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1931
+
1932
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
1933
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1934
+ rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1935
+ /* pending to running */
1936
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1937
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1938
+ }
1939
+
1940
+ /* 4. poweroff when running and pending list are empty */
1941
+ if (list_empty(&queue->running_list) &&
1942
+ list_empty(&queue->pending_list))
1943
+ rkvdec2_ccu_power_off(queue, dec->ccu);
1944
+
1945
+ /* 5. check session detach out of queue */
1946
+ rkvdec2_ccu_link_session_detach(mpp, queue);
1947
+
1948
+ mpp_debug_leave();
1949
+}
1950
+
1951
+int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
1952
+ struct rkvdec_link_dev *link_dec)
1953
+{
1954
+ int ret, i;
1955
+ struct mpp_dma_buffer *table;
1956
+ struct mpp_dev *mpp = &dec->mpp;
1957
+
1958
+ mpp_debug_enter();
1959
+
1960
+ /* alloc table pointer array */
1961
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
1962
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
1963
+ if (!table)
1964
+ return -ENOMEM;
1965
+
1966
+ /* alloc table buffer */
1967
+ ret = rkvdec2_link_alloc_table(mpp, link_dec);
1968
+ if (ret)
1969
+ return ret;
1970
+
1971
+ /* init table array */
1972
+ dec->ccu->table_array = table;
1973
+ for (i = 0; i < mpp->task_capacity; i++) {
1974
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
1975
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
1976
+ table[i].size = link_dec->link_node_size;
1977
+ INIT_LIST_HEAD(&table[i].link);
1978
+ list_add_tail(&table[i].link, &dec->ccu->unused_list);
1979
+ }
1980
+
1981
+ return 0;
1982
+}
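+
+/*
+ * One DMA buffer allocated by rkvdec2_link_alloc_table() is sliced into
+ * mpp->task_capacity link-table nodes of link_node_size bytes each; every
+ * slice gets its own mpp_dma_buffer descriptor and starts out on the CCU
+ * unused_list, from which rkvdec2_hard_ccu_prepare() takes them per task.
+ */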
1983
+
1984
+static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
1985
+{
1986
+ u32 i;
1987
+
1988
+ for (i = 0; i < 10; i++)
1989
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1990
+
1991
+ for (i = 16; i < 22; i++)
1992
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1993
+}
1994
+
1995
+static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
1996
+{
1997
+ u32 i;
1998
+
1999
+ for (i = 0; i < 10; i++)
2000
+ mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
2001
+}
2002
+
2003
+static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
2004
+{
2005
+ u32 j;
2006
+
2007
+ if (task) {
2008
+ for (j = 0; j < 273; j++)
2009
+ mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j*4), task->reg[j]);
2010
+ } else {
2011
+ for (j = 0; j < 273; j++)
2012
+ mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j*4));
2013
+ }
2014
+}
2015
+
2016
+irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
2017
+{
2018
+ u32 irq_status;
2019
+ struct mpp_dev *mpp = param;
2020
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2021
+
2022
+ irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2023
+ dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2024
+ if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
2025
+ dec->link_dec->irq_status = irq_status;
2026
+ mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
2027
+ mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
2028
+ mpp->core_id, irq_status, mpp->irq_status);
2029
+
2030
+ writel(irq_status & 0xfffff0ff,
2031
+ dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2032
+
2033
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
2034
+ return IRQ_HANDLED;
2035
+ }
2036
+
2037
+ return IRQ_NONE;
2038
+}
2039
+
2040
+static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
2041
+{
2042
+ u32 i, off, s, n;
2043
+ struct rkvdec_link_part *part = hw->part_r;
2044
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2045
+
2046
+ mpp_debug_enter();
2047
+
2048
+ for (i = 0; i < hw->part_r_num; i++) {
2049
+ off = part[i].tb_reg_off;
2050
+ s = part[i].reg_start;
2051
+ n = part[i].reg_num;
2052
+ memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
2053
+ }
2054
+ /* revert hack for irq status */
2055
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
2056
+
2057
+ mpp_debug_leave();
2058
+
2059
+ return 0;
2060
+}
2061
+
2062
+static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
2063
+ struct rkvdec2_ccu *ccu,
2064
+ struct rkvdec_link_info *hw)
2065
+{
2066
+ struct mpp_task *mpp_task = NULL, *n;
2067
+ u32 dump_reg = 0;
2068
+ u32 dequeue_none = 0;
2069
+
2070
+ mpp_debug_enter();
2071
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
2072
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
2073
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
2074
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2075
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2076
+ u32 irq_status = tb_reg[hw->tb_reg_int];
2077
+ u32 ccu_decoded_num, ccu_total_dec_num;
2078
+
2079
+ ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2080
+ ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2081
+ mpp_debug(DEBUG_IRQ_CHECK,
2082
+ "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2083
+ mpp_task->session->index, mpp_task->task_index, task->width,
2084
+ task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2085
+ timeout_flag, abort_flag, (u32)task->table->iova,
2086
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next],
2087
+ ccu_decoded_num, ccu_total_dec_num);
2088
+
2089
+ if (irq_status || timeout_flag || abort_flag) {
2090
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);
2091
+
2092
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2093
+ cancel_delayed_work(&mpp_task->timeout_work);
2094
+ mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2095
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2096
+ task->irq_status = irq_status;
2097
+
2098
+ if (irq_status)
2099
+ rkvdec2_hard_ccu_finish(hw, task);
2100
+
2101
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2102
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2103
+
2104
+ if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2105
+ u32 i;
2106
+
2107
+ mpp_err("###### ccu #####\n");
2108
+ rkvdec2_dump_ccu(ccu);
2109
+ for (i = 0; i < queue->core_count; i++) {
2110
+ mpp_err("###### core %d #####\n", i);
2111
+ rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
2112
+ rkvdec2_dump_core(queue->cores[i], task);
2113
+ }
2114
+ dump_reg = 1;
2115
+ }
2116
+ list_move_tail(&task->table->link, &ccu->unused_list);
2117
+ /* free task */
2118
+ list_del_init(&mpp_task->queue_link);
2119
+ /* Wake up the GET thread */
2120
+ wake_up(&mpp_task->wait);
2121
+ if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2122
+ pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
2123
+ mpp_task->session->index, mpp_task->task_index,
2124
+ irq_status, timeout_flag, abort_flag);
2125
+ atomic_inc(&queue->reset_request);
2126
+ }
2127
+
2128
+ kref_put(&mpp_task->ref, mpp_free_task);
2129
+ } else {
2130
+ dequeue_none++;
2131
+ /*
2132
+			 * There are only 2 cores, so if more than two unfinished
2133
+			 * tasks are found here, the rest cannot have started on
2134
+			 * the hardware yet and the scan can break early.
2135
+ */
2136
+ if (dequeue_none > 2)
2137
+ break;
2138
+ }
2139
+ }
2140
+
2141
+ mpp_debug_leave();
2142
+ return 0;
2143
+}
2144
+
2145
+static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
2146
+{
2147
+ int i = 0;
2148
+
2149
+ mpp_debug_enter();
2150
+
2151
+ /* reset and active core */
2152
+ for (i = 0; i < queue->core_count; i++) {
2153
+ u32 val = 0;
2154
+ struct mpp_dev *mpp = queue->cores[i];
2155
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2156
+
2157
+ if (mpp->disable)
2158
+ continue;
2159
+ dev_info(mpp->dev, "resetting...\n");
2160
+ disable_hardirq(mpp->irq);
2161
+ /* force idle */
2162
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
2163
+ writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2164
+
2165
+ {
2166
+ /* soft reset */
2167
+ u32 val;
2168
+
2169
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
2170
+ udelay(5);
2171
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
2172
+ if (!(val & RKVDEC_SOFT_RESET_READY))
2173
+ mpp_err("soft reset fail, int %08x\n", val);
2174
+
2175
+ // /* cru reset */
2176
+ // dev_info(mpp->dev, "cru reset\n");
2177
+ // rkvdec2_reset(mpp);
2178
+ }
2179
+#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
2180
+ rockchip_dmcfreq_lock();
2181
+ sip_smc_vpu_reset(i, 0, 0);
2182
+ rockchip_dmcfreq_unlock();
2183
+#else
2184
+ rkvdec2_reset(mpp);
2185
+#endif
2186
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
2187
+ enable_irq(mpp->irq);
2188
+ atomic_set(&mpp->reset_request, 0);
2189
+ val = mpp_read_relaxed(mpp, 272*4);
2190
+ dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
2191
+ }
2192
+ /* reset ccu */
2193
+ mpp_safe_reset(ccu->rst_a);
2194
+ udelay(5);
2195
+ mpp_safe_unreset(ccu->rst_a);
2196
+
2197
+ mpp_debug_leave();
2198
+ return 0;
2199
+}
2200
+
2201
+static struct mpp_task *
2202
+rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2203
+ struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2204
+{
2205
+ u32 i, off, s, n;
2206
+ u32 *tb_reg;
2207
+ struct mpp_dma_buffer *table = NULL;
2208
+ struct rkvdec_link_part *part;
2209
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2210
+
2211
+ mpp_debug_enter();
2212
+
2213
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2214
+ return mpp_task;
2215
+
2216
+	/* make sure the current table's next pointer holds the iova of the next link table */
2217
+ {
2218
+ struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2219
+
2220
+ list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2221
+ if (!table0) {
2222
+ table0 = table;
2223
+ continue;
2224
+ }
2225
+ if (!table1)
2226
+ table1 = table;
2227
+ break;
2228
+ }
2229
+ if (!table0 || !table1)
2230
+ return NULL;
2231
+ ((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2232
+ table = table0;
2233
+ }
2234
+
2235
+ /* set session idx */
2236
+ rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2237
+ tb_reg = (u32 *)table->vaddr;
2238
+ part = hw->part_w;
2239
+
2240
+ /* disable multicore pu/colmv offset req timeout reset */
2241
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2242
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2243
+
2244
+ for (i = 0; i < hw->part_w_num; i++) {
2245
+ off = part[i].tb_reg_off;
2246
+ s = part[i].reg_start;
2247
+ n = part[i].reg_num;
2248
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2249
+ }
2250
+
2251
+ /* memset read registers */
2252
+ part = hw->part_r;
2253
+ for (i = 0; i < hw->part_r_num; i++) {
2254
+ off = part[i].tb_reg_off;
2255
+ n = part[i].reg_num;
2256
+ memset(&tb_reg[off], 0, n * sizeof(u32));
2257
+ }
2258
+ list_move_tail(&table->link, &ccu->used_list);
2259
+ task->table = table;
2260
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2261
+ mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2262
+ mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2263
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2264
+
2265
+ mpp_debug_leave();
2266
+
2267
+ return mpp_task;
2268
+}
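+
+/*
+ * Note the two-entry peek into unused_list above: the table taken for this
+ * task (table0) gets its "next" pointer pre-loaded with the iova of the
+ * following free table (table1), so the hardware always sees a valid next
+ * link descriptor even before the next task is prepared.
+ */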
2269
+
2270
+static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
2271
+{
2272
+ int ret = 0;
2273
+ u32 i, val;
2274
+ u32 reg, reg_idx, rcb_size, rcb_offset;
2275
+
2276
+ if (!dec->rcb_iova && !dec->rcb_info_count)
2277
+ goto done;
2278
+ /* check whether fixed */
2279
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2280
+ if (val & RKVDEC_CCU_BIT_FIX_RCB)
2281
+ goto done;
2282
+ /* set registers */
2283
+ rcb_offset = 0;
2284
+ for (i = 0; i < dec->rcb_info_count; i += 2) {
2285
+ reg_idx = dec->rcb_infos[i];
2286
+ rcb_size = dec->rcb_infos[i + 1];
2287
+ mpp_debug(DEBUG_SRAM_INFO,
2288
+ "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
2289
+ reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
2290
+ if ((rcb_offset + rcb_size) > dec->rcb_size) {
2291
+ mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
2292
+ ret = -ENOMEM;
2293
+ goto done;
2294
+ }
2295
+ reg = dec->rcb_iova + rcb_offset;
2296
+ mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
2297
+ rcb_offset += rcb_size;
2298
+ }
2299
+
2300
+ val |= RKVDEC_CCU_BIT_FIX_RCB;
2301
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2302
+done:
2303
+ return ret;
2304
+}
2305
+
2306
+static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
2307
+ struct mpp_task *mpp_task,
2308
+ struct mpp_taskqueue *queue,
2309
+ struct mpp_dev *mpp)
2310
+{
2311
+ u32 ccu_en, work_mode, link_mode;
2312
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2313
+ u32 timing_en = mpp->srv->timing_en;
2314
+
2315
+ mpp_debug_enter();
2316
+
2317
+ if (test_bit(TASK_STATE_START, &mpp_task->state))
2318
+ goto done;
2319
+
2320
+ ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2321
+ mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
2322
+ if (!ccu_en) {
2323
+ u32 i;
2324
+
2325
+ /* set work mode */
2326
+ work_mode = 0;
2327
+ for (i = 0; i < queue->core_count; i++) {
2328
+ u32 val;
2329
+ struct mpp_dev *core = queue->cores[i];
2330
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2331
+
2332
+			if (core->disable)
2333
+ continue;
2334
+ work_mode |= dec->core_mask;
2335
+ rkvdec2_ccu_link_fix_rcb_regs(dec);
2336
+ /* control by ccu */
2337
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2338
+ val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
2339
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2340
+ }
2341
+ writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2342
+ ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2343
+ mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
2344
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
2345
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));
2346
+
2347
+ /* set auto gating */
2348
+ writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
2349
+ /* link start base */
2350
+ writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
2351
+ /* enable link */
2352
+ writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2353
+ }
2354
+
2355
+ /* set link mode */
2356
+ link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
2357
+ writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);
2358
+
2359
+ /* flush tlb before starting hardware */
2360
+ mpp_iommu_flush_tlb(mpp->iommu_info);
2361
+	/* make sure the link table and register writes land before kicking the hw */
2362
+ wmb();
2363
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
2364
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
2365
+ /* configure done */
2366
+ writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
2367
+ mpp_task_run_end(mpp_task, timing_en);
2368
+
2369
+ /* pending to running */
2370
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
2371
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
2372
+ mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
2373
+ mpp_task->session->index, mpp_task->task_index,
2374
+ (u32)task->table->iova, mpp_task->state,
2375
+ readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
2376
+done:
2377
+ mpp_debug_leave();
2378
+
2379
+ return 0;
2380
+}
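+
+/*
+ * Two cases above: when the CCU is idle it is fully programmed (per-core
+ * work mode, RCB registers, autogating, start iova) and enabled; when it is
+ * already running, RKVDEC_CCU_BIT_ADD_MODE is set so the new descriptor is
+ * appended to the running chain. In both cases the final CFG_DONE write is
+ * what actually hands the descriptor to the hardware.
+ */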
2381
+
2382
+static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
2383
+ struct mpp_task *mpp_task)
2384
+{
2385
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2386
+
2387
+ mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
2388
+ mpp_task->session->index, mpp_task->task_index,
2389
+ task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
2390
+ dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
2391
+
2392
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2393
+ task->irq_status |= BIT(4);
2394
+ cancel_delayed_work(&mpp_task->timeout_work);
2395
+ rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
2396
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2397
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2398
+ list_move_tail(&task->table->link, &dec->ccu->unused_list);
2399
+ list_del_init(&mpp_task->queue_link);
2400
+ /* Wake up the GET thread */
2401
+ wake_up(&mpp_task->wait);
2402
+ kref_put(&mpp_task->ref, mpp_free_task);
2403
+ dec->mmu_fault = 0;
2404
+ dec->fault_iova = 0;
2405
+}
2406
+
2407
+static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
2408
+{
2409
+ struct mpp_task *loop = NULL, *n;
2410
+
2411
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2412
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2413
+ u32 iova = (u32)task->table->iova;
2414
+ u32 i;
2415
+
2416
+ for (i = 0; i < queue->core_count; i++) {
2417
+ struct mpp_dev *core = queue->cores[i];
2418
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2419
+
2420
+ if (!dec->mmu_fault || dec->fault_iova != iova)
2421
+ continue;
2422
+ rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
2423
+ }
2424
+ }
2425
+}
2426
+
2427
+static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2428
+{
2429
+ struct rkvdec2_task *task_pre = NULL;
2430
+ struct mpp_task *loop = NULL, *n;
2431
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2432
+
2433
+	/* re-link the unfinished tasks left on the running list */
2434
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2435
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2436
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2437
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2438
+
2439
+ if (!irq_status) {
2440
+ if (task_pre) {
2441
+ tb_reg = (u32 *)task_pre->table->vaddr;
2442
+ tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
2443
+ }
2444
+ task_pre = task;
2445
+ }
2446
+ }
2447
+
2448
+ if (task_pre) {
2449
+ struct mpp_dma_buffer *tbl;
2450
+ u32 *tb_reg;
2451
+
2452
+ tbl = list_first_entry_or_null(&dec->ccu->unused_list,
2453
+ struct mpp_dma_buffer, link);
2454
+ WARN_ON(!tbl);
2455
+ if (tbl) {
2456
+ tb_reg = (u32 *)task_pre->table->vaddr;
2457
+ tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
2458
+ }
2459
+ }
2460
+
2461
+ /* resend */
2462
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2463
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2464
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2465
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2466
+
2467
+ mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
2468
+ loop->session->index, loop->task_index, (u32)task->table->iova,
2469
+ tb_reg[dec->link_dec->info->tb_reg_next], irq_status);
2470
+
2471
+ if (!irq_status) {
2472
+ cancel_delayed_work(&loop->timeout_work);
2473
+ clear_bit(TASK_STATE_START, &loop->state);
2474
+ rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
2475
+ }
2476
+ }
2477
+}
2478
+
2479
+void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
2480
+{
2481
+ struct mpp_task *mpp_task;
2482
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
2483
+ struct mpp_taskqueue *queue = mpp->queue;
2484
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2485
+
2486
+ mpp_debug_enter();
2487
+
2488
+ /* 1. process all finished task in running list */
2489
+ rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);
2490
+
2491
+ /* 2. process reset request */
2492
+ if (atomic_read(&queue->reset_request) &&
2493
+ (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
2494
+ /*
2495
+		 * cancel the timeout work of tasks on the running list to avoid
2496
+		 * a spurious sw timeout caused by a long reset
2497
+ */
2498
+ struct mpp_task *loop = NULL, *n;
2499
+
2500
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2501
+ cancel_delayed_work(&loop->timeout_work);
2502
+ }
2503
+ /* reset process */
2504
+ rkvdec2_hard_ccu_reset(queue, dec->ccu);
2505
+ atomic_set(&queue->reset_request, 0);
2506
+ /* if iommu pagefault, find the fault task and drop it */
2507
+ if (queue->iommu_fault) {
2508
+ rkvdec2_hard_ccu_pagefault_proc(queue);
2509
+ queue->iommu_fault = 0;
2510
+ }
2511
+
2512
+		/* re-link the iovas of the tasks still on the running list and resend them to the hw */
2513
+ if (!list_empty(&queue->running_list))
2514
+ rkvdec2_hard_ccu_resend_tasks(mpp, queue);
2515
+ }
2516
+
2517
+ /* 3. process pending task */
2518
+ while (1) {
2519
+ if (atomic_read(&queue->reset_request))
2520
+ break;
2521
+
2522
+		/* get one task from the pending list */
2523
+ mutex_lock(&queue->pending_lock);
2524
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
2525
+ struct mpp_task, queue_link);
2526
+ mutex_unlock(&queue->pending_lock);
2527
+
2528
+ if (!mpp_task)
2529
+ break;
2530
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
2531
+ mutex_lock(&queue->pending_lock);
2532
+ list_del_init(&mpp_task->queue_link);
2533
+ mutex_unlock(&queue->pending_lock);
2534
+ kref_put(&mpp_task->ref, mpp_free_task);
2535
+ continue;
2536
+ }
2537
+
2538
+ mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
2539
+ if (!mpp_task)
2540
+ break;
2541
+
2542
+ rkvdec2_ccu_power_on(queue, dec->ccu);
2543
+ rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
2544
+ }
2545
+
2546
+ /* 4. poweroff when running and pending list are empty */
2547
+ mutex_lock(&queue->pending_lock);
2548
+ if (list_empty(&queue->running_list) &&
2549
+ list_empty(&queue->pending_list))
2550
+ rkvdec2_ccu_power_off(queue, dec->ccu);
2551
+ mutex_unlock(&queue->pending_lock);
2552
+
2553
+ /* 5. check session detach out of queue */
2554
+ mpp_session_cleanup_detach(queue, work_s);
2555
+
2556
+ mpp_debug_leave();
2557
+}