hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
....@@ -12,115 +12,15 @@
1212 #include <linux/slab.h>
1313 #include <soc/rockchip/pm_domains.h>
1414 #include <soc/rockchip/rockchip_dmc.h>
15
+#include <soc/rockchip/rockchip_iommu.h>
1516
1617 #include "mpp_rkvdec2_link.h"
1718
1819 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
1920
20
-#ifdef CONFIG_PM_DEVFREQ
21
-#include "../../../devfreq/governor.h"
22
-#endif
21
+#define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
2322
24
-#define WAIT_TIMEOUT_MS (2000)
25
-
26
-#define RKVDEC_MAX_WRITE_PART 6
27
-#define RKVDEC_MAX_READ_PART 2
28
-
29
-struct rkvdec_link_part {
30
- /* register offset of table buffer */
31
- u32 tb_reg_off;
32
- /* start idx of task register */
33
- u32 reg_start;
34
- /* number of task register */
35
- u32 reg_num;
36
-};
37
-
38
-struct rkvdec_link_status {
39
- u32 dec_num_mask;
40
- u32 err_flag_base;
41
- u32 err_flag_bit;
42
-};
43
-
44
-struct rkvdec_link_info {
45
- dma_addr_t iova;
46
- /* total register for link table buffer */
47
- u32 tb_reg_num;
48
- /* next link table addr in table buffer */
49
- u32 tb_reg_next;
50
- /* current read back addr in table buffer */
51
- u32 tb_reg_r;
52
- /* secondary enable in table buffer */
53
- u32 tb_reg_second_en;
54
- u32 part_w_num;
55
- u32 part_r_num;
56
-
57
- struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
58
- struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
59
-
60
- /* interrupt read back in table buffer */
61
- u32 tb_reg_int;
62
- bool hack_setup;
63
- struct rkvdec_link_status reg_status;
64
-};
65
-
66
-struct rkvdec_link_info rkvdec_link_rk3568_hw_info = {
67
- .tb_reg_num = 202,
68
- .tb_reg_next = 0,
69
- .tb_reg_r = 1,
70
- .tb_reg_second_en = 8,
71
-
72
- .part_w_num = 6,
73
- .part_r_num = 2,
74
- .part_w[0] = {
75
- .tb_reg_off = 4,
76
- .reg_start = 8,
77
- .reg_num = 20,
78
- },
79
- .part_w[1] = {
80
- .tb_reg_off = 24,
81
- .reg_start = 64,
82
- .reg_num = 52,
83
- },
84
- .part_w[2] = {
85
- .tb_reg_off = 76,
86
- .reg_start = 128,
87
- .reg_num = 16,
88
- },
89
- .part_w[3] = {
90
- .tb_reg_off = 92,
91
- .reg_start = 160,
92
- .reg_num = 40,
93
- },
94
- .part_w[4] = {
95
- .tb_reg_off = 132,
96
- .reg_start = 224,
97
- .reg_num = 16,
98
- },
99
- .part_w[5] = {
100
- .tb_reg_off = 148,
101
- .reg_start = 256,
102
- .reg_num = 16,
103
- },
104
- .part_r[0] = {
105
- .tb_reg_off = 164,
106
- .reg_start = 224,
107
- .reg_num = 10,
108
- },
109
- .part_r[1] = {
110
- .tb_reg_off = 174,
111
- .reg_start = 258,
112
- .reg_num = 28,
113
- },
114
- .tb_reg_int = 164,
115
- .hack_setup = 1,
116
- .reg_status = {
117
- .dec_num_mask = 0x3fffffff,
118
- .err_flag_base = 0x010,
119
- .err_flag_bit = BIT(31),
120
- },
121
-};
122
-
123
-/* vdpu382 link hw info */
23
+/* vdpu381 link hw info for rk3588 */
12424 struct rkvdec_link_info rkvdec_link_v2_hw_info = {
12525 .tb_reg_num = 218,
12626 .tb_reg_next = 0,
....@@ -170,13 +70,139 @@
17070 .reg_num = 28,
17171 },
17272 .tb_reg_int = 180,
73
+ .tb_reg_cycle = 195,
17374 .hack_setup = 0,
75
+ .reg_status = {
76
+ .dec_num_mask = 0x3fffffff,
77
+ .err_flag_base = 0x010,
78
+ .err_flag_bit = BIT(31),
79
+ },
80
+};
81
+
82
+/* vdpu34x link hw info for rk356x */
83
+struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
84
+ .tb_reg_num = 202,
85
+ .tb_reg_next = 0,
86
+ .tb_reg_r = 1,
87
+ .tb_reg_second_en = 8,
88
+
89
+ .part_w_num = 6,
90
+ .part_r_num = 2,
91
+ .part_w[0] = {
92
+ .tb_reg_off = 4,
93
+ .reg_start = 8,
94
+ .reg_num = 20,
95
+ },
96
+ .part_w[1] = {
97
+ .tb_reg_off = 24,
98
+ .reg_start = 64,
99
+ .reg_num = 52,
100
+ },
101
+ .part_w[2] = {
102
+ .tb_reg_off = 76,
103
+ .reg_start = 128,
104
+ .reg_num = 16,
105
+ },
106
+ .part_w[3] = {
107
+ .tb_reg_off = 92,
108
+ .reg_start = 160,
109
+ .reg_num = 40,
110
+ },
111
+ .part_w[4] = {
112
+ .tb_reg_off = 132,
113
+ .reg_start = 224,
114
+ .reg_num = 16,
115
+ },
116
+ .part_w[5] = {
117
+ .tb_reg_off = 148,
118
+ .reg_start = 256,
119
+ .reg_num = 16,
120
+ },
121
+ .part_r[0] = {
122
+ .tb_reg_off = 164,
123
+ .reg_start = 224,
124
+ .reg_num = 10,
125
+ },
126
+ .part_r[1] = {
127
+ .tb_reg_off = 174,
128
+ .reg_start = 258,
129
+ .reg_num = 28,
130
+ },
131
+ .tb_reg_int = 164,
132
+ .tb_reg_cycle = 179,
133
+ .hack_setup = 1,
134
+ .reg_status = {
135
+ .dec_num_mask = 0x3fffffff,
136
+ .err_flag_base = 0x010,
137
+ .err_flag_bit = BIT(31),
138
+ },
139
+};
140
+
141
+/* vdpu382 link hw info */
142
+struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
143
+ .tb_reg_num = 222,
144
+ .tb_reg_next = 0,
145
+ .tb_reg_r = 1,
146
+ .tb_reg_second_en = 8,
147
+
148
+ .part_w_num = 6,
149
+ .part_r_num = 2,
150
+ .part_w[0] = {
151
+ .tb_reg_off = 4,
152
+ .reg_start = 8,
153
+ .reg_num = 28,
154
+ },
155
+ .part_w[1] = {
156
+ .tb_reg_off = 32,
157
+ .reg_start = 64,
158
+ .reg_num = 52,
159
+ },
160
+ .part_w[2] = {
161
+ .tb_reg_off = 84,
162
+ .reg_start = 128,
163
+ .reg_num = 16,
164
+ },
165
+ .part_w[3] = {
166
+ .tb_reg_off = 100,
167
+ .reg_start = 160,
168
+ .reg_num = 48,
169
+ },
170
+ .part_w[4] = {
171
+ .tb_reg_off = 148,
172
+ .reg_start = 224,
173
+ .reg_num = 16,
174
+ },
175
+ .part_w[5] = {
176
+ .tb_reg_off = 164,
177
+ .reg_start = 256,
178
+ .reg_num = 16,
179
+ },
180
+ .part_r[0] = {
181
+ .tb_reg_off = 180,
182
+ .reg_start = 224,
183
+ .reg_num = 12,
184
+ },
185
+ .part_r[1] = {
186
+ .tb_reg_off = 192,
187
+ .reg_start = 258,
188
+ .reg_num = 30,
189
+ },
190
+ .tb_reg_int = 180,
191
+ .hack_setup = 0,
192
+ .tb_reg_cycle = 197,
174193 .reg_status = {
175194 .dec_num_mask = 0x000fffff,
176195 .err_flag_base = 0x024,
177196 .err_flag_bit = BIT(8),
178197 },
179198 };
199
+
200
+static void rkvdec2_link_free_task(struct kref *ref);
201
+static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
202
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
203
+ struct device *iommu_dev,
204
+ unsigned long iova,
205
+ int status, void *arg);
180206
181207 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
182208 {
....@@ -228,7 +254,7 @@
228254 u32 *reg = NULL;
229255 u32 i, j;
230256
231
- for (i = 0; i < dev->task_size; i++) {
257
+ for (i = 0; i < dev->task_capacity; i++) {
232258 reg = table_base + i * reg_count;
233259
234260 mpp_err("slot %d link config iova %08x:\n", i,
....@@ -275,9 +301,8 @@
275301 {
276302 mpp_err("dump link counter from %s\n", func);
277303
278
- mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
279
- dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
280
- dev->task_to_run, dev->task_decoded, dev->task_total);
304
+ mpp_err("task pending %d running %d\n",
305
+ atomic_read(&dev->task_pending), dev->task_running);
281306 }
282307
283308 int rkvdec_link_dump(struct mpp_dev *mpp)
....@@ -290,160 +315,6 @@
290315 rkvdec_link_counter(__func__, dev);
291316 rkvdec_core_reg_dump(__func__, dev);
292317 rkvdec_link_node_dump(__func__, dev);
293
-
294
- return 0;
295
-}
296
-
297
-static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
298
-{
299
- int idx = dev->task_write < dev->task_size ? dev->task_write :
300
- dev->task_write - dev->task_size;
301
-
302
- return idx;
303
-}
304
-static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
305
-{
306
- int task_write = rkvdec_link_get_task_write(dev);
307
-
308
- dev->task_write++;
309
- if (dev->task_write >= dev->task_size * 2)
310
- dev->task_write = 0;
311
-
312
- return task_write;
313
-}
314
-static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
315
-{
316
- int idx = dev->task_read < dev->task_size ? dev->task_read :
317
- dev->task_read - dev->task_size;
318
-
319
- return idx;
320
-}
321
-static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
322
-{
323
- int task_read = rkvdec_link_get_task_read(dev);
324
-
325
- dev->task_read++;
326
- if (dev->task_read >= dev->task_size * 2)
327
- dev->task_read = 0;
328
-
329
- return task_read;
330
-}
331
-static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
332
-{
333
- int len;
334
-
335
- if (dev->task_send <= dev->task_recv)
336
- len = dev->task_send + dev->task_size - dev->task_recv;
337
- else
338
- len = dev->task_send - dev->task_recv - dev->task_size;
339
-
340
- return len;
341
-}
342
-static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
343
-{
344
- int idx = dev->task_send < dev->task_size ? dev->task_send :
345
- dev->task_send - dev->task_size;
346
-
347
- return idx;
348
-}
349
-static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
350
-{
351
- int task_send = rkvdec_link_get_task_send(dev);
352
-
353
- dev->task_send++;
354
- if (dev->task_send >= dev->task_size * 2)
355
- dev->task_send = 0;
356
-
357
- return task_send;
358
-}
359
-static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
360
-{
361
- int task_recv = dev->task_recv;
362
-
363
- dev->task_recv++;
364
- if (dev->task_recv >= dev->task_size * 2)
365
- dev->task_recv = 0;
366
-
367
- return task_recv;
368
-}
369
-
370
-static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
371
-{
372
- int next = -1;
373
-
374
- if (dev->task_write == dev->task_read)
375
- return next;
376
-
377
- next = rkvdec_link_get_task_write(dev);
378
-
379
- return next;
380
-}
381
-
382
-static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
383
- struct mpp_task *mpp_task)
384
-{
385
- u32 i, off, s, n;
386
- struct rkvdec_link_part *part;
387
- struct rkvdec_link_info *info;
388
- struct mpp_dma_buffer *table;
389
- struct rkvdec2_task *task;
390
- int slot_idx;
391
- u32 *tb_reg;
392
-
393
- if (idx < 0 || idx >= dev->task_size) {
394
- mpp_err("send invalid task index %d\n", idx);
395
- return -1;
396
- }
397
-
398
- info = dev->info;
399
- part = info->part_w;
400
- table = dev->table;
401
- task = to_rkvdec2_task(mpp_task);
402
-
403
- slot_idx = rkvdec_link_inc_task_write(dev);
404
- if (idx != slot_idx)
405
- dev_info(dev->dev, "slot index mismatch %d vs %d\n",
406
- idx, slot_idx);
407
-
408
- if (task->need_hack) {
409
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
410
-
411
- rkvdec2_3568_hack_fix_link(tb_reg + 4);
412
-
413
- /* setup error mode flag */
414
- dev->tasks_hw[slot_idx] = NULL;
415
- dev->task_to_run++;
416
- dev->task_prepared++;
417
- slot_idx = rkvdec_link_inc_task_write(dev);
418
- }
419
-
420
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
421
-
422
- for (i = 0; i < info->part_w_num; i++) {
423
- off = part[i].tb_reg_off;
424
- s = part[i].reg_start;
425
- n = part[i].reg_num;
426
- memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
427
- }
428
-
429
- /* setup error mode flag */
430
- tb_reg[9] |= BIT(18) | BIT(9);
431
- tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
432
-
433
- /* memset read registers */
434
- part = info->part_r;
435
- for (i = 0; i < info->part_r_num; i++) {
436
- off = part[i].tb_reg_off;
437
- n = part[i].reg_num;
438
- memset(&tb_reg[off], 0, n * sizeof(u32));
439
- }
440
-
441
- dev->tasks_hw[slot_idx] = mpp_task;
442
- task->slot_idx = slot_idx;
443
- dev->task_to_run++;
444
- dev->task_prepared++;
445
- mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
446
- mpp_task->task_index);
447318
448319 return 0;
449320 }
....@@ -467,34 +338,20 @@
467338 mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
468339 }
469340
470
-static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
471
- struct mpp_task *mpp_task,
472
- int slot_idx, u32 task_to_run,
473
- int resend)
341
+static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
342
+ struct mpp_task *mpp_task)
474343 {
475
- void __iomem *reg_base = dev->reg_base;
476
- struct mpp_dma_buffer *table = dev->table;
477
- u32 task_total = dev->task_total;
478
- u32 mode_start = 0;
479
- u32 val;
344
+ void __iomem *reg_base = link_dec->reg_base;
345
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
346
+ struct mpp_dma_buffer *table = task->table;
347
+ u32 link_en = 0;
348
+ u32 frame_num = 1;
349
+ u32 link_mode;
350
+ u32 timing_en = link_dec->mpp->srv->timing_en;
480351
481
- /* write address */
482
- if (!task_to_run || task_to_run > dev->task_size ||
483
- slot_idx < 0 || slot_idx >= dev->task_size) {
484
- mpp_err("invalid task send cfg at %d count %d\n",
485
- slot_idx, task_to_run);
486
- rkvdec_link_counter("error on send", dev);
487
- return 0;
488
- }
489
-
490
- val = task_to_run;
491
- if (!task_total || resend)
492
- mode_start = 1;
493
-
494
- if (mode_start) {
495
- u32 iova = table->iova + slot_idx * dev->link_node_size;
496
-
497
- rkvdec2_clear_cache(dev->mpp);
352
+ link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
353
+ if (!link_en) {
354
+ rkvdec2_clear_cache(link_dec->mpp);
498355 /* cleanup counter in hardware */
499356 writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
500357 /* start config before all registers are set */
....@@ -504,54 +361,31 @@
504361 wmb();
505362 /* clear counter and enable link mode hardware */
506363 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
507
-
508
- dev->task_total = 0;
509
- dev->task_decoded = 0;
510
-
511
- writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
512
- } else {
513
- val |= RKVDEC_LINK_BIT_ADD_MODE;
514
- }
515
-
516
- if (!resend) {
517
- u32 i;
518
-
519
- for (i = 0; i < task_to_run; i++) {
520
- int next_idx = rkvdec_link_inc_task_send(dev);
521
- struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
522
-
523
- if (!task_ddr)
524
- continue;
525
-
526
- set_bit(TASK_STATE_START, &task_ddr->state);
527
- schedule_delayed_work(&task_ddr->timeout_work,
528
- msecs_to_jiffies(200));
529
- mpp_time_record(task_ddr);
530
- }
531
- } else {
532
- if (task_total)
533
- dev_info(dev->dev, "resend with total %d\n", task_total);
534
- }
364
+ writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
365
+ link_mode = frame_num;
366
+ } else
367
+ link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
535368
536369 /* set link mode */
537
- writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
370
+ writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);
538371
539372 /* start config before all registers are set */
540373 wmb();
541374
375
+ mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
376
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
377
+
378
+ link_dec->task_running++;
542379 /* configure done */
543380 writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
544
-
545
- mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
546
- task_to_run, mode_start ? "start" : "add");
547
- if (mode_start) {
381
+ if (!link_en) {
548382 /* start hardware before all registers are set */
549383 wmb();
550384 /* clear counter and enable link mode hardware */
551385 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
552386 }
387
+ mpp_task_run_end(mpp_task, timing_en);
553388
554
- dev->task_total += task_to_run;
555389 return 0;
556390 }
557391
....@@ -563,8 +397,7 @@
563397 struct mpp_dma_buffer *table = link_dec->table;
564398 struct rkvdec_link_info *info = link_dec->info;
565399 struct rkvdec_link_part *part = info->part_r;
566
- int slot_idx = task->slot_idx;
567
- u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
400
+ u32 *tb_reg = (u32 *)table->vaddr;
568401 u32 off, s, n;
569402 u32 i;
570403
....@@ -584,154 +417,71 @@
584417 return 0;
585418 }
586419
587
-static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
588
- struct rkvdec_link_dev *link_dec,
589
- int count)
590
-{
591
- struct rkvdec_link_info *info = link_dec->info;
592
- u32 *table_base = (u32 *)link_dec->table->vaddr;
593
- int i;
594
-
595
- for (i = 0; i < count; i++) {
596
- int idx = rkvdec_link_get_task_read(link_dec);
597
- struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
598
- struct rkvdec2_task *task = NULL;
599
- u32 *regs = NULL;
600
- u32 irq_status = 0;
601
-
602
- if (!mpp_task) {
603
- regs = table_base + idx * link_dec->link_reg_count;
604
- mpp_dbg_link_flow("slot %d read task stuff\n", idx);
605
-
606
- link_dec->stuff_total++;
607
- if (link_dec->statistic_count &&
608
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
609
- link_dec->stuff_cycle_sum +=
610
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
611
- link_dec->stuff_cnt++;
612
- if (link_dec->stuff_cnt >=
613
- link_dec->statistic_count) {
614
- dev_info(
615
- link_dec->dev, "hw cycle %u\n",
616
- (u32)(link_dec->stuff_cycle_sum /
617
- link_dec->statistic_count));
618
- link_dec->stuff_cycle_sum = 0;
619
- link_dec->stuff_cnt = 0;
620
- }
621
- }
622
-
623
- if (link_dec->error && (i == (count - 1))) {
624
- link_dec->stuff_err++;
625
-
626
- irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
627
- dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
628
- irq_status, link_dec->stuff_err,
629
- link_dec->stuff_total);
630
-
631
- if (link_dec->stuff_on_error) {
632
- dev_info(link_dec->dev, "stuff task error again %u/%u\n",
633
- link_dec->stuff_err,
634
- link_dec->stuff_total);
635
- }
636
-
637
- link_dec->stuff_on_error = 1;
638
- /* resend task */
639
- link_dec->decoded--;
640
- } else {
641
- link_dec->stuff_on_error = 0;
642
- rkvdec_link_inc_task_recv(link_dec);
643
- rkvdec_link_inc_task_read(link_dec);
644
- link_dec->task_running--;
645
- link_dec->task_prepared--;
646
- }
647
-
648
- continue;
649
- }
650
-
651
- mpp_time_diff(mpp_task);
652
- task = to_rkvdec2_task(mpp_task);
653
- regs = table_base + idx * link_dec->link_reg_count;
654
- irq_status = regs[info->tb_reg_int];
655
- mpp_dbg_link_flow("slot %d rd task %d\n", idx,
656
- mpp_task->task_index);
657
-
658
- task->irq_status = irq_status ? irq_status : mpp->irq_status;
659
-
660
- cancel_delayed_work_sync(&mpp_task->timeout_work);
661
- set_bit(TASK_STATE_HANDLE, &mpp_task->state);
662
-
663
- if (link_dec->statistic_count &&
664
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
665
- link_dec->task_cycle_sum +=
666
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
667
- link_dec->task_cnt++;
668
- if (link_dec->task_cnt >= link_dec->statistic_count) {
669
- dev_info(link_dec->dev, "hw cycle %u\n",
670
- (u32)(link_dec->task_cycle_sum /
671
- link_dec->statistic_count));
672
- link_dec->task_cycle_sum = 0;
673
- link_dec->task_cnt = 0;
674
- }
675
- }
676
-
677
- rkvdec2_link_finish(mpp, mpp_task);
678
-
679
- set_bit(TASK_STATE_FINISH, &mpp_task->state);
680
-
681
- list_del_init(&mpp_task->queue_link);
682
- link_dec->task_running--;
683
- link_dec->task_prepared--;
684
-
685
- rkvdec_link_inc_task_recv(link_dec);
686
- rkvdec_link_inc_task_read(link_dec);
687
-
688
- if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
689
- set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
690
-
691
- set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
692
- /* Wake up the GET thread */
693
- wake_up(&task->wait);
694
- }
695
-
696
- return 0;
697
-}
698
-
699420 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
700421 struct mpp_task *mpp_task)
701422 {
702
- struct mpp_task *out_task = NULL;
703423 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
704424 struct rkvdec_link_dev *link_dec = dec->link_dec;
705
- int ret = 0;
706
- int slot_idx;
425
+ struct mpp_dma_buffer *table = NULL;
426
+ struct rkvdec_link_part *part;
427
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
428
+ struct rkvdec_link_info *info = link_dec->info;
429
+ u32 i, off, s, n;
430
+ u32 *tb_reg;
707431
708432 mpp_debug_enter();
709433
710
- slot_idx = rkvdec_link_get_next_slot(link_dec);
711
- if (slot_idx < 0) {
712
- mpp_err("capacity %d running %d\n",
713
- mpp->task_capacity, link_dec->task_running);
714
- dev_err(link_dec->dev, "no slot to write on get next slot\n");
715
- goto done;
434
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
435
+ dev_err(mpp->dev, "task %d has prepared\n", mpp_task->task_index);
436
+ return mpp_task;
716437 }
717438
718
- ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
719
- if (ret >= 0)
720
- out_task = mpp_task;
721
- else
722
- dev_err(mpp->dev, "no slot to write\n");
439
+ table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);
723440
724
-done:
441
+ if (!table)
442
+ return NULL;
443
+
444
+ /* fill regs value */
445
+ tb_reg = (u32 *)table->vaddr;
446
+ part = info->part_w;
447
+ for (i = 0; i < info->part_w_num; i++) {
448
+ off = part[i].tb_reg_off;
449
+ s = part[i].reg_start;
450
+ n = part[i].reg_num;
451
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
452
+ }
453
+
454
+ /* setup error mode flag */
455
+ tb_reg[9] |= BIT(18) | BIT(9);
456
+ tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
457
+
458
+ /* memset read registers */
459
+ part = info->part_r;
460
+ for (i = 0; i < info->part_r_num; i++) {
461
+ off = part[i].tb_reg_off;
462
+ n = part[i].reg_num;
463
+ memset(&tb_reg[off], 0, n * sizeof(u32));
464
+ }
465
+
466
+ list_move_tail(&table->link, &link_dec->used_list);
467
+ task->table = table;
468
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
469
+
470
+ mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
471
+ mpp_task->session->index, mpp_task->task_index,
472
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
725473 mpp_debug_leave();
726474
727
- return out_task;
475
+ return mpp_task;
728476 }
729477
730478 static int rkvdec2_link_reset(struct mpp_dev *mpp)
731479 {
732
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
733480
734481 dev_info(mpp->dev, "resetting...\n");
482
+
483
+ disable_irq(mpp->irq);
484
+ mpp_iommu_disable_irq(mpp->iommu_info);
735485
736486 /* FIXME lock resource lock of the other devices in combo */
737487 mpp_iommu_down_write(mpp->iommu_info);
....@@ -740,11 +490,8 @@
740490
741491 rockchip_save_qos(mpp->dev);
742492
743
- mutex_lock(&dec->sip_reset_lock);
744
- rockchip_dmcfreq_lock();
745
- sip_smc_vpu_reset(0, 0, 0);
746
- rockchip_dmcfreq_unlock();
747
- mutex_unlock(&dec->sip_reset_lock);
493
+ if (mpp->hw_ops->reset)
494
+ mpp->hw_ops->reset(mpp);
748495
749496 rockchip_restore_qos(mpp->dev);
750497
....@@ -757,6 +504,8 @@
757504 mpp_reset_up_write(mpp->reset_group);
758505 mpp_iommu_up_write(mpp->iommu_info);
759506
507
+ enable_irq(mpp->irq);
508
+ mpp_iommu_enable_irq(mpp->iommu_info);
760509 dev_info(mpp->dev, "reset done\n");
761510
762511 return 0;
....@@ -768,15 +517,7 @@
768517 struct rkvdec_link_dev *link_dec = dec->link_dec;
769518 u32 irq_status = 0;
770519
771
- if (!atomic_read(&link_dec->power_enabled)) {
772
- dev_info(link_dec->dev, "irq on power off\n");
773
- return -1;
774
- }
775
-
776520 irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
777
-
778
- mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", irq_status);
779
- mpp_dbg_link_flow("link irq %08x\n", irq_status);
780521
781522 if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
782523 u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
....@@ -791,96 +532,14 @@
791532
792533 link_dec->irq_status = irq_status;
793534 mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
794
- mpp_dbg_link_flow("core irq %08x\n", mpp->irq_status);
795535
796536 writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
797537 }
798538
539
+ mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
540
+ irq_status, mpp->irq_status);
541
+
799542 return 0;
800
-}
801
-
802
-static int rkvdec2_link_isr(struct mpp_dev *mpp)
803
-{
804
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
805
- struct rkvdec_link_dev *link_dec = dec->link_dec;
806
- struct rkvdec_link_info *link_info = link_dec->info;
807
- /* keep irq_status */
808
- u32 irq_status = link_dec->irq_status;
809
- u32 prev_dec_num;
810
- int count = 0;
811
- u32 len = 0;
812
- u32 need_reset = atomic_read(&mpp->reset_request);
813
- u32 task_timeout = link_dec->task_on_timeout;
814
-
815
- mpp_debug_enter();
816
-
817
- disable_irq(mpp->irq);
818
- rkvdec_link_status_update(link_dec);
819
- link_dec->irq_status = irq_status;
820
- prev_dec_num = link_dec->task_decoded;
821
-
822
- if (!link_dec->enabled || task_timeout) {
823
- u32 val;
824
-
825
- if (task_timeout)
826
- rkvdec_link_reg_dump("timeout", link_dec);
827
-
828
- val = mpp_read(mpp, 224 * 4);
829
- if (link_info->hack_setup && !(val & BIT(2))) {
830
- /* only for rk356x */
831
- dev_info(mpp->dev, "frame not complete\n");
832
- link_dec->decoded++;
833
- }
834
- }
835
- count = (int)link_dec->decoded - (int)prev_dec_num;
836
-
837
- /* handle counter wrap */
838
- if (link_dec->enabled && !count && !need_reset) {
839
- /* process extra isr when task is processed */
840
- enable_irq(mpp->irq);
841
- goto done;
842
- }
843
-
844
- /* get previous ready task */
845
- if (count) {
846
- rkvdec_link_isr_recv_task(mpp, link_dec, count);
847
- link_dec->task_decoded = link_dec->decoded;
848
- }
849
-
850
- if (!link_dec->enabled || need_reset)
851
- goto do_reset;
852
-
853
- enable_irq(mpp->irq);
854
- goto done;
855
-
856
-do_reset:
857
- /* NOTE: irq may run with reset */
858
- atomic_inc(&mpp->reset_request);
859
- rkvdec2_link_reset(mpp);
860
- link_dec->task_decoded = 0;
861
- link_dec->task_total = 0;
862
- enable_irq(mpp->irq);
863
-
864
- if (link_dec->total == link_dec->decoded)
865
- goto done;
866
-
867
- len = rkvdec_link_get_task_hw_queue_length(link_dec);
868
- if (len > link_dec->task_size)
869
- rkvdec_link_counter("invalid len", link_dec);
870
-
871
- if (len) {
872
- int slot_idx = rkvdec_link_get_task_read(link_dec);
873
- struct mpp_task *mpp_task = NULL;
874
-
875
- mpp_task = link_dec->tasks_hw[slot_idx];
876
- rkvdec_link_send_task_to_hw(link_dec, mpp_task,
877
- slot_idx, len, 1);
878
- }
879
-
880
-done:
881
- mpp_debug_leave();
882
-
883
- return IRQ_HANDLED;
884543 }
885544
886545 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
....@@ -933,12 +592,6 @@
933592 }
934593
935594 link_dec->table = table;
936
- link_dec->task_size = task_capacity;
937
- link_dec->task_count = 0;
938
- link_dec->task_write = 0;
939
- link_dec->task_read = link_dec->task_size;
940
- link_dec->task_send = 0;
941
- link_dec->task_recv = link_dec->task_size;
942595
943596 return 0;
944597 err_free_node:
....@@ -977,18 +630,13 @@
977630 struct rkvdec_link_dev *link_dec = NULL;
978631 struct device *dev = &pdev->dev;
979632 struct mpp_dev *mpp = &dec->mpp;
633
+ struct mpp_dma_buffer *table;
634
+ int i;
980635
981636 mpp_debug_enter();
982637
983638 link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
984639 if (!link_dec) {
985
- ret = -ENOMEM;
986
- goto done;
987
- }
988
-
989
- link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
990
- mpp->task_capacity, GFP_KERNEL);
991
- if (!link_dec->tasks_hw) {
992640 ret = -ENOMEM;
993641 goto done;
994642 }
....@@ -1014,12 +662,33 @@
1014662 if (ret)
1015663 goto done;
1016664
1017
- if (link_dec->info->hack_setup)
665
+ /* alloc table pointer array */
666
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
667
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
668
+ if (!table)
669
+ return -ENOMEM;
670
+
671
+ /* init table array */
672
+ link_dec->table_array = table;
673
+ INIT_LIST_HEAD(&link_dec->used_list);
674
+ INIT_LIST_HEAD(&link_dec->unused_list);
675
+ for (i = 0; i < mpp->task_capacity; i++) {
676
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
677
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
678
+ table[i].size = link_dec->link_node_size;
679
+ INIT_LIST_HEAD(&table[i].link);
680
+ list_add_tail(&table[i].link, &link_dec->unused_list);
681
+ }
682
+
683
+ if (dec->fix)
1018684 rkvdec2_link_hack_data_setup(dec->fix);
685
+
686
+ mpp->fault_handler = rkvdec2_link_iommu_fault_handle;
1019687
1020688 link_dec->mpp = mpp;
1021689 link_dec->dev = dev;
1022690 atomic_set(&link_dec->task_timeout, 0);
691
+ atomic_set(&link_dec->task_pending, 0);
1023692 atomic_set(&link_dec->power_enabled, 0);
1024693 link_dec->irq_enabled = 1;
1025694
....@@ -1033,11 +702,6 @@
1033702 devm_iounmap(dev, link_dec->reg_base);
1034703 link_dec->reg_base = NULL;
1035704 }
1036
- if (link_dec->tasks_hw) {
1037
- devm_kfree(dev, link_dec->tasks_hw);
1038
- link_dec->tasks_hw = NULL;
1039
- }
1040
-
1041705 devm_kfree(dev, link_dec);
1042706 link_dec = NULL;
1043707 }
....@@ -1055,13 +719,13 @@
1055719 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
1056720
1057721 if (!task->session) {
1058
- mpp_err("task %d task->session is null.\n", task->task_index);
722
+ mpp_err("task %d task->session is null.\n", task->task_id);
1059723 return;
1060724 }
1061725 session = task->session;
1062726
1063727 mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
1064
- session->index, task->task_index, task->state);
728
+ session->index, task->task_id, task->state);
1065729 if (!session->mpp) {
1066730 mpp_err("session %d session->mpp is null.\n", session->index);
1067731 return;
....@@ -1080,30 +744,16 @@
1080744 kthread_queue_work(&mpp->queue->worker, &mpp->work);
1081745 }
1082746
1083
-static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
1084
-{
1085
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1086
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1087
-
1088
- atomic_inc(&link_dec->task_timeout);
1089
- rkvdec2_link_trigger_work(mpp);
1090
-}
1091
-
1092
-static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1093
-{
1094
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1095
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1096
-
1097
- link_dec->task_irq++;
1098
- rkvdec2_link_trigger_work(mpp);
1099
-}
1100
-
1101
-static void rkvdec2_link_power_on(struct mpp_dev *mpp)
747
+static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1102748 {
1103749 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1104750 struct rkvdec_link_dev *link_dec = dec->link_dec;
1105751
1106752 if (!atomic_xchg(&link_dec->power_enabled, 1)) {
753
+ if (mpp_iommu_attach(mpp->iommu_info)) {
754
+ dev_err(mpp->dev, "mpp_iommu_attach failed\n");
755
+ return -ENODATA;
756
+ }
1107757 pm_runtime_get_sync(mpp->dev);
1108758 pm_stay_awake(mpp->dev);
1109759
....@@ -1112,31 +762,17 @@
1112762
1113763 if (!link_dec->irq_enabled) {
1114764 enable_irq(mpp->irq);
765
+ mpp_iommu_enable_irq(mpp->iommu_info);
1115766 link_dec->irq_enabled = 1;
1116767 }
1117768
1118769 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
1119770 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
1120771 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
1121
-
1122
-#ifdef CONFIG_PM_DEVFREQ
1123
- if (dec->devfreq) {
1124
- unsigned long core_rate_hz;
1125
-
1126
- mutex_lock(&dec->devfreq->lock);
1127
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1128
- CLK_MODE_ADVANCED);
1129
- if (dec->core_rate_hz != core_rate_hz) {
1130
- dec->core_rate_hz = core_rate_hz;
1131
- update_devfreq(dec->devfreq);
1132
- }
1133
- mutex_unlock(&dec->devfreq->lock);
1134
-
1135
- return;
1136
- }
1137
-#endif
1138
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_ADVANCED);
772
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
773
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1139774 }
775
+ return 0;
1140776 }
1141777
1142778 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
....@@ -1146,6 +782,7 @@
1146782
1147783 if (atomic_xchg(&link_dec->power_enabled, 0)) {
1148784 disable_irq(mpp->irq);
785
+ mpp_iommu_disable_irq(mpp->iommu_info);
1149786 link_dec->irq_enabled = 0;
1150787
1151788 if (mpp->hw_ops->clk_off)
....@@ -1154,175 +791,282 @@
1154791 pm_relax(mpp->dev);
1155792 pm_runtime_put_sync_suspend(mpp->dev);
1156793
1157
- link_dec->task_decoded = 0;
1158
- link_dec->task_total = 0;
1159
-
1160794 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1161795 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1162796 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1163
-
1164
-#ifdef CONFIG_PM_DEVFREQ
1165
- if (dec->devfreq) {
1166
- unsigned long core_rate_hz;
1167
-
1168
- mutex_lock(&dec->devfreq->lock);
1169
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1170
- CLK_MODE_NORMAL);
1171
- if (dec->core_rate_hz != core_rate_hz) {
1172
- dec->core_rate_hz = core_rate_hz;
1173
- update_devfreq(dec->devfreq);
1174
- }
1175
- mutex_unlock(&dec->devfreq->lock);
1176
-
1177
- return;
1178
- }
1179
-#endif
1180
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_NORMAL);
797
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
798
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1181799 }
1182800 }
1183801
1184802 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1185803 {
1186804 struct mpp_dev *mpp;
805
+ struct rkvdec2_dev *dec;
1187806 struct mpp_session *session;
1188807 struct mpp_task *task = container_of(to_delayed_work(work_s),
1189808 struct mpp_task, timeout_work);
1190809
1191810 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1192811 mpp_err("task %d state %lx has been handled\n",
1193
- task->task_index, task->state);
812
+ task->task_id, task->state);
1194813 return;
1195814 }
1196815
1197816 if (!task->session) {
1198
- mpp_err("task %d session is null.\n", task->task_index);
817
+ mpp_err("task %d session is null.\n", task->task_id);
1199818 return;
1200819 }
1201820 session = task->session;
1202821
1203822 if (!session->mpp) {
1204823 mpp_err("task %d:%d mpp is null.\n", session->index,
1205
- task->task_index);
824
+ task->task_id);
1206825 return;
1207826 }
1208827 mpp = session->mpp;
1209
- rkvdec2_link_trigger_timeout(mpp);
828
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
829
+
830
+ dec = to_rkvdec2_dev(mpp);
831
+ atomic_inc(&dec->link_dec->task_timeout);
832
+
833
+ dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
834
+ session->index, task->task_index, task->state,
835
+ atomic_read(&dec->link_dec->task_timeout));
836
+
837
+ rkvdec2_link_trigger_work(mpp);
1210838 }
1211839
1212
-static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
840
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
841
+ struct device *iommu_dev,
842
+ unsigned long iova,
843
+ int status, void *arg)
1213844 {
1214
- struct mpp_task *task, *n;
845
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
846
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
847
+ struct mpp_task *mpp_task = NULL, *n;
848
+ struct mpp_taskqueue *queue;
1215849
1216
- mutex_lock(&queue->pending_lock);
1217
- /* Check and pop all timeout task */
1218
- list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1219
- struct mpp_session *session = task->session;
850
+ dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
851
+ iova, status, arg);
1220852
1221
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1222
- mutex_lock(&session->pending_lock);
1223
- /* wait and signal */
1224
- list_del_init(&task->queue_link);
1225
- mutex_unlock(&session->pending_lock);
1226
- kref_put(&task->ref, rkvdec2_link_free_task);
853
+ if (!mpp) {
854
+ dev_err(iommu_dev, "pagefault without device to handle\n");
855
+ return 0;
856
+ }
857
+ queue = mpp->queue;
858
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
859
+ struct rkvdec_link_info *info = dec->link_dec->info;
860
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
861
+ u32 *tb_reg = (u32 *)task->table->vaddr;
862
+ u32 irq_status = tb_reg[info->tb_reg_int];
863
+
864
+ if (!irq_status) {
865
+ mpp_task_dump_mem_region(mpp, mpp_task);
866
+ break;
1227867 }
1228868 }
1229
- mutex_unlock(&queue->pending_lock);
869
+
870
+ mpp_task_dump_hw_reg(mpp);
871
+ /*
872
+ * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
873
+ * Until the pagefault task finish by hw timeout.
874
+ */
875
+ rockchip_iommu_mask_irq(mpp->dev);
876
+ dec->mmu_fault = 1;
877
+
878
+ return 0;
879
+}
880
+
881
+static void rkvdec2_link_resend(struct mpp_dev *mpp)
882
+{
883
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
884
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
885
+ struct mpp_taskqueue *queue = mpp->queue;
886
+ struct mpp_task *mpp_task, *n;
887
+
888
+ link_dec->task_running = 0;
889
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
890
+ dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
891
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
892
+ clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
893
+ clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
894
+ rkvdec2_link_enqueue(link_dec, mpp_task);
895
+ }
1230896 }
1231897
1232898 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1233899 {
1234900 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1235901 struct rkvdec_link_dev *link_dec = dec->link_dec;
1236
- struct mpp_task *task;
1237902 struct mpp_taskqueue *queue = mpp->queue;
1238
- int task_irq = link_dec->task_irq;
1239
- int task_irq_prev = link_dec->task_irq_prev;
1240
- int task_timeout = atomic_read(&link_dec->task_timeout);
903
+ struct mpp_task *mpp_task = NULL, *n;
904
+ struct rkvdec_link_info *info = link_dec->info;
905
+ u32 reset_flag = 0;
906
+ u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
907
+ u32 link_en = atomic_read(&link_dec->power_enabled) ?
908
+ readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
909
+ u32 force_dequeue = iommu_fault || !link_en;
910
+ u32 dequeue_cnt = 0;
1241911
1242
- if (!link_dec->task_running)
1243
- goto done;
912
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
913
+ /*
914
+ * Because there are multiple tasks enqueue at the same time,
915
+ * soft timeout may be triggered at the same time, but in reality only
916
+ * first task is being timeout because of the hardware stuck,
917
+ * so only process the first task.
918
+ */
919
+ u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
920
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
921
+ u32 *tb_reg = (u32 *)task->table->vaddr;
922
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
923
+ u32 irq_status = tb_reg[info->tb_reg_int];
924
+ u32 task_done = irq_status || timeout_flag || abort_flag;
1244925
1245
- if (task_timeout != link_dec->task_timeout_prev) {
1246
- dev_info(link_dec->dev, "process task timeout\n");
1247
- atomic_inc(&mpp->reset_request);
1248
- link_dec->task_on_timeout =
1249
- task_timeout - link_dec->task_timeout_prev;
1250
- goto proc;
926
+ /*
927
+ * Some cases prevent the hw from writing regs back to ddr:
928
+ * 1. iommu pagefault
929
+ * 2. link stop (link_en == 0) caused by an err task; this is a rk356x issue.
930
+ * In these cases one task needs to be force dequeued.
931
+ */
932
+ if (force_dequeue)
933
+ task_done = 1;
934
+
935
+ if (!task_done)
936
+ break;
937
+
938
+ dequeue_cnt++;
939
+ /* check hack task only for rk356x */
940
+ if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
941
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
942
+ list_move_tail(&task->table->link, &link_dec->unused_list);
943
+ list_del_init(&mpp_task->queue_link);
944
+ link_dec->task_running--;
945
+ link_dec->hack_task_running--;
946
+ kfree(task);
947
+ mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
948
+ link_dec->hack_task_running, irq_status,
949
+ timeout_flag, abort_flag);
950
+ continue;
951
+ }
952
+
953
+ /*
954
+ * if timeout/abort/force dequeue found, reset and stop hw first.
955
+ */
956
+ if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
957
+ dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
958
+ mpp_task->session->index, mpp_task->task_index,
959
+ timeout_flag, abort_flag, force_dequeue);
960
+ rkvdec2_link_reset(mpp);
961
+ reset_flag = 1;
962
+ dec->mmu_fault = 0;
963
+ mpp->irq_status = 0;
964
+ force_dequeue = 0;
965
+ }
966
+
967
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
968
+
969
+ task->irq_status = irq_status;
970
+ mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
971
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
972
+ rkvdec2_link_finish(mpp, mpp_task);
973
+
974
+ list_move_tail(&task->table->link, &link_dec->unused_list);
975
+ list_del_init(&mpp_task->queue_link);
976
+ link_dec->task_running--;
977
+
978
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
979
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
980
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
981
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
982
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
983
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
984
+
985
+ mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
986
+ mpp_task->session->index, mpp_task->task_index,
987
+ irq_status, timeout_flag, abort_flag);
988
+
989
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
990
+ dev_err(mpp->dev,
991
+ "session %d task %d irq_status %#08x timeout %u abort %u\n",
992
+ mpp_task->session->index, mpp_task->task_index,
993
+ irq_status, timeout_flag, abort_flag);
994
+ if (!reset_flag)
995
+ atomic_inc(&mpp->reset_request);
996
+ }
997
+
998
+ wake_up(&mpp_task->wait);
999
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
12511000 }
12521001
1253
- if (task_irq == task_irq_prev)
1254
- goto done;
1255
-
1256
- if (!atomic_read(&link_dec->power_enabled)) {
1257
- dev_info(link_dec->dev, "dequeue on power off\n");
1258
- goto done;
1259
- }
1260
-
1261
-proc:
1262
- task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1263
- queue_link);
1264
- if (!task) {
1265
- mpp_err("can found task on trydequeue with %d running task\n",
1266
- link_dec->task_running);
1267
- goto done;
1268
- }
1269
-
1270
- /* Check and process all finished task */
1271
- rkvdec2_link_isr(mpp);
1272
-
1273
-done:
1274
- link_dec->task_irq_prev = task_irq;
1275
- link_dec->task_timeout_prev = task_timeout;
1276
- link_dec->task_on_timeout = 0;
1277
-
1278
- mpp_taskqueue_scan_pending_abort_task(queue);
1279
-
1280
- /* TODO: if reset is needed do reset here */
1002
+ /* resend running task after reset */
1003
+ if (reset_flag && !list_empty(&queue->running_list))
1004
+ rkvdec2_link_resend(mpp);
12811005 }
12821006
1283
-static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1007
+static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
12841008 {
12851009 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
12861010 struct rkvdec_link_dev *link_dec = dec->link_dec;
1287
- u32 task_to_run = 0;
1288
- int slot_idx = 0;
1289
- int ret;
1011
+ struct mpp_taskqueue *queue = mpp->queue;
1012
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
12901013
12911014 mpp_debug_enter();
12921015
1293
- /*
1294
- * for iommu share hardware, should attach to ensure
1295
- * working in current device
1296
- */
1297
- ret = mpp_iommu_attach(mpp->iommu_info);
1298
- if (ret) {
1299
- dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1300
- return -ENODATA;
1301
- }
1302
-
13031016 rkvdec2_link_power_on(mpp);
1304
- mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
1305
- task->session->pid, dev_name(mpp->dev));
13061017
1307
- /* prepare the task for running */
1308
- if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1309
- mpp_err("task %d has been prepare twice\n", task->task_index);
1018
+ /* hack for rk356x */
1019
+ if (task->need_hack) {
1020
+ u32 *tb_reg;
1021
+ struct mpp_dma_buffer *table;
1022
+ struct rkvdec2_task *hack_task;
1023
+ struct rkvdec_link_info *info = link_dec->info;
13101024
1311
- rkvdec2_link_prepare(mpp, task);
1025
+ /* need reserved 2 unused task for need hack task */
1026
+ if (link_dec->task_running > (link_dec->task_capacity - 2))
1027
+ return -EBUSY;
13121028
1313
- task_to_run = link_dec->task_to_run;
1314
- if (!task_to_run) {
1315
- dev_err(link_dec->dev, "nothing to run\n");
1316
- goto done;
1029
+ table = list_first_entry_or_null(&link_dec->unused_list,
1030
+ struct mpp_dma_buffer,
1031
+ link);
1032
+ if (!table)
1033
+ return -EBUSY;
1034
+
1035
+ hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
1036
+
1037
+ if (!hack_task)
1038
+ return -ENOMEM;
1039
+
1040
+ mpp_task_init(mpp_task->session, &hack_task->mpp_task);
1041
+ INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
1042
+ rkvdec2_link_timeout_proc);
1043
+
1044
+ tb_reg = (u32 *)table->vaddr;
1045
+ memset(tb_reg + info->part_r[0].tb_reg_off, 0, info->part_r[0].reg_num);
1046
+ rkvdec2_3568_hack_fix_link(tb_reg + 4);
1047
+ list_move_tail(&table->link, &link_dec->used_list);
1048
+ hack_task->table = table;
1049
+ hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
1050
+ rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
1051
+ mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
1052
+ link_dec->hack_task_running++;
1053
+ mpp_dbg_link("hack task send to hw, hack running %d\n",
1054
+ link_dec->hack_task_running);
13171055 }
13181056
1319
- mpp_reset_down_read(mpp->reset_group);
1320
- link_dec->task_to_run = 0;
1321
- slot_idx = rkvdec_link_get_task_send(link_dec);
1322
- link_dec->task_running += task_to_run;
1323
- rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1057
+ /* process normal */
1058
+ if (!rkvdec2_link_prepare(mpp, mpp_task))
1059
+ return -EBUSY;
13241060
1325
-done:
1061
+ rkvdec2_link_enqueue(link_dec, mpp_task);
1062
+
1063
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1064
+ atomic_dec(&link_dec->task_pending);
1065
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1066
+
1067
+ mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
1068
+ mpp_task->session->index, mpp_task->task_index,
1069
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
13261070 mpp_debug_leave();
13271071
13281072 return 0;
....@@ -1334,7 +1078,7 @@
13341078 int ret = rkvdec2_link_irq(mpp);
13351079
13361080 if (!ret)
1337
- rkvdec2_link_trigger_irq(mpp);
1081
+ rkvdec2_link_trigger_work(mpp);
13381082
13391083 return IRQ_HANDLED;
13401084 }
....@@ -1372,7 +1116,6 @@
13721116 struct mpp_task *task)
13731117 {
13741118 set_bit(TASK_STATE_DONE, &task->state);
1375
- kref_put(&task->ref, rkvdec2_link_free_task);
13761119
13771120 return 0;
13781121 }
....@@ -1381,10 +1124,10 @@
13811124 struct mpp_task_msgs *msgs)
13821125 {
13831126 struct mpp_task *task = NULL;
1384
- struct rkvdec2_task *dec_task = NULL;
13851127 struct mpp_dev *mpp = session->mpp;
1386
- u32 fmt;
13871128 struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
1129
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1130
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
13881131
13891132 task = rkvdec2_alloc_task(session, msgs);
13901133 if (!task) {
....@@ -1393,6 +1136,9 @@
13931136 }
13941137
13951138 if (link_info->hack_setup) {
1139
+ u32 fmt;
1140
+ struct rkvdec2_task *dec_task = NULL;
1141
+
13961142 dec_task = to_rkvdec2_task(task);
13971143 fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
13981144 dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
....@@ -1401,6 +1147,7 @@
14011147 kref_init(&task->ref);
14021148 atomic_set(&task->abort_request, 0);
14031149 task->task_index = atomic_fetch_inc(&mpp->task_index);
1150
+ task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
14041151 INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
14051152
14061153 atomic_inc(&session->task_count);
....@@ -1414,6 +1161,7 @@
14141161 mutex_lock(&mpp->queue->pending_lock);
14151162 list_add_tail(&task->queue_link, &mpp->queue->pending_list);
14161163 mutex_unlock(&mpp->queue->pending_lock);
1164
+ atomic_inc(&link_dec->task_pending);
14171165
14181166 /* push current task to queue */
14191167 atomic_inc(&mpp->task_count);
....@@ -1430,7 +1178,6 @@
14301178 {
14311179 struct mpp_dev *mpp = session->mpp;
14321180 struct mpp_task *mpp_task;
1433
- struct rkvdec2_task *task;
14341181 int ret;
14351182
14361183 mpp_task = mpp_session_get_pending_task(session);
....@@ -1439,20 +1186,16 @@
14391186 return -EIO;
14401187 }
14411188
1442
- task = to_rkvdec2_task(mpp_task);
1443
- ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
1444
- msecs_to_jiffies(WAIT_TIMEOUT_MS));
1445
- if (ret) {
1446
- ret = rkvdec2_result(mpp, mpp_task, msgs);
1189
+ ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task));
1190
+ if (ret == -ERESTARTSYS)
1191
+ mpp_err("wait task break by signal\n");
14471192
1448
- mpp_session_pop_done(session, mpp_task);
1449
- } else {
1450
- mpp_err("task %d:%d statue %lx timeout -> abort\n",
1451
- session->index, mpp_task->task_index, mpp_task->state);
1193
+ ret = rkvdec2_result(mpp, mpp_task, msgs);
14521194
1453
- atomic_inc(&mpp_task->abort_request);
1454
- set_bit(TASK_STATE_ABORT, &mpp_task->state);
1455
- }
1195
+ mpp_session_pop_done(session, mpp_task);
1196
+ mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
1197
+ session->device_type, session->index, atomic_read(&session->task_count),
1198
+ mpp_task->task_index, mpp_task->state);
14561199
14571200 mpp_session_pop_pending(session, mpp_task);
14581201 return ret;
....@@ -1461,32 +1204,25 @@
14611204 void rkvdec2_link_worker(struct kthread_work *work_s)
14621205 {
14631206 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1464
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1465
- struct rkvdec_link_dev *link_dec = dec->link_dec;
14661207 struct mpp_task *task;
14671208 struct mpp_taskqueue *queue = mpp->queue;
1209
+ u32 all_done;
14681210
14691211 mpp_debug_enter();
14701212
1471
- /*
1472
- * process timeout and finished task.
1473
- */
1213
+ /* dequeue running task */
14741214 rkvdec2_link_try_dequeue(mpp);
14751215
1476
-again:
1216
+ /* process reset */
14771217 if (atomic_read(&mpp->reset_request)) {
1478
- if (link_dec->task_running || link_dec->task_prepared)
1479
- goto done;
1480
-
1481
- disable_irq(mpp->irq);
14821218 rkvdec2_link_reset(mpp);
1483
- link_dec->task_decoded = 0;
1484
- link_dec->task_total = 0;
1485
- enable_irq(mpp->irq);
1219
+ /* resend running task after reset */
1220
+ if (!list_empty(&queue->running_list))
1221
+ rkvdec2_link_resend(mpp);
14861222 }
1487
- /*
1488
- * process pending queue to find the task to accept.
1489
- */
1223
+
1224
+again:
1225
+ /* get pending task to process */
14901226 mutex_lock(&queue->pending_lock);
14911227 task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
14921228 queue_link);
....@@ -1494,80 +1230,37 @@
14941230 if (!task)
14951231 goto done;
14961232
1497
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1498
- struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
1499
-
1233
+ /* check abort task */
1234
+ if (atomic_read(&task->abort_request)) {
15001235 mutex_lock(&queue->pending_lock);
15011236 list_del_init(&task->queue_link);
15021237
1503
- kref_get(&task->ref);
15041238 set_bit(TASK_STATE_ABORT_READY, &task->state);
15051239 set_bit(TASK_STATE_PROC_DONE, &task->state);
15061240
15071241 mutex_unlock(&queue->pending_lock);
1508
- wake_up(&dec_task->wait);
1242
+ wake_up(&task->wait);
15091243 kref_put(&task->ref, rkvdec2_link_free_task);
15101244 goto again;
15111245 }
15121246
1513
- /*
1514
- * if target device can accept more task send the task to run.
1515
- */
1516
- if (link_dec->task_running >= link_dec->task_capacity - 2)
1517
- goto done;
1518
-
1519
- if (mpp_task_queue(mpp, task)) {
1520
- /* failed to run */
1521
- mpp_err("%p failed to process task %p:%d\n",
1522
- mpp, task, task->task_index);
1523
- } else {
1524
- mutex_lock(&queue->pending_lock);
1525
- set_bit(TASK_STATE_RUNNING, &task->state);
1526
- list_move_tail(&task->queue_link, &queue->running_list);
1527
- mutex_unlock(&queue->pending_lock);
1247
+ /* queue task to hw */
1248
+ if (!mpp_task_queue(mpp, task))
15281249 goto again;
1529
- }
1250
+
15301251 done:
1252
+
1253
+ /* if no task in pending and running list, power off device */
1254
+ mutex_lock(&queue->pending_lock);
1255
+ all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
1256
+ mutex_unlock(&queue->pending_lock);
1257
+
1258
+ if (all_done)
1259
+ rkvdec2_link_power_off(mpp);
1260
+
1261
+ mpp_session_cleanup_detach(queue, work_s);
1262
+
15311263 mpp_debug_leave();
1532
-
1533
- if (link_dec->task_irq != link_dec->task_irq_prev ||
1534
- atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1535
- rkvdec2_link_trigger_work(mpp);
1536
-
1537
- /* if no task for running power off device */
1538
- {
1539
- u32 all_done = 0;
1540
-
1541
- mutex_lock(&queue->pending_lock);
1542
- all_done = list_empty(&queue->pending_list);
1543
- mutex_unlock(&queue->pending_lock);
1544
-
1545
- if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1546
- rkvdec2_link_power_off(mpp);
1547
- }
1548
-
1549
- mutex_lock(&queue->session_lock);
1550
- while (queue->detach_count) {
1551
- struct mpp_session *session = NULL;
1552
-
1553
- session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
1554
- session_link);
1555
- if (session) {
1556
- list_del_init(&session->session_link);
1557
- queue->detach_count--;
1558
- }
1559
-
1560
- mutex_unlock(&queue->session_lock);
1561
-
1562
- if (session) {
1563
- mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1564
- queue->detach_count);
1565
- mpp_session_deinit(session);
1566
- }
1567
-
1568
- mutex_lock(&queue->session_lock);
1569
- }
1570
- mutex_unlock(&queue->session_lock);
15711264 }
15721265
15731266 void rkvdec2_link_session_deinit(struct mpp_session *session)
....@@ -1580,9 +1273,9 @@
15801273
15811274 if (session->dma) {
15821275 mpp_dbg_session("session %d destroy dma\n", session->index);
1583
- mpp_iommu_down_read(mpp->iommu_info);
1276
+ mpp_iommu_down_write(mpp->iommu_info);
15841277 mpp_dma_session_destroy(session->dma);
1585
- mpp_iommu_up_read(mpp->iommu_info);
1278
+ mpp_iommu_up_write(mpp->iommu_info);
15861279 session->dma = NULL;
15871280 }
15881281 if (session->srv) {
....@@ -1598,3 +1291,1221 @@
15981291
15991292 mpp_debug_leave();
16001293 }
1294
+
1295
+#define RKVDEC2_1080P_PIXELS (1920*1080)
1296
+#define RKVDEC2_4K_PIXELS (4096*2304)
1297
+#define RKVDEC2_8K_PIXELS (7680*4320)
1298
+#define RKVDEC2_CCU_TIMEOUT_20MS (0xefffff)
1299
+#define RKVDEC2_CCU_TIMEOUT_50MS (0x2cfffff)
1300
+#define RKVDEC2_CCU_TIMEOUT_100MS (0x4ffffff)
1301
+
1302
+static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
1303
+{
1304
+ u32 pixels = task->pixels;
1305
+
1306
+ if (pixels < RKVDEC2_1080P_PIXELS)
1307
+ return RKVDEC2_CCU_TIMEOUT_20MS;
1308
+ else if (pixels < RKVDEC2_4K_PIXELS)
1309
+ return RKVDEC2_CCU_TIMEOUT_50MS;
1310
+ else
1311
+ return RKVDEC2_CCU_TIMEOUT_100MS;
1312
+}
1313
+
1314
+int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1315
+{
1316
+ int ret;
1317
+ struct device_node *np;
1318
+ struct platform_device *pdev;
1319
+ struct rkvdec2_ccu *ccu;
1320
+
1321
+ mpp_debug_enter();
1322
+
1323
+ np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1324
+ if (!np || !of_device_is_available(np))
1325
+ return -ENODEV;
1326
+
1327
+ pdev = of_find_device_by_node(np);
1328
+ of_node_put(np);
1329
+ if (!pdev)
1330
+ return -ENODEV;
1331
+
1332
+ ccu = platform_get_drvdata(pdev);
1333
+ if (!ccu)
1334
+ return -ENOMEM;
1335
+
1336
+ ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1337
+ if (ret)
1338
+ return ret;
1339
+ dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1340
+
1341
+ /* if not the main-core, then attach the main core domain to current */
1342
+ if (dec->mpp.core_id != 0) {
1343
+ struct mpp_taskqueue *queue;
1344
+ struct mpp_iommu_info *ccu_info, *cur_info;
1345
+
1346
+ queue = dec->mpp.queue;
1347
+ /* set the ccu-domain for current device */
1348
+ ccu_info = queue->cores[0]->iommu_info;
1349
+ cur_info = dec->mpp.iommu_info;
1350
+ if (cur_info)
1351
+ cur_info->domain = ccu_info->domain;
1352
+ mpp_iommu_attach(cur_info);
1353
+ }
1354
+
1355
+ dec->ccu = ccu;
1356
+
1357
+ dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1358
+ mpp_debug_leave();
1359
+
1360
+ return 0;
1361
+}
1362
+
1363
+static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
1364
+{
1365
+ struct mpp_dev *mpp;
1366
+ struct mpp_task *task = container_of(to_delayed_work(work_s),
1367
+ struct mpp_task, timeout_work);
1368
+
1369
+ if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1370
+ mpp_err("task %d state %lx has been handled\n",
1371
+ task->task_id, task->state);
1372
+ return;
1373
+ }
1374
+
1375
+ if (!task->session) {
1376
+ mpp_err("task %d session is null.\n", task->task_id);
1377
+ return;
1378
+ }
1379
+ mpp = mpp_get_task_used_device(task, task->session);
1380
+ mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
1381
+ task->task_index, task->state);
1382
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
1383
+ atomic_inc(&mpp->reset_request);
1384
+ atomic_inc(&mpp->queue->reset_request);
1385
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1386
+}
1387
+
1388
+int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1389
+{
1390
+ struct resource *res;
1391
+ struct rkvdec_link_dev *link_dec;
1392
+ struct device *dev = &pdev->dev;
1393
+
1394
+ mpp_debug_enter();
1395
+
1396
+ /* link structure */
1397
+ link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1398
+ if (!link_dec)
1399
+ return -ENOMEM;
1400
+
1401
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1402
+ if (!res)
1403
+ return -ENOMEM;
1404
+
1405
+ link_dec->info = dec->mpp.var->hw_info->link_info;
1406
+ link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1407
+ if (!link_dec->reg_base) {
1408
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
1409
+ return -ENOMEM;
1410
+ }
1411
+
1412
+ dec->link_dec = link_dec;
1413
+
1414
+ mpp_debug_leave();
1415
+
1416
+ return 0;
1417
+}
1418
+
1419
+static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1420
+ struct rkvdec2_ccu *ccu)
1421
+{
1422
+ if (!atomic_xchg(&ccu->power_enabled, 1)) {
1423
+ u32 i;
1424
+ struct mpp_dev *mpp;
1425
+
1426
+ /* ccu pd and clk on */
1427
+ pm_runtime_get_sync(ccu->dev);
1428
+ pm_stay_awake(ccu->dev);
1429
+ mpp_clk_safe_enable(ccu->aclk_info.clk);
1430
+ /* core pd and clk on */
1431
+ for (i = 0; i < queue->core_count; i++) {
1432
+ struct rkvdec2_dev *dec;
1433
+
1434
+ mpp = queue->cores[i];
1435
+ dec = to_rkvdec2_dev(mpp);
1436
+ pm_runtime_get_sync(mpp->dev);
1437
+ pm_stay_awake(mpp->dev);
1438
+ if (mpp->hw_ops->clk_on)
1439
+ mpp->hw_ops->clk_on(mpp);
1440
+
1441
+ mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1442
+ mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1443
+ mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1444
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
1445
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1446
+ }
1447
+ mpp_debug(DEBUG_CCU, "power on\n");
1448
+ }
1449
+
1450
+ return 0;
1451
+}
1452
+
1453
+static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1454
+ struct rkvdec2_ccu *ccu)
1455
+{
1456
+ if (atomic_xchg(&ccu->power_enabled, 0)) {
1457
+ u32 i;
1458
+ struct mpp_dev *mpp;
1459
+
1460
+ /* ccu pd and clk off */
1461
+ mpp_clk_safe_disable(ccu->aclk_info.clk);
1462
+ pm_relax(ccu->dev);
1463
+ pm_runtime_mark_last_busy(ccu->dev);
1464
+ pm_runtime_put_autosuspend(ccu->dev);
1465
+ /* core pd and clk off */
1466
+ for (i = 0; i < queue->core_count; i++) {
1467
+ mpp = queue->cores[i];
1468
+
1469
+ if (mpp->hw_ops->clk_off)
1470
+ mpp->hw_ops->clk_off(mpp);
1471
+ pm_relax(mpp->dev);
1472
+ pm_runtime_mark_last_busy(mpp->dev);
1473
+ pm_runtime_put_autosuspend(mpp->dev);
1474
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1475
+ }
1476
+ mpp_debug(DEBUG_CCU, "power off\n");
1477
+ }
1478
+
1479
+ return 0;
1480
+}
1481
+
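/*
 * Editor's sketch: the power on/off pair above relies on atomic_xchg() so
 * that only the caller which actually flips ccu->power_enabled performs the
 * clock and runtime-PM work; repeated calls in the same state are no-ops.
 * A minimal model of the idiom (names hypothetical, assumes <linux/atomic.h>):
 */
static atomic_t demo_power_enabled = ATOMIC_INIT(0);

static void demo_power_on(void)
{
	if (!atomic_xchg(&demo_power_enabled, 1)) {
		/* 0 -> 1 transition: enable clocks, pm_runtime_get_sync(), ... */
	}
}

static void demo_power_off(void)
{
	if (atomic_xchg(&demo_power_enabled, 0)) {
		/* 1 -> 0 transition: disable clocks, pm_runtime_put(), ... */
	}
}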
1482
+static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1483
+{
1484
+ struct mpp_task *mpp_task = NULL, *n;
1485
+
1486
+ mpp_debug_enter();
1487
+
1488
+ list_for_each_entry_safe(mpp_task, n,
1489
+ &queue->running_list,
1490
+ queue_link) {
1491
+ struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1492
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1493
+ u32 irq_status = mpp->irq_status;
1494
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1495
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1496
+ u32 timing_en = mpp->srv->timing_en;
1497
+
1498
+ if (irq_status || timeout_flag || abort_flag) {
1499
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1500
+
1501
+ if (timing_en) {
1502
+ mpp_task->on_irq = ktime_get();
1503
+ set_bit(TASK_TIMING_IRQ, &mpp_task->state);
1504
+
1505
+ mpp_task->on_cancel_timeout = mpp_task->on_irq;
1506
+ set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
1507
+
1508
+ mpp_task->on_isr = mpp_task->on_irq;
1509
+ set_bit(TASK_TIMING_ISR, &mpp_task->state);
1510
+ }
1511
+
1512
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1513
+ cancel_delayed_work(&mpp_task->timeout_work);
1514
+ mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
1515
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
1516
+ task->irq_status = irq_status;
1517
+ mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1518
+ irq_status, timeout_flag, abort_flag);
1519
+ if (irq_status && mpp->dev_ops->finish)
1520
+ mpp->dev_ops->finish(mpp, mpp_task);
1521
+ else
1522
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1523
+
1524
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
1525
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
1526
+
1527
+ set_bit(mpp->core_id, &queue->core_idle);
1528
+ mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1529
+ /* Wake up the GET thread */
1530
+ wake_up(&mpp_task->wait);
1531
+ /* free task */
1532
+ list_del_init(&mpp_task->queue_link);
1533
+ kref_put(&mpp_task->ref, mpp_free_task);
1534
+ } else {
1535
+ /* NOTE: stop at the first task that has not finished yet */
1536
+ break;
1537
+ }
1538
+ }
1539
+
1540
+ mpp_debug_leave();
1541
+ return 0;
1542
+}
1543
+
1544
+static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1545
+ struct rkvdec2_ccu *ccu)
1546
+{
1547
+ int i;
1548
+
1549
+ for (i = queue->core_count - 1; i >= 0; i--) {
1550
+ u32 val;
1551
+
1552
+ struct mpp_dev *mpp = queue->cores[i];
1553
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1554
+
1555
+ if (mpp->disable)
1556
+ continue;
1557
+
1558
+ dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status);
1559
+ disable_hardirq(mpp->irq);
1560
+
1561
+ /* force idle, disconnect core and ccu */
1562
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1563
+
1564
+ /* soft reset */
1565
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1566
+ udelay(5);
1567
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1568
+ if (!(val & RKVDEC_SOFT_RESET_READY))
1569
+ mpp_err("soft reset fail, int %08x\n", val);
1570
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1571
+
1572
+ /* check bus idle */
1573
+ val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1574
+ if (!(val & RKVDEC_BIT_BUS_IDLE))
1575
+ mpp_err("bus busy\n");
1576
+
1577
+ if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1578
+ /* sip reset */
1579
+ rockchip_dmcfreq_lock();
1580
+ sip_smc_vpu_reset(i, 0, 0);
1581
+ rockchip_dmcfreq_unlock();
1582
+ } else {
1583
+ rkvdec2_reset(mpp);
1584
+ }
1585
+ /* clear error mask */
1586
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1587
+ ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1588
+ /* connect core and ccu */
1589
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1590
+ ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1591
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1592
+ atomic_set(&mpp->reset_request, 0);
1593
+
1594
+ enable_irq(mpp->irq);
1595
+ dev_info(mpp->dev, "reset done\n");
1596
+ }
1597
+ atomic_set(&queue->reset_request, 0);
1598
+
1599
+ return 0;
1600
+}
1601
+
1602
+void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1603
+ struct mpp_task_msgs *msgs)
1604
+{
1605
+ int ret;
1606
+ struct rkvdec2_task *task;
1607
+
1608
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
1609
+ if (!task)
1610
+ return NULL;
1611
+
1612
+ ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1613
+ if (ret) {
1614
+ kfree(task);
1615
+ return NULL;
1616
+ }
1617
+
1618
+ return &task->mpp_task;
1619
+}
1620
+
1621
+static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue,
1622
+ struct device *iommu_dev)
1623
+{
1624
+ struct mpp_dev *mpp = NULL;
1625
+ struct rkvdec2_dev *dec = NULL;
1626
+ u32 mmu[2] = {0, 0x40};
1627
+ u32 i;
1628
+
1629
+ for (i = 0; i < queue->core_count; i++) {
1630
+ struct mpp_dev *core = queue->cores[i];
1631
+
1632
+ if (&core->iommu_info->pdev->dev == iommu_dev) {
1633
+ mpp = core;
1634
+ dec = to_rkvdec2_dev(mpp);
1635
+ }
1636
+ }
1637
+
1638
+ if (!dec || !dec->mmu_base)
1639
+ goto out;
1640
+
1641
+ /* there are two iommus */
1642
+ for (i = 0; i < 2; i++) {
1643
+ u32 status = readl(dec->mmu_base + mmu[i] + 0x4);
1644
+ u32 iova = readl(dec->mmu_base + mmu[i] + 0xc);
1645
+ u32 is_write = (status & BIT(5)) ? 1 : 0;
1646
+
1647
+ if (status && iova)
1648
+ dev_err(iommu_dev, "core %d pagefault at iova %#08x type %s status %#x\n",
1649
+ mpp->core_id, iova, is_write ? "write" : "read", status);
1650
+ }
1651
+out:
1652
+ return mpp;
1653
+}
1654
+
1655
+int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1656
+ struct device *iommu_dev,
1657
+ unsigned long iova, int status, void *arg)
1658
+{
1659
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1660
+ struct mpp_taskqueue *queue = mpp->queue;
1661
+ struct mpp_task *mpp_task;
1662
+
1663
+ mpp_debug_enter();
1664
+
1665
+ mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
1666
+ if (!mpp) {
1667
+ dev_err(iommu_dev, "iommu fault, but no dev match\n");
1668
+ return 0;
1669
+ }
1670
+ mpp_task = mpp->cur_task;
1671
+ if (mpp_task)
1672
+ mpp_task_dump_mem_region(mpp, mpp_task);
1673
+
1674
+ /*
1675
+ * Mask the iommu irq so the iommu does not keep re-triggering the pagefault
1676
+ * until the faulting task is finished off by the hw timeout.
1677
+ */
1678
+ rockchip_iommu_mask_irq(mpp->dev);
1679
+ atomic_inc(&mpp->queue->reset_request);
1680
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1681
+
1682
+ mpp_debug_leave();
1683
+
1684
+ return 0;
1685
+}
1686
+
1687
+int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1688
+ struct device *iommu_dev,
1689
+ unsigned long iova, int status, void *arg)
1690
+{
1691
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1692
+ struct mpp_taskqueue *queue = mpp->queue;
1693
+ struct mpp_task *mpp_task = NULL, *n;
1694
+ struct rkvdec2_dev *dec;
1695
+ u32 err_task_iova;
1696
+
1697
+ mpp_debug_enter();
1698
+
1699
+ mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
1700
+ if (!mpp) {
1701
+ dev_err(iommu_dev, "iommu fault, but no dev match\n");
1702
+ return 0;
1703
+ }
1704
+
1705
+ dec = to_rkvdec2_dev(mpp);
1706
+ err_task_iova = readl(dec->link_dec->reg_base + 0x4);
1707
+ dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova);
1708
+ rockchip_iommu_mask_irq(mpp->dev);
1709
+
1710
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
1711
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1712
+
1713
+ if ((u32)task->table->iova == err_task_iova) {
1714
+ mpp_task_dump_mem_region(mpp, mpp_task);
1715
+ set_bit(TASK_STATE_ABORT, &mpp_task->state);
1716
+ break;
1717
+ }
1718
+ }
1719
+ atomic_inc(&mpp->queue->reset_request);
1720
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1721
+
1722
+ mpp_debug_leave();
1723
+
1724
+ return 0;
1725
+}
1726
+
1727
+irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1728
+{
1729
+ struct mpp_dev *mpp = param;
1730
+ u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1731
+
1732
+ if (irq_status & RKVDEC_IRQ_RAW) {
1733
+ mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1734
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
1735
+ atomic_inc(&mpp->reset_request);
1736
+ atomic_inc(&mpp->queue->reset_request);
1737
+ }
1738
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1739
+ mpp->irq_status = irq_status;
1740
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1741
+ return IRQ_HANDLED;
1742
+ }
1743
+ return IRQ_NONE;
1744
+}
1745
+
1746
+static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1747
+{
1748
+ u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1749
+
1750
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1751
+
1752
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1753
+
1754
+ return 0;
1755
+}
1756
+
1757
+static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1758
+{
1759
+ u32 i, reg_en, reg;
1760
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1761
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1762
+ u32 timing_en = mpp->srv->timing_en;
1763
+
1764
+ mpp_debug_enter();
1765
+
1766
+ /* set reg for link */
1767
+ reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1768
+ writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1769
+
1770
+ /* set reg for ccu */
1771
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1772
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1773
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1774
+
1775
+ /* set cache size */
1776
+ reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1777
+ RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1778
+ if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1779
+ reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1780
+
1781
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1782
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1783
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1784
+ /* clear cache */
1785
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1786
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1787
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1788
+
1789
+ mpp_iommu_flush_tlb(mpp->iommu_info);
1790
+ /* disable multicore pu/colmv offset req timeout reset */
1791
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
1792
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
1793
+ /* set registers for hardware */
1794
+ reg_en = mpp_task->hw_info->reg_en;
1795
+ for (i = 0; i < task->w_req_cnt; i++) {
1796
+ int s, e;
1797
+ struct mpp_request *req = &task->w_reqs[i];
1798
+
1799
+ s = req->offset / sizeof(u32);
1800
+ e = s + req->size / sizeof(u32);
1801
+ mpp_write_req(mpp, task->reg, s, e, reg_en);
1802
+ }
1803
+ /* init current task */
1804
+ mpp->cur_task = mpp_task;
1805
+
1806
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
1807
+
1808
+ mpp->irq_status = 0;
1809
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1810
+ /* Flush the registers before starting the device */
1811
+ wmb();
1812
+ mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1813
+
1814
+ mpp_task_run_end(mpp_task, timing_en);
1815
+
1816
+ mpp_debug_leave();
1817
+
1818
+ return 0;
1819
+}
1820
+
1821
+static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1822
+ struct mpp_task *mpp_task)
1823
+{
1824
+ u32 i = 0;
1825
+ struct rkvdec2_dev *dec = NULL;
1826
+
1827
+ for (i = 0; i < queue->core_count; i++) {
1828
+ struct mpp_dev *mpp = queue->cores[i];
1829
+ struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);
1830
+
1831
+ if (mpp->disable)
1832
+ continue;
1833
+
1834
+ if (test_bit(i, &queue->core_idle)) {
1835
+ if (!dec) {
1836
+ dec = core;
1837
+ continue;
1838
+ }
1839
+ /* pick the core with the lighter load */
1840
+ if (core->task_index < dec->task_index)
1841
+ dec = core;
1842
+ }
1843
+ }
1844
+ /* an idle core was found */
1845
+ if (dec) {
1846
+ mpp_task->mpp = &dec->mpp;
1847
+ mpp_task->core_id = dec->mpp.core_id;
1848
+ clear_bit(mpp_task->core_id, &queue->core_idle);
1849
+ dec->task_index++;
1850
+ atomic_inc(&dec->mpp.task_count);
1851
+ mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1852
+ return mpp_task->mpp;
1853
+ }
1854
+
1855
+ return NULL;
1856
+}
1857
+
1858
+static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1859
+{
1860
+ struct mpp_dev *mpp;
1861
+ bool flag = false;
1862
+ u32 i = 0;
1863
+
1864
+ for (i = 0; i < queue->core_count; i++) {
1865
+ mpp = queue->cores[i];
1866
+ if (mpp->disable)
1867
+ continue;
1868
+ if (!test_bit(i, &queue->core_idle)) {
1869
+ flag = true;
1870
+ break;
1871
+ }
1872
+ }
1873
+
1874
+ return flag;
1875
+}
1876
+
1877
+void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1878
+{
1879
+ struct mpp_task *mpp_task;
1880
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1881
+ struct mpp_taskqueue *queue = mpp->queue;
1882
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1883
+ u32 timing_en = mpp->srv->timing_en;
1884
+
1885
+ mpp_debug_enter();
1886
+
1887
+ /* 1. process all finished task in running list */
1888
+ rkvdec2_soft_ccu_dequeue(queue);
1889
+
1890
+ /* 2. process reset request */
1891
+ if (atomic_read(&queue->reset_request)) {
1892
+ if (!rkvdec2_core_working(queue)) {
1893
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1894
+ rkvdec2_soft_ccu_reset(queue, dec->ccu);
1895
+ }
1896
+ }
1897
+
1898
+ /* 3. process pending task */
1899
+ while (1) {
1900
+ if (atomic_read(&queue->reset_request))
1901
+ break;
1902
+ /* get one task from the pending list */
1903
+ mutex_lock(&queue->pending_lock);
1904
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
1905
+ struct mpp_task, queue_link);
1906
+ mutex_unlock(&queue->pending_lock);
1907
+ if (!mpp_task)
1908
+ break;
1909
+
1910
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1911
+ mutex_lock(&queue->pending_lock);
1912
+ list_del_init(&mpp_task->queue_link);
1913
+
1914
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
1915
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
1916
+
1917
+ mutex_unlock(&queue->pending_lock);
1918
+ wake_up(&mpp_task->wait);
1919
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
1920
+ continue;
1921
+ }
1922
+ /* find one core is idle */
1923
+ mpp = rkvdec2_get_idle_core(queue, mpp_task);
1924
+ if (!mpp)
1925
+ break;
1926
+
1927
+ if (timing_en) {
1928
+ mpp_task->on_run = ktime_get();
1929
+ set_bit(TASK_TIMING_RUN, &mpp_task->state);
1930
+ }
1931
+
1932
+ /* set session index */
1933
+ rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1934
+ /* set rcb buffer */
1935
+ mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1936
+
1937
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
1938
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1939
+ rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1940
+ /* pending to running */
1941
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1942
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1943
+ }
1944
+
1945
+ /* 4. poweroff when running and pending list are empty */
1946
+ if (list_empty(&queue->running_list) &&
1947
+ list_empty(&queue->pending_list))
1948
+ rkvdec2_ccu_power_off(queue, dec->ccu);
1949
+
1950
+ /* 5. check session detach out of queue */
1951
+ mpp_session_cleanup_detach(queue, work_s);
1952
+
1953
+ mpp_debug_leave();
1954
+}
1955
+
1956
+int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
1957
+ struct rkvdec_link_dev *link_dec)
1958
+{
1959
+ int ret, i;
1960
+ struct mpp_dma_buffer *table;
1961
+ struct mpp_dev *mpp = &dec->mpp;
1962
+
1963
+ mpp_debug_enter();
1964
+
1965
+ /* alloc table pointer array */
1966
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
1967
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
1968
+ if (!table)
1969
+ return -ENOMEM;
1970
+
1971
+ /* alloc table buffer */
1972
+ ret = rkvdec2_link_alloc_table(mpp, link_dec);
1973
+ if (ret)
1974
+ return ret;
1975
+
1976
+ /* init table array */
1977
+ dec->ccu->table_array = table;
1978
+ for (i = 0; i < mpp->task_capacity; i++) {
1979
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
1980
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
1981
+ table[i].size = link_dec->link_node_size;
1982
+ INIT_LIST_HEAD(&table[i].link);
1983
+ list_add_tail(&table[i].link, &dec->ccu->unused_list);
1984
+ }
1985
+
1986
+ return 0;
1987
+}
1988
+
1989
+static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
1990
+{
1991
+ u32 i;
1992
+
1993
+ for (i = 0; i < 10; i++)
1994
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1995
+
1996
+ for (i = 16; i < 22; i++)
1997
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1998
+}
1999
+
2000
+static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
2001
+{
2002
+ u32 i;
2003
+
2004
+ for (i = 0; i < 10; i++)
2005
+ mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
2006
+}
2007
+
2008
+static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
2009
+{
2010
+ u32 j;
2011
+
2012
+ if (task) {
2013
+ for (j = 0; j < 273; j++)
2014
+ mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j*4), task->reg[j]);
2015
+ } else {
2016
+ for (j = 0; j < 273; j++)
2017
+ mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j*4));
2018
+ }
2019
+}
2020
+
2021
+irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
2022
+{
2023
+ u32 irq_status;
2024
+ struct mpp_dev *mpp = param;
2025
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2026
+
2027
+ irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2028
+ dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2029
+ if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
2030
+ dec->link_dec->irq_status = irq_status;
2031
+ mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
2032
+ mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
2033
+ mpp->core_id, irq_status, mpp->irq_status);
2034
+
2035
+ writel(irq_status & 0xfffff0ff,
2036
+ dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2037
+
2038
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
2039
+ return IRQ_HANDLED;
2040
+ }
2041
+
2042
+ return IRQ_NONE;
2043
+}
2044
+
2045
+static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
2046
+{
2047
+ u32 i, off, s, n;
2048
+ struct rkvdec_link_part *part = hw->part_r;
2049
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2050
+
2051
+ mpp_debug_enter();
2052
+
2053
+ for (i = 0; i < hw->part_r_num; i++) {
2054
+ off = part[i].tb_reg_off;
2055
+ s = part[i].reg_start;
2056
+ n = part[i].reg_num;
2057
+ memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
2058
+ }
2059
+ /* revert hack for irq status */
2060
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
2061
+
2062
+ mpp_debug_leave();
2063
+
2064
+ return 0;
2065
+}
2066
+
2067
+static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
2068
+ struct rkvdec2_ccu *ccu,
2069
+ struct rkvdec_link_info *hw)
2070
+{
2071
+ struct mpp_task *mpp_task = NULL, *n;
2072
+ u32 dump_reg = 0;
2073
+ u32 dequeue_none = 0;
2074
+
2075
+ mpp_debug_enter();
2076
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
2077
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
2078
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
2079
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2080
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2081
+ u32 irq_status = tb_reg[hw->tb_reg_int];
2082
+ u32 ccu_decoded_num, ccu_total_dec_num;
2083
+
2084
+ ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2085
+ ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2086
+ mpp_debug(DEBUG_IRQ_CHECK,
2087
+ "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2088
+ mpp_task->session->index, mpp_task->task_index, task->width,
2089
+ task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2090
+ timeout_flag, abort_flag, (u32)task->table->iova,
2091
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next],
2092
+ ccu_decoded_num, ccu_total_dec_num);
2093
+
2094
+ if (irq_status || timeout_flag || abort_flag) {
2095
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);
2096
+
2097
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2098
+ cancel_delayed_work(&mpp_task->timeout_work);
2099
+ mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2100
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2101
+ task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA;
2102
+
2103
+ if (irq_status)
2104
+ rkvdec2_hard_ccu_finish(hw, task);
2105
+
2106
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2107
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2108
+
2109
+ if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2110
+ u32 i;
2111
+
2112
+ mpp_err("###### ccu #####\n");
2113
+ rkvdec2_dump_ccu(ccu);
2114
+ for (i = 0; i < queue->core_count; i++) {
2115
+ mpp_err("###### core %d #####\n", i);
2116
+ rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
2117
+ rkvdec2_dump_core(queue->cores[i], task);
2118
+ }
2119
+ dump_reg = 1;
2120
+ }
2121
+ list_move_tail(&task->table->link, &ccu->unused_list);
2122
+ /* free task */
2123
+ list_del_init(&mpp_task->queue_link);
2124
+ /* Wake up the GET thread */
2125
+ wake_up(&mpp_task->wait);
2126
+ if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2127
+ pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n",
2128
+ mpp_task->session->index, mpp_task->task_index,
2129
+ irq_status, timeout_flag, abort_flag);
2130
+ atomic_inc(&queue->reset_request);
2131
+ }
2132
+
2133
+ kref_put(&mpp_task->ref, mpp_free_task);
2134
+ } else {
2135
+ dequeue_none++;
2136
+ /*
2137
+ * there are only 2 cores, so once more than 2 unfinished
2138
+ * tasks have been seen in a row, the remaining tasks cannot
2139
+ * have been started by the hw yet and we can break early.
2140
+ */
2141
+ if (dequeue_none > 2)
2142
+ break;
2143
+ }
2144
+ }
2145
+
2146
+ mpp_debug_leave();
2147
+ return 0;
2148
+}
2149
+
2150
+static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
2151
+{
2152
+ int i = 0;
2153
+
2154
+ mpp_debug_enter();
2155
+
2156
+ /* reset and active core */
2157
+ for (i = 0; i < queue->core_count; i++) {
2158
+ u32 val = 0;
2159
+ struct mpp_dev *mpp = queue->cores[i];
2160
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2161
+
2162
+ if (mpp->disable)
2163
+ continue;
2164
+ dev_info(mpp->dev, "resetting...\n");
2165
+ disable_hardirq(mpp->irq);
2166
+ /* force idle */
2167
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
2168
+ writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2169
+
2170
+ {
2171
+ /* soft reset */
2172
+ u32 val;
2173
+
2174
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
2175
+ udelay(5);
2176
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
2177
+ if (!(val & RKVDEC_SOFT_RESET_READY))
2178
+ mpp_err("soft reset fail, int %08x\n", val);
2179
+
2180
+ // /* cru reset */
2181
+ // dev_info(mpp->dev, "cru reset\n");
2182
+ // rkvdec2_reset(mpp);
2183
+ }
2184
+#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
2185
+ rockchip_dmcfreq_lock();
2186
+ sip_smc_vpu_reset(i, 0, 0);
2187
+ rockchip_dmcfreq_unlock();
2188
+#else
2189
+ rkvdec2_reset(mpp);
2190
+#endif
2191
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
2192
+ enable_irq(mpp->irq);
2193
+ atomic_set(&mpp->reset_request, 0);
2194
+ val = mpp_read_relaxed(mpp, 272*4);
2195
+ dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
2196
+ }
2197
+ /* reset ccu */
2198
+ mpp_safe_reset(ccu->rst_a);
2199
+ udelay(5);
2200
+ mpp_safe_unreset(ccu->rst_a);
2201
+
2202
+ mpp_debug_leave();
2203
+ return 0;
2204
+}
2205
+
2206
+static struct mpp_task *
2207
+rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2208
+ struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2209
+{
2210
+ u32 i, off, s, n;
2211
+ u32 *tb_reg;
2212
+ struct mpp_dma_buffer *table = NULL;
2213
+ struct rkvdec_link_part *part;
2214
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2215
+
2216
+ mpp_debug_enter();
2217
+
2218
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2219
+ return mpp_task;
2220
+
2221
+ /* ensure that the current table's iova points to the next link table */
2222
+ {
2223
+ struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2224
+
2225
+ list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2226
+ if (!table0) {
2227
+ table0 = table;
2228
+ continue;
2229
+ }
2230
+ if (!table1)
2231
+ table1 = table;
2232
+ break;
2233
+ }
2234
+ if (!table0 || !table1)
2235
+ return NULL;
2236
+ ((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2237
+ table = table0;
2238
+ }
2239
+
2240
+ /* set session idx */
2241
+ rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2242
+ tb_reg = (u32 *)table->vaddr;
2243
+ part = hw->part_w;
2244
+
2245
+ /* disable multicore pu/colmv offset req timeout reset */
2246
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2247
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2248
+
2249
+ for (i = 0; i < hw->part_w_num; i++) {
2250
+ off = part[i].tb_reg_off;
2251
+ s = part[i].reg_start;
2252
+ n = part[i].reg_num;
2253
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2254
+ }
2255
+
2256
+ /* memset read registers */
2257
+ part = hw->part_r;
2258
+ for (i = 0; i < hw->part_r_num; i++) {
2259
+ off = part[i].tb_reg_off;
2260
+ n = part[i].reg_num;
2261
+ memset(&tb_reg[off], 0, n * sizeof(u32));
2262
+ }
2263
+ list_move_tail(&table->link, &ccu->used_list);
2264
+ task->table = table;
2265
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2266
+ mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2267
+ mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2268
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2269
+
2270
+ mpp_debug_leave();
2271
+
2272
+ return mpp_task;
2273
+}
2274
+
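/*
 * Editor's sketch: the link tables form a singly linked list in device
 * memory; word hw->tb_reg_next of each table holds the bus address of the
 * table the hardware should fetch next, which is what the table0/table1
 * pairing above sets up.  Simplified model (types and names hypothetical):
 */
struct demo_link_table {
	u32 *vaddr;		/* CPU mapping of the link table */
	dma_addr_t iova;	/* device address of the link table */
};

static void demo_chain_tables(struct demo_link_table *cur,
			      struct demo_link_table *next,
			      u32 tb_reg_next)
{
	/* publish the next table's address before the hw is kicked */
	cur->vaddr[tb_reg_next] = (u32)next->iova;
}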
2275
+static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
2276
+{
2277
+ int ret = 0;
2278
+ u32 i, val;
2279
+ u32 reg, reg_idx, rcb_size, rcb_offset;
2280
+
2281
+ if (!dec->rcb_iova && !dec->rcb_info_count)
2282
+ goto done;
2283
+ /* check whether fixed */
2284
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2285
+ if (val & RKVDEC_CCU_BIT_FIX_RCB)
2286
+ goto done;
2287
+ /* set registers */
2288
+ rcb_offset = 0;
2289
+ for (i = 0; i < dec->rcb_info_count; i += 2) {
2290
+ reg_idx = dec->rcb_infos[i];
2291
+ rcb_size = dec->rcb_infos[i + 1];
2292
+ mpp_debug(DEBUG_SRAM_INFO,
2293
+ "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
2294
+ reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
2295
+ if ((rcb_offset + rcb_size) > dec->rcb_size) {
2296
+ mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
2297
+ ret = -ENOMEM;
2298
+ goto done;
2299
+ }
2300
+ reg = dec->rcb_iova + rcb_offset;
2301
+ mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
2302
+ rcb_offset += rcb_size;
2303
+ }
2304
+
2305
+ val |= RKVDEC_CCU_BIT_FIX_RCB;
2306
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2307
+done:
2308
+ return ret;
2309
+}
2310
+
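/*
 * Editor's note (layout illustration, inferred from the loop above):
 * dec->rcb_infos is consumed as (register index, size) pairs and each RCB
 * register is given an address carved sequentially out of the SRAM window
 * starting at dec->rcb_iova:
 *
 *	rcb_infos[] = { reg_a, size_a, reg_b, size_b, ... };
 *	reg_a <- rcb_iova + 0
 *	reg_b <- rcb_iova + size_a
 *	...
 */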
2311
+static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
2312
+ struct mpp_task *mpp_task,
2313
+ struct mpp_taskqueue *queue,
2314
+ struct mpp_dev *mpp)
2315
+{
2316
+ u32 ccu_en, work_mode, link_mode;
2317
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2318
+ u32 timing_en = mpp->srv->timing_en;
2319
+
2320
+ mpp_debug_enter();
2321
+
2322
+ if (test_bit(TASK_STATE_START, &mpp_task->state))
2323
+ goto done;
2324
+
2325
+ ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2326
+ mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
2327
+ if (!ccu_en) {
2328
+ u32 i;
2329
+
2330
+ /* set work mode */
2331
+ work_mode = 0;
2332
+ for (i = 0; i < queue->core_count; i++) {
2333
+ u32 val;
2334
+ struct mpp_dev *core = queue->cores[i];
2335
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2336
+
2337
+ if (core->disable)
2338
+ continue;
2339
+ work_mode |= dec->core_mask;
2340
+ rkvdec2_ccu_link_fix_rcb_regs(dec);
2341
+ /* control by ccu */
2342
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2343
+ val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
2344
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2345
+ }
2346
+ writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2347
+ ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2348
+ mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
2349
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
2350
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));
2351
+
2352
+ /* set auto gating */
2353
+ writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
2354
+ /* link start base */
2355
+ writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
2356
+ /* enable link */
2357
+ writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2358
+ }
2359
+
2360
+ /* set link mode */
2361
+ link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
2362
+ writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);
2363
+
2364
+ /* flush tlb before starting hardware */
2365
+ mpp_iommu_flush_tlb(mpp->iommu_info);
2366
+ /* wmb */
2367
+ wmb();
2368
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
2369
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
2370
+ /* configure done */
2371
+ writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
2372
+ mpp_task_run_end(mpp_task, timing_en);
2373
+
2374
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
2375
+ mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
2376
+ mpp_task->session->index, mpp_task->task_index,
2377
+ (u32)task->table->iova, mpp_task->state,
2378
+ readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
2379
+done:
2380
+ mpp_debug_leave();
2381
+
2382
+ return 0;
2383
+}
2384
+
2385
+static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2386
+{
2387
+ struct rkvdec2_task *task_pre = NULL;
2388
+ struct mpp_task *loop = NULL, *n;
2389
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2390
+
2391
+ /* re-sort the running list */
2392
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2393
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2394
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2395
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2396
+
2397
+ if (!irq_status) {
2398
+ if (task_pre) {
2399
+ tb_reg = (u32 *)task_pre->table->vaddr;
2400
+ tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
2401
+ }
2402
+ task_pre = task;
2403
+ }
2404
+ }
2405
+
2406
+ if (task_pre) {
2407
+ struct mpp_dma_buffer *tbl;
2408
+ u32 *tb_reg;
2409
+
2410
+ tbl = list_first_entry_or_null(&dec->ccu->unused_list,
2411
+ struct mpp_dma_buffer, link);
2412
+ WARN_ON(!tbl);
2413
+ if (tbl) {
2414
+ tb_reg = (u32 *)task_pre->table->vaddr;
2415
+ tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
2416
+ }
2417
+ }
2418
+
2419
+ /* resend */
2420
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2421
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2422
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2423
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2424
+
2425
+ mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
2426
+ loop->session->index, loop->task_index, (u32)task->table->iova,
2427
+ tb_reg[dec->link_dec->info->tb_reg_next], irq_status);
2428
+
2429
+ if (!irq_status) {
2430
+ cancel_delayed_work(&loop->timeout_work);
2431
+ clear_bit(TASK_STATE_START, &loop->state);
2432
+ rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
2433
+ }
2434
+ }
2435
+}
2436
+
2437
+void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
2438
+{
2439
+ struct mpp_task *mpp_task;
2440
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
2441
+ struct mpp_taskqueue *queue = mpp->queue;
2442
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2443
+
2444
+ mpp_debug_enter();
2445
+
2446
+ /* 1. process all finished task in running list */
2447
+ rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);
2448
+
2449
+ /* 2. process reset request */
2450
+ if (atomic_read(&queue->reset_request) &&
2451
+ (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
2452
+ /*
2453
+ * cancel the timeout work of tasks on the running list so that
2454
+ * a long reset does not cause a spurious sw timeout
2455
+ */
2456
+ struct mpp_task *loop = NULL, *n;
2457
+
2458
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2459
+ cancel_delayed_work(&loop->timeout_work);
2460
+ }
2461
+ /* reset process */
2462
+ rkvdec2_hard_ccu_reset(queue, dec->ccu);
2463
+ atomic_set(&queue->reset_request, 0);
2464
+
2465
+ /* relink running task iova in list, and resend them to hw */
2466
+ if (!list_empty(&queue->running_list))
2467
+ rkvdec2_hard_ccu_resend_tasks(mpp, queue);
2468
+ }
2469
+
2470
+ /* 3. process pending task */
2471
+ while (1) {
2472
+ if (atomic_read(&queue->reset_request))
2473
+ break;
2474
+
2475
+ /* get one task from the pending list */
2476
+ mutex_lock(&queue->pending_lock);
2477
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
2478
+ struct mpp_task, queue_link);
2479
+ mutex_unlock(&queue->pending_lock);
2480
+
2481
+ if (!mpp_task)
2482
+ break;
2483
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
2484
+ mutex_lock(&queue->pending_lock);
2485
+ list_del_init(&mpp_task->queue_link);
2486
+ mutex_unlock(&queue->pending_lock);
2487
+ kref_put(&mpp_task->ref, mpp_free_task);
2488
+ continue;
2489
+ }
2490
+
2491
+ mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
2492
+ if (!mpp_task)
2493
+ break;
2494
+
2495
+ rkvdec2_ccu_power_on(queue, dec->ccu);
2496
+ rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
2497
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
2498
+ }
2499
+
2500
+ /* 4. poweroff when running and pending list are empty */
2501
+ mutex_lock(&queue->pending_lock);
2502
+ if (list_empty(&queue->running_list) &&
2503
+ list_empty(&queue->pending_list))
2504
+ rkvdec2_ccu_power_off(queue, dec->ccu);
2505
+ mutex_unlock(&queue->pending_lock);
2506
+
2507
+ /* 5. check session detach out of queue */
2508
+ mpp_session_cleanup_detach(queue, work_s);
2509
+
2510
+ mpp_debug_leave();
2511
+}