forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
@@ -12,62 +12,77 @@
 #include <linux/slab.h>
 #include <soc/rockchip/pm_domains.h>
 #include <soc/rockchip/rockchip_dmc.h>
+#include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_rkvdec2_link.h"
 
 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
 
-#ifdef CONFIG_PM_DEVFREQ
-#include "../../../devfreq/governor.h"
-#endif
-
+#define WORK_TIMEOUT_MS (500)
 #define WAIT_TIMEOUT_MS (2000)
+#define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
 
-#define RKVDEC_MAX_WRITE_PART 6
-#define RKVDEC_MAX_READ_PART 2
+/* vdpu381 link hw info for rk3588 */
+struct rkvdec_link_info rkvdec_link_v2_hw_info = {
+	.tb_reg_num = 218,
+	.tb_reg_next = 0,
+	.tb_reg_r = 1,
+	.tb_reg_second_en = 8,
 
-struct rkvdec_link_part {
-	/* register offset of table buffer */
-	u32 tb_reg_off;
-	/* start idx of task register */
-	u32 reg_start;
-	/* number of task register */
-	u32 reg_num;
+	.part_w_num = 6,
+	.part_r_num = 2,
+	.part_w[0] = {
+		.tb_reg_off = 4,
+		.reg_start = 8,
+		.reg_num = 28,
+	},
+	.part_w[1] = {
+		.tb_reg_off = 32,
+		.reg_start = 64,
+		.reg_num = 52,
+	},
+	.part_w[2] = {
+		.tb_reg_off = 84,
+		.reg_start = 128,
+		.reg_num = 16,
+	},
+	.part_w[3] = {
+		.tb_reg_off = 100,
+		.reg_start = 160,
+		.reg_num = 48,
+	},
+	.part_w[4] = {
+		.tb_reg_off = 148,
+		.reg_start = 224,
+		.reg_num = 16,
+	},
+	.part_w[5] = {
+		.tb_reg_off = 164,
+		.reg_start = 256,
+		.reg_num = 16,
+	},
+	.part_r[0] = {
+		.tb_reg_off = 180,
+		.reg_start = 224,
+		.reg_num = 10,
+	},
+	.part_r[1] = {
+		.tb_reg_off = 190,
+		.reg_start = 258,
+		.reg_num = 28,
+	},
+	.tb_reg_int = 180,
+	.tb_reg_cycle = 195,
+	.hack_setup = 0,
+	.reg_status = {
+		.dec_num_mask = 0x3fffffff,
+		.err_flag_base = 0x010,
+		.err_flag_bit = BIT(31),
+	},
 };
 
-struct rkvdec_link_status {
-	u32 dec_num_mask;
-	u32 err_flag_base;
-	u32 err_flag_bit;
-};
-
-struct rkvdec_link_info {
-	dma_addr_t iova;
-	/* total register for link table buffer */
-	u32 tb_reg_num;
-	/* next link table addr in table buffer */
-	u32 tb_reg_next;
-	/* current read back addr in table buffer */
-	u32 tb_reg_r;
-	/* secondary enable in table buffer */
-	u32 tb_reg_second_en;
-	u32 part_w_num;
-	u32 part_r_num;
-
-	struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
-	struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
-
-	/* interrupt read back in table buffer */
-	u32 tb_reg_int;
-	bool hack_setup;
-	u32 tb_reg_cycle;
-	u32 tb_reg_out;
-	u32 tb_reg_ref_s;
-	u32 tb_reg_ref_e;
-	struct rkvdec_link_status reg_status;
-};
-
-struct rkvdec_link_info rkvdec_link_rk3568_hw_info = {
+/* vdpu34x link hw info for rk356x */
+struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
 	.tb_reg_num = 202,
 	.tb_reg_next = 0,
 	.tb_reg_r = 1,
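The part_w[]/part_r[] tables above are only descriptors: each entry names a destination offset inside a link-table node (tb_reg_off), the first task register to copy (reg_start) and how many registers follow (reg_num). Below is a minimal user-space sketch of how such a descriptor drives the packing that the prepare path does with memcpy(); the struct and function names here are illustrative, not taken from the driver.

#include <stdint.h>
#include <string.h>

struct link_part { uint32_t tb_reg_off, reg_start, reg_num; };

/* Copy each write partition from the task register array into the
 * link-table node, one memcpy() per descriptor entry. */
static void pack_link_node(uint32_t *tb_reg, const uint32_t *task_reg,
			   const struct link_part *parts, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		memcpy(&tb_reg[parts[i].tb_reg_off],
		       &task_reg[parts[i].reg_start],
		       parts[i].reg_num * sizeof(uint32_t));
}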
@@ -116,8 +131,8 @@
 		.reg_num = 28,
 	},
 	.tb_reg_int = 164,
-	.hack_setup = 1,
 	.tb_reg_cycle = 179,
+	.hack_setup = 1,
 	.reg_status = {
 		.dec_num_mask = 0x3fffffff,
 		.err_flag_base = 0x010,
@@ -126,7 +141,7 @@
 };
 
 /* vdpu382 link hw info */
-struct rkvdec_link_info rkvdec_link_v2_hw_info = {
+struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
 	.tb_reg_num = 222,
 	.tb_reg_next = 0,
 	.tb_reg_r = 1,
@@ -174,12 +189,9 @@
 		.reg_start = 258,
 		.reg_num = 30,
 	},
-	.tb_reg_int = 180,
-	.hack_setup = 0,
-	.tb_reg_cycle = 197,
-	.tb_reg_out = 86,
-	.tb_reg_ref_s = 104,
-	.tb_reg_ref_e = 119,
+	.tb_reg_int = 180,
+	.hack_setup = 0,
+	.tb_reg_cycle = 197,
 	.reg_status = {
 		.dec_num_mask = 0x000fffff,
 		.err_flag_base = 0x024,
@@ -188,6 +200,11 @@
 };
 
 static void rkvdec2_link_free_task(struct kref *ref);
+static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
+					   struct device *iommu_dev,
+					   unsigned long iova,
+					   int status, void *arg);
 
 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
 {
@@ -239,7 +256,7 @@
 	u32 *reg = NULL;
 	u32 i, j;
 
-	for (i = 0; i < dev->task_size; i++) {
+	for (i = 0; i < dev->task_capacity; i++) {
 		reg = table_base + i * reg_count;
 
 		mpp_err("slot %d link config iova %08x:\n", i,
@@ -286,9 +303,8 @@
 {
 	mpp_err("dump link counter from %s\n", func);
 
-	mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
-		dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
-		dev->task_to_run, dev->task_decoded, dev->task_total);
+	mpp_err("task pending %d running %d\n",
+		atomic_read(&dev->task_pending), dev->task_running);
 }
 
 int rkvdec_link_dump(struct mpp_dev *mpp)
@@ -301,158 +317,6 @@
301317 rkvdec_link_counter(__func__, dev);
302318 rkvdec_core_reg_dump(__func__, dev);
303319 rkvdec_link_node_dump(__func__, dev);
304
-
305
- return 0;
306
-}
307
-
308
-static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
309
-{
310
- int idx = dev->task_write < dev->task_size ? dev->task_write :
311
- dev->task_write - dev->task_size;
312
-
313
- return idx;
314
-}
315
-static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
316
-{
317
- int task_write = rkvdec_link_get_task_write(dev);
318
-
319
- dev->task_write++;
320
- if (dev->task_write >= dev->task_size * 2)
321
- dev->task_write = 0;
322
-
323
- return task_write;
324
-}
325
-static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
326
-{
327
- int idx = dev->task_read < dev->task_size ? dev->task_read :
328
- dev->task_read - dev->task_size;
329
-
330
- return idx;
331
-}
332
-static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
333
-{
334
- int task_read = rkvdec_link_get_task_read(dev);
335
-
336
- dev->task_read++;
337
- if (dev->task_read >= dev->task_size * 2)
338
- dev->task_read = 0;
339
-
340
- return task_read;
341
-}
342
-static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
343
-{
344
- int len;
345
-
346
- if (dev->task_send <= dev->task_recv)
347
- len = dev->task_send + dev->task_size - dev->task_recv;
348
- else
349
- len = dev->task_send - dev->task_recv - dev->task_size;
350
-
351
- return len;
352
-}
353
-static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
354
-{
355
- int idx = dev->task_send < dev->task_size ? dev->task_send :
356
- dev->task_send - dev->task_size;
357
-
358
- return idx;
359
-}
360
-static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
361
-{
362
- int task_send = rkvdec_link_get_task_send(dev);
363
-
364
- dev->task_send++;
365
- if (dev->task_send >= dev->task_size * 2)
366
- dev->task_send = 0;
367
-
368
- return task_send;
369
-}
370
-static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
371
-{
372
- int task_recv = dev->task_recv;
373
-
374
- dev->task_recv++;
375
- if (dev->task_recv >= dev->task_size * 2)
376
- dev->task_recv = 0;
377
-
378
- return task_recv;
379
-}
380
-
381
-static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
382
-{
383
- int next = -1;
384
-
385
- if (dev->task_write == dev->task_read)
386
- return next;
387
-
388
- next = rkvdec_link_get_task_write(dev);
389
-
390
- return next;
391
-}
392
-
393
-static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
394
- struct mpp_task *mpp_task)
395
-{
396
- u32 i, off, s, n;
397
- struct rkvdec_link_part *part;
398
- struct rkvdec_link_info *info;
399
- struct mpp_dma_buffer *table;
400
- struct rkvdec2_task *task;
401
- int slot_idx;
402
- u32 *tb_reg;
403
-
404
- if (idx < 0 || idx >= dev->task_size) {
405
- mpp_err("send invalid task index %d\n", idx);
406
- return -1;
407
- }
408
-
409
- info = dev->info;
410
- part = info->part_w;
411
- table = dev->table;
412
- task = to_rkvdec2_task(mpp_task);
413
-
414
- slot_idx = rkvdec_link_inc_task_write(dev);
415
- if (idx != slot_idx)
416
- dev_info(dev->dev, "slot index mismatch %d vs %d\n",
417
- idx, slot_idx);
418
-
419
- if (task->need_hack) {
420
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
421
-
422
- rkvdec2_3568_hack_fix_link(tb_reg + 4);
423
-
424
- /* setup error mode flag */
425
- dev->tasks_hw[slot_idx] = NULL;
426
- dev->task_to_run++;
427
- dev->task_prepared++;
428
- slot_idx = rkvdec_link_inc_task_write(dev);
429
- }
430
-
431
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
432
-
433
- for (i = 0; i < info->part_w_num; i++) {
434
- off = part[i].tb_reg_off;
435
- s = part[i].reg_start;
436
- n = part[i].reg_num;
437
- memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
438
- }
439
-
440
- tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
441
-
442
- /* memset read registers */
443
- part = info->part_r;
444
- for (i = 0; i < info->part_r_num; i++) {
445
- off = part[i].tb_reg_off;
446
- n = part[i].reg_num;
447
- memset(&tb_reg[off], 0, n * sizeof(u32));
448
- }
449
-
450
- dev->tasks_hw[slot_idx] = mpp_task;
451
- task->slot_idx = slot_idx;
452
- dev->task_to_run++;
453
- dev->task_prepared++;
454
- mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
455
- mpp_task->task_index);
456320
457321 return 0;
458322 }
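The helpers removed in this hunk implemented a two-lap ring index: task_write/task_read count modulo 2 * task_size, and the read index is initialised one lap (task_size) ahead, so "write == read" can be recognised as "ring full" without a separate element counter. A small stand-alone model of that arithmetic, with illustrative names rather than driver code:

#include <stdbool.h>

struct twolap_ring { int size, wr, rd; };

static void ring_init(struct twolap_ring *r, int size)
{
	r->size = size;
	r->wr = 0;
	r->rd = size;	/* one lap ahead: wr == rd now means "full" */
}

static bool ring_full(const struct twolap_ring *r)
{
	return r->wr == r->rd;
}

/* Map a two-lap index back to a slot in [0, size). */
static int ring_slot(int idx, int size)
{
	return idx < size ? idx : idx - size;
}

/* Claim the next slot and advance the write index with wrap at 2 * size. */
static int ring_push(struct twolap_ring *r)
{
	int slot = ring_slot(r->wr, r->size);

	if (++r->wr >= 2 * r->size)
		r->wr = 0;
	return slot;
}

The new code in this patch drops the scheme entirely in favour of free/used lists of per-task table buffers.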
@@ -476,34 +340,20 @@
476340 mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
477341 }
478342
479
-static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
480
- struct mpp_task *mpp_task,
481
- int slot_idx, u32 task_to_run,
482
- int resend)
343
+static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
344
+ struct mpp_task *mpp_task)
483345 {
484
- void __iomem *reg_base = dev->reg_base;
485
- struct mpp_dma_buffer *table = dev->table;
486
- u32 task_total = dev->task_total;
487
- u32 mode_start = 0;
488
- u32 val;
346
+ void __iomem *reg_base = link_dec->reg_base;
347
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
348
+ struct mpp_dma_buffer *table = task->table;
349
+ u32 link_en = 0;
350
+ u32 frame_num = 1;
351
+ u32 link_mode;
352
+ u32 timing_en = link_dec->mpp->srv->timing_en;
489353
490
- /* write address */
491
- if (!task_to_run || task_to_run > dev->task_size ||
492
- slot_idx < 0 || slot_idx >= dev->task_size) {
493
- mpp_err("invalid task send cfg at %d count %d\n",
494
- slot_idx, task_to_run);
495
- rkvdec_link_counter("error on send", dev);
496
- return 0;
497
- }
498
-
499
- val = task_to_run;
500
- if (!task_total || resend)
501
- mode_start = 1;
502
-
503
- if (mode_start) {
504
- u32 iova = table->iova + slot_idx * dev->link_node_size;
505
-
506
- rkvdec2_clear_cache(dev->mpp);
354
+ link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
355
+ if (!link_en) {
356
+ rkvdec2_clear_cache(link_dec->mpp);
507357 /* cleanup counter in hardware */
508358 writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
509359 /* start config before all registers are set */
@@ -513,55 +363,31 @@
513363 wmb();
514364 /* clear counter and enable link mode hardware */
515365 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
516
-
517
- dev->task_total = 0;
518
- dev->task_decoded = 0;
519
-
520
- writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
521
- } else {
522
- val |= RKVDEC_LINK_BIT_ADD_MODE;
523
- }
524
-
525
- if (!resend) {
526
- u32 timing_en = dev->mpp->srv->timing_en;
527
- u32 i;
528
-
529
- for (i = 0; i < task_to_run; i++) {
530
- int next_idx = rkvdec_link_inc_task_send(dev);
531
- struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
532
-
533
- if (!task_ddr)
534
- continue;
535
-
536
- mpp_task_run_begin(task_ddr, timing_en, MPP_WORK_TIMEOUT_DELAY);
537
- mpp_task_run_end(task_ddr, timing_en);
538
- }
539
- } else {
540
- if (task_total)
541
- dev_info(dev->dev, "resend with total %d\n", task_total);
542
- }
366
+ writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
367
+ link_mode = frame_num;
368
+ } else
369
+ link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
543370
544371 /* set link mode */
545
- writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
372
+ writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);
546373
547374 /* start config before all registers are set */
548375 wmb();
549376
550
- mpp_iommu_flush_tlb(dev->mpp->iommu_info);
377
+ mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
378
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
551379
380
+ link_dec->task_running++;
552381 /* configure done */
553382 writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
554
-
555
- mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
556
- task_to_run, mode_start ? "start" : "add");
557
- if (mode_start) {
383
+ if (!link_en) {
558384 /* start hardware before all registers are set */
559385 wmb();
560386 /* clear counter and enable link mode hardware */
561387 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
562388 }
389
+ mpp_task_run_end(mpp_task, timing_en);
563390
564
- dev->task_total += task_to_run;
565391 return 0;
566392 }
567393
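The new rkvdec2_link_enqueue() decides between "start" and "append" by reading back RKVDEC_LINK_EN_BASE: if link mode is not yet enabled it programs the first node address and writes a plain frame count to LINK_MODE, otherwise it only adds to the outstanding count via RKVDEC_LINK_BIT_ADD_MODE. A tiny sketch of that decision; the bit value below is illustrative, the real one comes from mpp_rkvdec2_link.h.

#include <stdint.h>
#include <stdbool.h>

#define LINK_BIT_ADD_MODE (1u << 8)	/* illustrative, not the real bit */

/* A fresh start uses the bare frame count (node address is programmed
 * separately); a running link engine only gets "add frame_num more". */
static uint32_t link_mode_value(bool link_enabled, uint32_t frame_num)
{
	return link_enabled ? (frame_num | LINK_BIT_ADD_MODE) : frame_num;
}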
@@ -573,8 +399,7 @@
 	struct mpp_dma_buffer *table = link_dec->table;
 	struct rkvdec_link_info *info = link_dec->info;
 	struct rkvdec_link_part *part = info->part_r;
-	int slot_idx = task->slot_idx;
-	u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
+	u32 *tb_reg = (u32 *)table->vaddr;
 	u32 off, s, n;
 	u32 i;
 
@@ -594,161 +419,71 @@
594419 return 0;
595420 }
596421
597
-static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
598
- struct rkvdec_link_dev *link_dec,
599
- int count)
600
-{
601
- struct rkvdec_link_info *info = link_dec->info;
602
- u32 *table_base = (u32 *)link_dec->table->vaddr;
603
- int i;
604
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
605
-
606
- for (i = 0; i < count; i++) {
607
- int idx = rkvdec_link_get_task_read(link_dec);
608
- struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
609
- struct rkvdec2_task *task = NULL;
610
- u32 *regs = NULL;
611
- u32 irq_status = 0;
612
-
613
- if (!mpp_task && info->hack_setup) {
614
- regs = table_base + idx * link_dec->link_reg_count;
615
- mpp_dbg_link_flow("slot %d read task stuff\n", idx);
616
-
617
- link_dec->stuff_total++;
618
- if (link_dec->statistic_count &&
619
- regs[info->tb_reg_cycle]) {
620
- link_dec->stuff_cycle_sum +=
621
- regs[info->tb_reg_cycle];
622
- link_dec->stuff_cnt++;
623
- if (link_dec->stuff_cnt >=
624
- link_dec->statistic_count) {
625
- dev_info(
626
- link_dec->dev, "hw cycle %u\n",
627
- (u32)(link_dec->stuff_cycle_sum /
628
- link_dec->statistic_count));
629
- link_dec->stuff_cycle_sum = 0;
630
- link_dec->stuff_cnt = 0;
631
- }
632
- }
633
-
634
- if (link_dec->error && (i == (count - 1))) {
635
- link_dec->stuff_err++;
636
-
637
- irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
638
- dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
639
- irq_status, link_dec->stuff_err,
640
- link_dec->stuff_total);
641
-
642
- if (link_dec->stuff_on_error) {
643
- dev_info(link_dec->dev, "stuff task error again %u/%u\n",
644
- link_dec->stuff_err,
645
- link_dec->stuff_total);
646
- }
647
-
648
- link_dec->stuff_on_error = 1;
649
- /* resend task */
650
- link_dec->decoded--;
651
- } else {
652
- link_dec->stuff_on_error = 0;
653
- rkvdec_link_inc_task_recv(link_dec);
654
- rkvdec_link_inc_task_read(link_dec);
655
- link_dec->task_running--;
656
- link_dec->task_prepared--;
657
- }
658
-
659
- continue;
660
- }
661
-
662
- if (!mpp_task)
663
- return 0;
664
-
665
- task = to_rkvdec2_task(mpp_task);
666
- regs = table_base + idx * link_dec->link_reg_count;
667
- link_dec->error_iova = regs[info->tb_reg_out];
668
- irq_status = regs[info->tb_reg_int];
669
- mpp_task->hw_cycles = regs[info->tb_reg_cycle];
670
- mpp_time_diff_with_hw_time(mpp_task, dec->aclk_info.real_rate_hz);
671
- mpp_dbg_link_flow("slot %d rd task %d\n", idx,
672
- mpp_task->task_index);
673
-
674
- task->irq_status = irq_status ? irq_status : mpp->irq_status;
675
- mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
676
- cancel_delayed_work_sync(&mpp_task->timeout_work);
677
- set_bit(TASK_STATE_HANDLE, &mpp_task->state);
678
-
679
- if (link_dec->statistic_count &&
680
- regs[info->tb_reg_cycle]) {
681
- link_dec->task_cycle_sum +=
682
- regs[info->tb_reg_cycle];
683
- link_dec->task_cnt++;
684
- if (link_dec->task_cnt >= link_dec->statistic_count) {
685
- dev_info(link_dec->dev, "hw cycle %u\n",
686
- (u32)(link_dec->task_cycle_sum /
687
- link_dec->statistic_count));
688
- link_dec->task_cycle_sum = 0;
689
- link_dec->task_cnt = 0;
690
- }
691
- }
692
-
693
- rkvdec2_link_finish(mpp, mpp_task);
694
-
695
- set_bit(TASK_STATE_FINISH, &mpp_task->state);
696
-
697
- list_del_init(&mpp_task->queue_link);
698
- link_dec->task_running--;
699
- link_dec->task_prepared--;
700
-
701
- rkvdec_link_inc_task_recv(link_dec);
702
- rkvdec_link_inc_task_read(link_dec);
703
-
704
- if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
705
- set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
706
-
707
- set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
708
- /* Wake up the GET thread */
709
- wake_up(&task->wait);
710
- kref_put(&mpp_task->ref, rkvdec2_link_free_task);
711
- link_dec->tasks_hw[idx] = NULL;
712
- }
713
-
714
- return 0;
715
-}
716
-
717422 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
718423 struct mpp_task *mpp_task)
719424 {
720
- struct mpp_task *out_task = NULL;
721425 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
722426 struct rkvdec_link_dev *link_dec = dec->link_dec;
723
- int ret = 0;
724
- int slot_idx;
427
+ struct mpp_dma_buffer *table = NULL;
428
+ struct rkvdec_link_part *part;
429
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
430
+ struct rkvdec_link_info *info = link_dec->info;
431
+ u32 i, off, s, n;
432
+ u32 *tb_reg;
725433
726434 mpp_debug_enter();
727435
728
- slot_idx = rkvdec_link_get_next_slot(link_dec);
729
- if (slot_idx < 0) {
730
- mpp_err("capacity %d running %d\n",
731
- mpp->task_capacity, link_dec->task_running);
732
- dev_err(link_dec->dev, "no slot to write on get next slot\n");
733
- goto done;
436
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
437
+ dev_err(mpp->dev, "task %d has prepared\n", mpp_task->task_index);
438
+ return mpp_task;
734439 }
735440
736
- ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
737
- if (ret >= 0)
738
- out_task = mpp_task;
739
- else
740
- dev_err(mpp->dev, "no slot to write\n");
441
+ table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);
741442
742
-done:
443
+ if (!table)
444
+ return NULL;
445
+
446
+ /* fill regs value */
447
+ tb_reg = (u32 *)table->vaddr;
448
+ part = info->part_w;
449
+ for (i = 0; i < info->part_w_num; i++) {
450
+ off = part[i].tb_reg_off;
451
+ s = part[i].reg_start;
452
+ n = part[i].reg_num;
453
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
454
+ }
455
+
456
+ /* setup error mode flag */
457
+ tb_reg[9] |= BIT(18) | BIT(9);
458
+ tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
459
+
460
+ /* memset read registers */
461
+ part = info->part_r;
462
+ for (i = 0; i < info->part_r_num; i++) {
463
+ off = part[i].tb_reg_off;
464
+ n = part[i].reg_num;
465
+ memset(&tb_reg[off], 0, n * sizeof(u32));
466
+ }
467
+
468
+ list_move_tail(&table->link, &link_dec->used_list);
469
+ task->table = table;
470
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
471
+
472
+ mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
473
+ mpp_task->session->index, mpp_task->task_index,
474
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
743475 mpp_debug_leave();
744476
745
- return out_task;
477
+ return mpp_task;
746478 }
747479
748480 static int rkvdec2_link_reset(struct mpp_dev *mpp)
749481 {
750482
751483 dev_info(mpp->dev, "resetting...\n");
484
+
485
+ disable_irq(mpp->irq);
486
+ mpp_iommu_disable_irq(mpp->iommu_info);
752487
753488 /* FIXME lock resource lock of the other devices in combo */
754489 mpp_iommu_down_write(mpp->iommu_info);
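The rewritten prepare path above replaces the fixed slot ring with a pool of per-task table buffers: prepare takes a free node from link_dec->unused_list, fills it, and moves it to link_dec->used_list; the dequeue path later moves it back when the task completes. A stand-alone model of that take/give pattern (the driver itself uses list_move_tail() between the two lists; the names below are illustrative):

#include <stddef.h>

struct node_pool {
	void *free[64];
	size_t count;
};

/* Take a free table node; NULL means "no node", which the prepare path
 * turns into back-pressure on the pending queue. */
static void *pool_take(struct node_pool *p)
{
	return p->count ? p->free[--p->count] : NULL;
}

/* Return a node once the hardware has consumed the task. */
static void pool_give(struct node_pool *p, void *node)
{
	p->free[p->count++] = node;
}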
@@ -771,52 +506,11 @@
771506 mpp_reset_up_write(mpp->reset_group);
772507 mpp_iommu_up_write(mpp->iommu_info);
773508
509
+ enable_irq(mpp->irq);
510
+ mpp_iommu_enable_irq(mpp->iommu_info);
774511 dev_info(mpp->dev, "reset done\n");
775512
776513 return 0;
777
-}
778
-
779
-static void rkvdec2_check_err_ref(struct mpp_dev *mpp)
780
-{
781
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
782
- struct rkvdec_link_dev *link_dec = dec->link_dec;
783
- struct rkvdec_link_info *link_info = link_dec->info;
784
- struct mpp_taskqueue *queue = mpp->queue;
785
- struct mpp_task *mpp_task = NULL, *n;
786
- struct rkvdec2_task *task;
787
- int i;
788
-
789
- if (!link_dec->error_iova || !dec->err_ref_hack)
790
- return;
791
-
792
- dev_err(mpp->dev, "err task iova %#08x\n", link_dec->error_iova);
793
- list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
794
- if (mpp_task) {
795
- u32 *regs = NULL;
796
- u32 *table_base = (u32 *)link_dec->table->vaddr;
797
-
798
- task = to_rkvdec2_task(mpp_task);
799
- regs = table_base + task->slot_idx * link_dec->link_reg_count;
800
-
801
- for (i = link_info->tb_reg_ref_s; i <= link_info->tb_reg_ref_e; i++) {
802
- if (regs[i] == link_dec->error_iova)
803
- regs[i] = 0;
804
- }
805
- }
806
- }
807
-
808
- mutex_lock(&queue->pending_lock);
809
- list_for_each_entry_safe(mpp_task, n, &queue->pending_list, queue_link) {
810
- task = to_rkvdec2_task(mpp_task);
811
-
812
- /* ref frame reg index start - end */
813
- for (i = 164; i <= 179; i++) {
814
- if (task->reg[i] == link_dec->error_iova)
815
- task->reg[i] = 0;
816
- }
817
- }
818
- mutex_unlock(&queue->pending_lock);
819
- link_dec->error_iova = 0;
820514 }
821515
822516 static int rkvdec2_link_irq(struct mpp_dev *mpp)
@@ -845,122 +539,12 @@
845539
846540 link_dec->irq_status = irq_status;
847541 mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
848
- mpp_dbg_link_flow("core irq %08x\n", mpp->irq_status);
849542
850543 writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
851544 }
852
- mpp_debug((DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE), "irq_status: %08x : %08x\n",
545
+
546
+ mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
853547 irq_status, mpp->irq_status);
854
- return 0;
855
-}
856
-
857
-static int rkvdec2_link_isr(struct mpp_dev *mpp)
858
-{
859
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
860
- struct rkvdec_link_dev *link_dec = dec->link_dec;
861
- struct rkvdec_link_info *link_info = link_dec->info;
862
- /* keep irq_status */
863
- u32 irq_status = link_dec->irq_status;
864
- u32 prev_dec_num;
865
- int count = 0;
866
- u32 len = 0;
867
- u32 need_reset = atomic_read(&mpp->reset_request);
868
- u32 task_timeout = link_dec->task_on_timeout;
869
-
870
- mpp_debug_enter();
871
-
872
- disable_irq(mpp->irq);
873
- mpp_iommu_disable_irq(mpp->iommu_info);
874
- rkvdec_link_status_update(link_dec);
875
- link_dec->irq_status = irq_status;
876
- prev_dec_num = link_dec->task_decoded;
877
-
878
- if (!link_dec->enabled || task_timeout) {
879
- u32 val;
880
-
881
- if (task_timeout) {
882
- rkvdec_link_reg_dump("timeout", link_dec);
883
- link_dec->decoded += task_timeout;
884
- }
885
-
886
- val = mpp_read(mpp, 224 * 4);
887
- if (link_info->hack_setup && !(val & BIT(2))) {
888
- /* only for rk356x */
889
- dev_info(mpp->dev, "frame not complete\n");
890
- link_dec->decoded++;
891
- }
892
- }
893
- count = (int)link_dec->decoded - (int)prev_dec_num;
894
-
895
- /* handle counter wrap */
896
- if (link_dec->enabled && !count && !need_reset) {
897
- /* process extra isr when task is processed */
898
- enable_irq(mpp->irq);
899
- mpp_iommu_enable_irq(mpp->iommu_info);
900
- goto done;
901
- }
902
-
903
- /* get previous ready task */
904
- if (count) {
905
- rkvdec_link_isr_recv_task(mpp, link_dec, count);
906
- link_dec->task_decoded = link_dec->decoded;
907
- }
908
-
909
- if (!link_dec->enabled || need_reset)
910
- goto do_reset;
911
-
912
- enable_irq(mpp->irq);
913
- mpp_iommu_enable_irq(mpp->iommu_info);
914
- goto done;
915
-
916
-do_reset:
917
- rkvdec2_check_err_ref(mpp);
918
- /* NOTE: irq may run with reset */
919
- atomic_inc(&mpp->reset_request);
920
- rkvdec2_link_reset(mpp);
921
- link_dec->task_decoded = 0;
922
- link_dec->task_total = 0;
923
- enable_irq(mpp->irq);
924
- mpp_iommu_enable_irq(mpp->iommu_info);
925
-
926
- if (link_dec->total == link_dec->decoded)
927
- goto done;
928
-
929
- len = rkvdec_link_get_task_hw_queue_length(link_dec);
930
- if (len > link_dec->task_size)
931
- rkvdec_link_counter("invalid len", link_dec);
932
-
933
- if (len) {
934
- int slot_idx = rkvdec_link_get_task_read(link_dec);
935
- struct mpp_task *mpp_task = NULL;
936
-
937
- mpp_task = link_dec->tasks_hw[slot_idx];
938
- rkvdec_link_send_task_to_hw(link_dec, mpp_task,
939
- slot_idx, len, 1);
940
- }
941
-
942
-done:
943
- mpp_debug_leave();
944
-
945
- return IRQ_HANDLED;
946
-}
947
-
948
-static int rkvdec2_link_iommu_handle(struct iommu_domain *iommu,
949
- struct device *iommu_dev,
950
- unsigned long iova,
951
- int status, void *arg)
952
-{
953
- struct mpp_dev *mpp = (struct mpp_dev *)arg;
954
-
955
- dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
956
- iova, status, arg);
957
-
958
- if (!mpp) {
959
- dev_err(iommu_dev, "pagefault without device to handle\n");
960
- return 0;
961
- }
962
-
963
- rk_iommu_mask_irq(mpp->dev);
964548
965549 return 0;
966550 }
@@ -1015,12 +599,6 @@
 	}
 
 	link_dec->table = table;
-	link_dec->task_size = task_capacity;
-	link_dec->task_count = 0;
-	link_dec->task_write = 0;
-	link_dec->task_read = link_dec->task_size;
-	link_dec->task_send = 0;
-	link_dec->task_recv = link_dec->task_size;
 
 	return 0;
 err_free_node:
@@ -1059,18 +637,13 @@
 	struct rkvdec_link_dev *link_dec = NULL;
 	struct device *dev = &pdev->dev;
 	struct mpp_dev *mpp = &dec->mpp;
+	struct mpp_dma_buffer *table;
+	int i;
 
 	mpp_debug_enter();
 
 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
 	if (!link_dec) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
-	link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
-					  mpp->task_capacity, GFP_KERNEL);
-	if (!link_dec->tasks_hw) {
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -1096,13 +669,33 @@
 	if (ret)
 		goto done;
 
-	if (link_dec->info->hack_setup)
+	/* alloc table pointer array */
+	table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
+				   sizeof(*table), GFP_KERNEL | __GFP_ZERO);
+	if (!table)
+		return -ENOMEM;
+
+	/* init table array */
+	link_dec->table_array = table;
+	INIT_LIST_HEAD(&link_dec->used_list);
+	INIT_LIST_HEAD(&link_dec->unused_list);
+	for (i = 0; i < mpp->task_capacity; i++) {
+		table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
+		table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
+		table[i].size = link_dec->link_node_size;
+		INIT_LIST_HEAD(&table[i].link);
+		list_add_tail(&table[i].link, &link_dec->unused_list);
+	}
+
+	if (dec->fix)
 		rkvdec2_link_hack_data_setup(dec->fix);
-	iommu_set_fault_handler(mpp->iommu_info->domain,
-				rkvdec2_link_iommu_handle, mpp);
+
+	mpp->fault_handler = rkvdec2_link_iommu_fault_handle;
+
 	link_dec->mpp = mpp;
 	link_dec->dev = dev;
 	atomic_set(&link_dec->task_timeout, 0);
+	atomic_set(&link_dec->task_pending, 0);
 	atomic_set(&link_dec->power_enabled, 0);
 	link_dec->irq_enabled = 1;
 
@@ -1116,11 +709,6 @@
 		devm_iounmap(dev, link_dec->reg_base);
 		link_dec->reg_base = NULL;
 	}
-	if (link_dec->tasks_hw) {
-		devm_kfree(dev, link_dec->tasks_hw);
-		link_dec->tasks_hw = NULL;
-	}
-
 	devm_kfree(dev, link_dec);
 	link_dec = NULL;
 }
@@ -1138,15 +726,13 @@
1138726 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
1139727
1140728 if (!task->session) {
1141
- mpp_err("task %d task->session is null.\n", task->task_index);
729
+ mpp_err("task %d task->session is null.\n", task->task_id);
1142730 return;
1143731 }
1144732 session = task->session;
1145733
1146
- mpp_debug_func(DEBUG_TASK_INFO,
1147
- "session %d:%d task %d state 0x%lx abort_request %d\n",
1148
- session->device_type, session->index, task->task_index,
1149
- task->state, atomic_read(&task->abort_request));
734
+ mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
735
+ session->index, task->task_id, task->state);
1150736 if (!session->mpp) {
1151737 mpp_err("session %d session->mpp is null.\n", session->index);
1152738 return;
@@ -1165,30 +751,16 @@
1165751 kthread_queue_work(&mpp->queue->worker, &mpp->work);
1166752 }
1167753
1168
-static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
1169
-{
1170
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1171
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1172
-
1173
- atomic_inc(&link_dec->task_timeout);
1174
- rkvdec2_link_trigger_work(mpp);
1175
-}
1176
-
1177
-static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1178
-{
1179
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1180
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1181
-
1182
- link_dec->task_irq++;
1183
- rkvdec2_link_trigger_work(mpp);
1184
-}
1185
-
1186
-static void rkvdec2_link_power_on(struct mpp_dev *mpp)
754
+static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1187755 {
1188756 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1189757 struct rkvdec_link_dev *link_dec = dec->link_dec;
1190758
1191759 if (!atomic_xchg(&link_dec->power_enabled, 1)) {
760
+ if (mpp_iommu_attach(mpp->iommu_info)) {
761
+ dev_err(mpp->dev, "mpp_iommu_attach failed\n");
762
+ return -ENODATA;
763
+ }
1192764 pm_runtime_get_sync(mpp->dev);
1193765 pm_stay_awake(mpp->dev);
1194766
@@ -1204,25 +776,10 @@
1204776 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
1205777 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
1206778 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
1207
-
1208
-#ifdef CONFIG_PM_DEVFREQ
1209
- if (dec->devfreq) {
1210
- unsigned long core_rate_hz;
1211
-
1212
- mutex_lock(&dec->devfreq->lock);
1213
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1214
- CLK_MODE_ADVANCED);
1215
- if (dec->core_rate_hz != core_rate_hz) {
1216
- dec->core_rate_hz = core_rate_hz;
1217
- update_devfreq(dec->devfreq);
1218
- }
1219
- mutex_unlock(&dec->devfreq->lock);
1220
-
1221
- return;
1222
- }
1223
-#endif
1224
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_ADVANCED);
779
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
780
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1225781 }
782
+ return 0;
1226783 }
1227784
1228785 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
@@ -1241,178 +798,281 @@
1241798 pm_relax(mpp->dev);
1242799 pm_runtime_put_sync_suspend(mpp->dev);
1243800
1244
- link_dec->task_decoded = 0;
1245
- link_dec->task_total = 0;
1246
-
1247801 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1248802 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1249803 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1250
-
1251
-#ifdef CONFIG_PM_DEVFREQ
1252
- if (dec->devfreq) {
1253
- unsigned long core_rate_hz;
1254
-
1255
- mutex_lock(&dec->devfreq->lock);
1256
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1257
- CLK_MODE_NORMAL);
1258
- if (dec->core_rate_hz != core_rate_hz) {
1259
- dec->core_rate_hz = core_rate_hz;
1260
- update_devfreq(dec->devfreq);
1261
- }
1262
- mutex_unlock(&dec->devfreq->lock);
1263
-
1264
- return;
1265
- }
1266
-#endif
1267
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_NORMAL);
804
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
805
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1268806 }
1269807 }
1270808
1271809 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1272810 {
1273811 struct mpp_dev *mpp;
812
+ struct rkvdec2_dev *dec;
1274813 struct mpp_session *session;
1275814 struct mpp_task *task = container_of(to_delayed_work(work_s),
1276815 struct mpp_task, timeout_work);
1277816
1278817 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1279818 mpp_err("task %d state %lx has been handled\n",
1280
- task->task_index, task->state);
819
+ task->task_id, task->state);
1281820 return;
1282821 }
1283822
1284823 if (!task->session) {
1285
- mpp_err("task %d session is null.\n", task->task_index);
824
+ mpp_err("task %d session is null.\n", task->task_id);
1286825 return;
1287826 }
1288827 session = task->session;
1289828
1290829 if (!session->mpp) {
1291830 mpp_err("task %d:%d mpp is null.\n", session->index,
1292
- task->task_index);
831
+ task->task_id);
1293832 return;
1294833 }
1295834 mpp = session->mpp;
1296
- rkvdec2_link_trigger_timeout(mpp);
835
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
836
+
837
+ dec = to_rkvdec2_dev(mpp);
838
+ atomic_inc(&dec->link_dec->task_timeout);
839
+
840
+ dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
841
+ session->index, task->task_index, task->state,
842
+ atomic_read(&dec->link_dec->task_timeout));
843
+
844
+ rkvdec2_link_trigger_work(mpp);
1297845 }
1298846
1299
-static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
847
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
848
+ struct device *iommu_dev,
849
+ unsigned long iova,
850
+ int status, void *arg)
1300851 {
1301
- struct mpp_task *task, *n;
852
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
853
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
854
+ struct mpp_task *mpp_task = NULL, *n;
855
+ struct mpp_taskqueue *queue;
1302856
1303
- mutex_lock(&queue->pending_lock);
1304
- /* Check and pop all timeout task */
1305
- list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1306
- struct mpp_session *session = task->session;
857
+ dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
858
+ iova, status, arg);
1307859
1308
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1309
- mutex_lock(&session->pending_lock);
1310
- /* wait and signal */
1311
- list_del_init(&task->queue_link);
1312
- mutex_unlock(&session->pending_lock);
1313
- kref_put(&task->ref, rkvdec2_link_free_task);
860
+ if (!mpp) {
861
+ dev_err(iommu_dev, "pagefault without device to handle\n");
862
+ return 0;
863
+ }
864
+ queue = mpp->queue;
865
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
866
+ struct rkvdec_link_info *info = dec->link_dec->info;
867
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
868
+ u32 *tb_reg = (u32 *)task->table->vaddr;
869
+ u32 irq_status = tb_reg[info->tb_reg_int];
870
+
871
+ if (!irq_status) {
872
+ mpp_task_dump_mem_region(mpp, mpp_task);
873
+ break;
1314874 }
1315875 }
1316
- mutex_unlock(&queue->pending_lock);
876
+
877
+ mpp_task_dump_hw_reg(mpp);
878
+ /*
879
+ * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
880
+ * Until the pagefault task finish by hw timeout.
881
+ */
882
+ rockchip_iommu_mask_irq(mpp->dev);
883
+ dec->mmu_fault = 1;
884
+
885
+ return 0;
886
+}
887
+
888
+static void rkvdec2_link_resend(struct mpp_dev *mpp)
889
+{
890
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
891
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
892
+ struct mpp_taskqueue *queue = mpp->queue;
893
+ struct mpp_task *mpp_task, *n;
894
+
895
+ link_dec->task_running = 0;
896
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
897
+ dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
898
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
899
+ clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
900
+ clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
901
+ rkvdec2_link_enqueue(link_dec, mpp_task);
902
+ }
1317903 }
1318904
1319905 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1320906 {
1321907 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1322908 struct rkvdec_link_dev *link_dec = dec->link_dec;
1323
- struct mpp_task *task;
1324909 struct mpp_taskqueue *queue = mpp->queue;
1325
- int task_irq = link_dec->task_irq;
1326
- int task_irq_prev = link_dec->task_irq_prev;
1327
- int task_timeout = atomic_read(&link_dec->task_timeout);
910
+ struct mpp_task *mpp_task = NULL, *n;
911
+ struct rkvdec_link_info *info = link_dec->info;
912
+ u32 reset_flag = 0;
913
+ u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
914
+ u32 link_en = atomic_read(&link_dec->power_enabled) ?
915
+ readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
916
+ u32 force_dequeue = iommu_fault || !link_en;
917
+ u32 dequeue_cnt = 0;
1328918
1329
- if (!link_dec->task_running)
1330
- goto done;
919
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
920
+ /*
921
+ * Because there are multiple tasks enqueue at the same time,
922
+ * soft timeout may be triggered at the same time, but in reality only
923
+ * first task is being timeout because of the hardware stuck,
924
+ * so only process the first task.
925
+ */
926
+ u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
927
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
928
+ u32 *tb_reg = (u32 *)task->table->vaddr;
929
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
930
+ u32 irq_status = tb_reg[info->tb_reg_int];
931
+ u32 task_done = irq_status || timeout_flag || abort_flag;
1331932
1332
- if (task_timeout != link_dec->task_timeout_prev) {
1333
- dev_info(link_dec->dev, "process task timeout\n");
1334
- atomic_inc(&mpp->reset_request);
1335
- link_dec->task_on_timeout =
1336
- task_timeout - link_dec->task_timeout_prev;
1337
- goto proc;
933
+ /*
934
+ * there are some cases will cause hw cannot write reg to ddr:
935
+ * 1. iommu pagefault
936
+ * 2. link stop(link_en == 0) because of err task, it is a rk356x issue.
937
+ * so need force dequeue one task.
938
+ */
939
+ if (force_dequeue)
940
+ task_done = 1;
941
+
942
+ if (!task_done)
943
+ break;
944
+
945
+ dequeue_cnt++;
946
+ /* check hack task only for rk356x*/
947
+ if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
948
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
949
+ list_move_tail(&task->table->link, &link_dec->unused_list);
950
+ list_del_init(&mpp_task->queue_link);
951
+ link_dec->task_running--;
952
+ link_dec->hack_task_running--;
953
+ kfree(task);
954
+ mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
955
+ link_dec->hack_task_running, irq_status,
956
+ timeout_flag, abort_flag);
957
+ continue;
958
+ }
959
+
960
+ /*
961
+ * if timeout/abort/force dequeue found, reset and stop hw first.
962
+ */
963
+ if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
964
+ dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
965
+ mpp_task->session->index, mpp_task->task_index,
966
+ timeout_flag, abort_flag, force_dequeue);
967
+ rkvdec2_link_reset(mpp);
968
+ reset_flag = 1;
969
+ dec->mmu_fault = 0;
970
+ mpp->irq_status = 0;
971
+ force_dequeue = 0;
972
+ }
973
+
974
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
975
+
976
+ task->irq_status = irq_status;
977
+ mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
978
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
979
+ rkvdec2_link_finish(mpp, mpp_task);
980
+
981
+ list_move_tail(&task->table->link, &link_dec->unused_list);
982
+ list_del_init(&mpp_task->queue_link);
983
+
984
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
985
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
986
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
987
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
988
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
989
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
990
+
991
+ wake_up(&mpp_task->wait);
992
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
993
+ link_dec->task_running--;
994
+
995
+ mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
996
+ mpp_task->session->index, mpp_task->task_index,
997
+ irq_status, timeout_flag, abort_flag);
998
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
999
+ dev_err(mpp->dev,
1000
+ "session %d task %d irq_status %#08x timeout %u abort %u\n",
1001
+ mpp_task->session->index, mpp_task->task_index,
1002
+ irq_status, timeout_flag, abort_flag);
1003
+ if (!reset_flag)
1004
+ atomic_inc(&mpp->reset_request);
1005
+ }
13381006 }
13391007
1340
- if (task_irq == task_irq_prev)
1341
- goto done;
1342
-
1343
- if (!atomic_read(&link_dec->power_enabled)) {
1344
- dev_info(link_dec->dev, "dequeue on power off\n");
1345
- goto done;
1346
- }
1347
-
1348
-proc:
1349
- task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1350
- queue_link);
1351
- if (!task) {
1352
- mpp_err("can found task on trydequeue with %d running task\n",
1353
- link_dec->task_running);
1354
- goto done;
1355
- }
1356
-
1357
- /* Check and process all finished task */
1358
- rkvdec2_link_isr(mpp);
1359
-
1360
-done:
1361
- link_dec->task_irq_prev = task_irq;
1362
- link_dec->task_timeout_prev = task_timeout;
1363
- link_dec->task_on_timeout = 0;
1364
-
1365
- mpp_taskqueue_scan_pending_abort_task(queue);
1366
-
1367
- /* TODO: if reset is needed do reset here */
1008
+ /* resend running task after reset */
1009
+ if (reset_flag && !list_empty(&queue->running_list))
1010
+ rkvdec2_link_resend(mpp);
13681011 }
13691012
1370
-static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1013
+static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
13711014 {
13721015 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
13731016 struct rkvdec_link_dev *link_dec = dec->link_dec;
1374
- u32 task_to_run = 0;
1375
- int slot_idx = 0;
1376
- int ret;
1377
- struct mpp_session *session = task->session;
1017
+ struct mpp_taskqueue *queue = mpp->queue;
1018
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
13781019
13791020 mpp_debug_enter();
13801021
1381
- /*
1382
- * for iommu share hardware, should attach to ensure
1383
- * working in current device
1384
- */
1385
- ret = mpp_iommu_attach(mpp->iommu_info);
1386
- if (ret) {
1387
- dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1388
- return -ENODATA;
1389
- }
1390
-
13911022 rkvdec2_link_power_on(mpp);
1392
- mpp_debug_func(DEBUG_TASK_INFO,
1393
- "%s session %d:%d task=%d state=0x%lx\n",
1394
- dev_name(mpp->dev), session->device_type,
1395
- session->index, task->task_index, task->state);
13961023
1397
- /* prepare the task for running */
1398
- if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1399
- mpp_err("task %d has been prepare twice\n", task->task_index);
1024
+ /* hack for rk356x */
1025
+ if (task->need_hack) {
1026
+ u32 *tb_reg;
1027
+ struct mpp_dma_buffer *table;
1028
+ struct rkvdec2_task *hack_task;
1029
+ struct rkvdec_link_info *info = link_dec->info;
14001030
1401
- rkvdec2_link_prepare(mpp, task);
1031
+ /* need reserved 2 unused task for need hack task */
1032
+ if (link_dec->task_running > (link_dec->task_capacity - 2))
1033
+ return -EBUSY;
14021034
1403
- task_to_run = link_dec->task_to_run;
1404
- if (!task_to_run) {
1405
- dev_err(link_dec->dev, "nothing to run\n");
1406
- goto done;
1035
+ table = list_first_entry_or_null(&link_dec->unused_list,
1036
+ struct mpp_dma_buffer,
1037
+ link);
1038
+ if (!table)
1039
+ return -EBUSY;
1040
+
1041
+ hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
1042
+
1043
+ if (!hack_task)
1044
+ return -ENOMEM;
1045
+
1046
+ mpp_task_init(mpp_task->session, &hack_task->mpp_task);
1047
+ INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
1048
+ rkvdec2_link_timeout_proc);
1049
+
1050
+ tb_reg = (u32 *)table->vaddr;
1051
+ memset(tb_reg + info->part_r[0].tb_reg_off, 0, info->part_r[0].reg_num);
1052
+ rkvdec2_3568_hack_fix_link(tb_reg + 4);
1053
+ list_move_tail(&table->link, &link_dec->used_list);
1054
+ hack_task->table = table;
1055
+ hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
1056
+ rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
1057
+ mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
1058
+ link_dec->hack_task_running++;
1059
+ mpp_dbg_link("hack task send to hw, hack running %d\n",
1060
+ link_dec->hack_task_running);
14071061 }
14081062
1409
- mpp_reset_down_read(mpp->reset_group);
1410
- link_dec->task_to_run = 0;
1411
- slot_idx = rkvdec_link_get_task_send(link_dec);
1412
- link_dec->task_running += task_to_run;
1413
- rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1063
+ /* process normal */
1064
+ if (!rkvdec2_link_prepare(mpp, mpp_task))
1065
+ return -EBUSY;
14141066
1415
-done:
1067
+ rkvdec2_link_enqueue(link_dec, mpp_task);
1068
+
1069
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1070
+ atomic_dec(&link_dec->task_pending);
1071
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1072
+
1073
+ mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
1074
+ mpp_task->session->index, mpp_task->task_index,
1075
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
14161076 mpp_debug_leave();
14171077
14181078 return 0;
@@ -1424,7 +1084,7 @@
 	int ret = rkvdec2_link_irq(mpp);
 
 	if (!ret)
-		rkvdec2_link_trigger_irq(mpp);
+		rkvdec2_link_trigger_work(mpp);
 
 	return IRQ_HANDLED;
 }
@@ -1470,10 +1130,10 @@
 				 struct mpp_task_msgs *msgs)
 {
 	struct mpp_task *task = NULL;
-	struct rkvdec2_task *dec_task = NULL;
 	struct mpp_dev *mpp = session->mpp;
-	u32 fmt;
 	struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
+	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
+	struct rkvdec_link_dev *link_dec = dec->link_dec;
 
 	task = rkvdec2_alloc_task(session, msgs);
 	if (!task) {
@@ -1482,6 +1142,9 @@
 	}
 
 	if (link_info->hack_setup) {
+		u32 fmt;
+		struct rkvdec2_task *dec_task = NULL;
+
 		dec_task = to_rkvdec2_task(task);
 		fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
 		dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
@@ -1490,6 +1153,7 @@
 	kref_init(&task->ref);
 	atomic_set(&task->abort_request, 0);
 	task->task_index = atomic_fetch_inc(&mpp->task_index);
+	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
 	INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
 
 	atomic_inc(&session->task_count);
@@ -1503,6 +1167,7 @@
 	mutex_lock(&mpp->queue->pending_lock);
 	list_add_tail(&task->queue_link, &mpp->queue->pending_list);
 	mutex_unlock(&mpp->queue->pending_lock);
+	atomic_inc(&link_dec->task_pending);
 
 	/* push current task to queue */
 	atomic_inc(&mpp->task_count);
@@ -1519,7 +1184,6 @@
 {
 	struct mpp_dev *mpp = session->mpp;
 	struct mpp_task *mpp_task;
-	struct rkvdec2_task *task;
 	int ret;
 
 	mpp_task = mpp_session_get_pending_task(session);
@@ -1528,16 +1192,15 @@
 		return -EIO;
 	}
 
-	task = to_rkvdec2_task(mpp_task);
-	ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
+	ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
				 msecs_to_jiffies(WAIT_TIMEOUT_MS));
 	if (ret) {
 		ret = rkvdec2_result(mpp, mpp_task, msgs);
 
 		mpp_session_pop_done(session, mpp_task);
 	} else {
-		mpp_err("task %d:%d statue %lx timeout -> abort\n",
-			session->index, mpp_task->task_index, mpp_task->state);
+		mpp_err("task %d:%d state %lx timeout -> abort\n",
			session->index, mpp_task->task_id, mpp_task->state);
 
 		atomic_inc(&mpp_task->abort_request);
 		set_bit(TASK_STATE_ABORT, &mpp_task->state);
@@ -1550,34 +1213,25 @@
15501213 void rkvdec2_link_worker(struct kthread_work *work_s)
15511214 {
15521215 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1553
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1554
- struct rkvdec_link_dev *link_dec = dec->link_dec;
15551216 struct mpp_task *task;
15561217 struct mpp_taskqueue *queue = mpp->queue;
1218
+ u32 all_done;
15571219
15581220 mpp_debug_enter();
15591221
1560
- /*
1561
- * process timeout and finished task.
1562
- */
1222
+ /* dequeue running task */
15631223 rkvdec2_link_try_dequeue(mpp);
15641224
1565
-again:
1225
+ /* process reset */
15661226 if (atomic_read(&mpp->reset_request)) {
1567
- if (link_dec->task_running || link_dec->task_prepared)
1568
- goto done;
1569
-
1570
- disable_irq(mpp->irq);
1571
- mpp_iommu_disable_irq(mpp->iommu_info);
15721227 rkvdec2_link_reset(mpp);
1573
- link_dec->task_decoded = 0;
1574
- link_dec->task_total = 0;
1575
- enable_irq(mpp->irq);
1576
- mpp_iommu_enable_irq(mpp->iommu_info);
1228
+ /* resend running task after reset */
1229
+ if (!list_empty(&queue->running_list))
1230
+ rkvdec2_link_resend(mpp);
15771231 }
1578
- /*
1579
- * process pending queue to find the task to accept.
1580
- */
1232
+
1233
+again:
1234
+ /* get pending task to process */
15811235 mutex_lock(&queue->pending_lock);
15821236 task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
15831237 queue_link);
@@ -1585,9 +1239,8 @@
 	if (!task)
 		goto done;
 
-	if (test_bit(TASK_STATE_ABORT, &task->state)) {
-		struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
-
+	/* check abort task */
+	if (atomic_read(&task->abort_request)) {
 		mutex_lock(&queue->pending_lock);
 		list_del_init(&task->queue_link);
 
@@ -1595,48 +1248,28 @@
15951248 set_bit(TASK_STATE_PROC_DONE, &task->state);
15961249
15971250 mutex_unlock(&queue->pending_lock);
1598
- wake_up(&dec_task->wait);
1251
+ wake_up(&task->wait);
15991252 kref_put(&task->ref, rkvdec2_link_free_task);
16001253 goto again;
16011254 }
16021255
1603
- /*
1604
- * if target device can accept more task send the task to run.
1605
- */
1606
- if (link_dec->task_running >= link_dec->task_capacity - 2)
1607
- goto done;
1608
-
1609
- if (mpp_task_queue(mpp, task)) {
1610
- /* failed to run */
1611
- mpp_err("%p failed to process task %p:%d\n",
1612
- mpp, task, task->task_index);
1613
- } else {
1614
- mutex_lock(&queue->pending_lock);
1615
- set_bit(TASK_STATE_RUNNING, &task->state);
1616
- list_move_tail(&task->queue_link, &queue->running_list);
1617
- mutex_unlock(&queue->pending_lock);
1256
+ /* queue task to hw */
1257
+ if (!mpp_task_queue(mpp, task))
16181258 goto again;
1619
- }
1259
+
16201260 done:
1621
- mpp_debug_leave();
16221261
1623
- if (link_dec->task_irq != link_dec->task_irq_prev ||
1624
- atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1625
- rkvdec2_link_trigger_work(mpp);
1262
+ /* if no task in pending and running list, power off device */
1263
+ mutex_lock(&queue->pending_lock);
1264
+ all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
1265
+ mutex_unlock(&queue->pending_lock);
16261266
1627
- /* if no task for running power off device */
1628
- {
1629
- u32 all_done = 0;
1630
-
1631
- mutex_lock(&queue->pending_lock);
1632
- all_done = list_empty(&queue->pending_list);
1633
- mutex_unlock(&queue->pending_lock);
1634
-
1635
- if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1636
- rkvdec2_link_power_off(mpp);
1637
- }
1267
+ if (all_done)
1268
+ rkvdec2_link_power_off(mpp);
16381269
16391270 mpp_session_cleanup_detach(queue, work_s);
1271
+
1272
+ mpp_debug_leave();
16401273 }
16411274
16421275 void rkvdec2_link_session_deinit(struct mpp_session *session)
@@ -1667,3 +1300,1258 @@
16671300
16681301 mpp_debug_leave();
16691302 }
1303
+
1304
+#define RKVDEC2_1080P_PIXELS (1920*1080)
1305
+#define RKVDEC2_4K_PIXELS (4096*2304)
1306
+#define RKVDEC2_8K_PIXELS (7680*4320)
1307
+#define RKVDEC2_CCU_TIMEOUT_20MS (0xefffff)
1308
+#define RKVDEC2_CCU_TIMEOUT_50MS (0x2cfffff)
1309
+#define RKVDEC2_CCU_TIMEOUT_100MS (0x4ffffff)
1310
+
1311
+static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
1312
+{
1313
+ u32 pixels = task->pixels;
1314
+
1315
+ if (pixels < RKVDEC2_1080P_PIXELS)
1316
+ return RKVDEC2_CCU_TIMEOUT_20MS;
1317
+ else if (pixels < RKVDEC2_4K_PIXELS)
1318
+ return RKVDEC2_CCU_TIMEOUT_50MS;
1319
+ else
1320
+ return RKVDEC2_CCU_TIMEOUT_100MS;
1321
+}
1322
+
1323
+int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1324
+{
1325
+ int ret;
1326
+ struct device_node *np;
1327
+ struct platform_device *pdev;
1328
+ struct rkvdec2_ccu *ccu;
1329
+
1330
+ mpp_debug_enter();
1331
+
1332
+ np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1333
+ if (!np || !of_device_is_available(np))
1334
+ return -ENODEV;
1335
+
1336
+ pdev = of_find_device_by_node(np);
1337
+ of_node_put(np);
1338
+ if (!pdev)
1339
+ return -ENODEV;
1340
+
1341
+ ccu = platform_get_drvdata(pdev);
1342
+ if (!ccu)
1343
+ return -ENOMEM;
1344
+
1345
+ ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1346
+ if (ret)
1347
+ return ret;
1348
+ dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1349
+
1350
+ /* if not the main-core, then attach the main core domain to current */
1351
+ if (dec->mpp.core_id != 0) {
1352
+ struct mpp_taskqueue *queue;
1353
+ struct mpp_iommu_info *ccu_info, *cur_info;
1354
+
1355
+ queue = dec->mpp.queue;
1356
+ /* set the ccu-domain for current device */
1357
+ ccu_info = queue->cores[0]->iommu_info;
1358
+ cur_info = dec->mpp.iommu_info;
1359
+ cur_info->domain = ccu_info->domain;
1360
+ mpp_iommu_attach(cur_info);
1361
+ }
1362
+
1363
+ dec->ccu = ccu;
1364
+
1365
+ dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1366
+ mpp_debug_enter();
1367
+
1368
+ return 0;
1369
+}
1370
+
1371
+static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
1372
+{
1373
+ struct mpp_dev *mpp;
1374
+ struct mpp_task *task = container_of(to_delayed_work(work_s),
1375
+ struct mpp_task, timeout_work);
1376
+
1377
+ if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1378
+ mpp_err("task %d state %lx has been handled\n",
1379
+ task->task_id, task->state);
1380
+ return;
1381
+ }
1382
+
1383
+ if (!task->session) {
1384
+ mpp_err("task %d session is null.\n", task->task_id);
1385
+ return;
1386
+ }
1387
+ mpp = mpp_get_task_used_device(task, task->session);
1388
+ mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
1389
+ task->task_index, task->state);
1390
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
1391
+ atomic_inc(&mpp->reset_request);
1392
+ atomic_inc(&mpp->queue->reset_request);
1393
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1394
+}
1395
+
1396
+int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1397
+{
1398
+ struct resource *res;
1399
+ struct rkvdec_link_dev *link_dec;
1400
+ struct device *dev = &pdev->dev;
1401
+
1402
+ mpp_debug_enter();
1403
+
1404
+ /* link structure */
1405
+ link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1406
+ if (!link_dec)
1407
+ return -ENOMEM;
1408
+
1409
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1410
+ if (!res)
1411
+ return -ENOMEM;
1412
+
1413
+ link_dec->info = dec->mpp.var->hw_info->link_info;
1414
+ link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1415
+ if (!link_dec->reg_base) {
1416
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
1417
+ return -ENOMEM;
1418
+ }
1419
+
1420
+ dec->link_dec = link_dec;
1421
+
1422
+ mpp_debug_leave();
1423
+
1424
+ return 0;
1425
+}
1426
+
1427
+static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1428
+ struct rkvdec2_ccu *ccu)
1429
+{
1430
+ if (!atomic_xchg(&ccu->power_enabled, 1)) {
1431
+ u32 i;
1432
+ struct mpp_dev *mpp;
1433
+
1434
+ /* ccu pd and clk on */
1435
+ pm_runtime_get_sync(ccu->dev);
1436
+ pm_stay_awake(ccu->dev);
1437
+ mpp_clk_safe_enable(ccu->aclk_info.clk);
1438
+ /* core pd and clk on */
1439
+ for (i = 0; i < queue->core_count; i++) {
1440
+ struct rkvdec2_dev *dec;
1441
+
1442
+ mpp = queue->cores[i];
1443
+ dec = to_rkvdec2_dev(mpp);
1444
+ pm_runtime_get_sync(mpp->dev);
1445
+ pm_stay_awake(mpp->dev);
1446
+ if (mpp->hw_ops->clk_on)
1447
+ mpp->hw_ops->clk_on(mpp);
1448
+
1449
+ mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1450
+ mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1451
+ mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1452
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
1453
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1454
+ }
1455
+ mpp_debug(DEBUG_CCU, "power on\n");
1456
+ }
1457
+
1458
+ return 0;
1459
+}
1460
+
1461
+static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1462
+ struct rkvdec2_ccu *ccu)
1463
+{
1464
+ if (atomic_xchg(&ccu->power_enabled, 0)) {
1465
+ u32 i;
1466
+ struct mpp_dev *mpp;
1467
+
1468
+ /* ccu pd and clk off */
1469
+ mpp_clk_safe_disable(ccu->aclk_info.clk);
1470
+ pm_relax(ccu->dev);
1471
+ pm_runtime_mark_last_busy(ccu->dev);
1472
+ pm_runtime_put_autosuspend(ccu->dev);
1473
+ /* core pd and clk off */
1474
+ for (i = 0; i < queue->core_count; i++) {
1475
+ mpp = queue->cores[i];
1476
+
1477
+ if (mpp->hw_ops->clk_off)
1478
+ mpp->hw_ops->clk_off(mpp);
1479
+ pm_relax(mpp->dev);
1480
+ pm_runtime_mark_last_busy(mpp->dev);
1481
+ pm_runtime_put_autosuspend(mpp->dev);
1482
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1483
+ }
1484
+ mpp_debug(DEBUG_CCU, "power off\n");
1485
+ }
1486
+
1487
+ return 0;
1488
+}
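+
+/*
+ * Note: power_on/power_off above are made idempotent by the atomic_xchg()
+ * on ccu->power_enabled, so the worker can call them on every pass without
+ * unbalancing the runtime-PM and clock reference counts.
+ */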
1489
+
1490
+static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1491
+{
1492
+ struct mpp_task *mpp_task = NULL, *n;
1493
+
1494
+ mpp_debug_enter();
1495
+
1496
+ list_for_each_entry_safe(mpp_task, n,
1497
+ &queue->running_list,
1498
+ queue_link) {
1499
+ struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1500
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1501
+ u32 irq_status = mpp->irq_status;
1502
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1503
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1504
+ u32 timing_en = mpp->srv->timing_en;
1505
+
1506
+ if (irq_status || timeout_flag || abort_flag) {
1507
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1508
+
1509
+ if (timing_en) {
1510
+ mpp_task->on_irq = ktime_get();
1511
+ set_bit(TASK_TIMING_IRQ, &mpp_task->state);
1512
+
1513
+ mpp_task->on_cancel_timeout = mpp_task->on_irq;
1514
+ set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
1515
+
1516
+ mpp_task->on_isr = mpp_task->on_irq;
1517
+ set_bit(TASK_TIMING_ISR, &mpp_task->state);
1518
+ }
1519
+
1520
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1521
+ cancel_delayed_work(&mpp_task->timeout_work);
1522
+ mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
1523
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
1524
+ task->irq_status = irq_status;
1525
+ mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1526
+ irq_status, timeout_flag, abort_flag);
1527
+ if (irq_status && mpp->dev_ops->finish)
1528
+ mpp->dev_ops->finish(mpp, mpp_task);
1529
+ else
1530
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1531
+
1532
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
1533
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
1534
+
1535
+ set_bit(mpp->core_id, &queue->core_idle);
1536
+ mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1537
+ /* Wake up the GET thread */
1538
+ wake_up(&mpp_task->wait);
1539
+ /* free task */
1540
+ list_del_init(&mpp_task->queue_link);
1541
+ kref_put(&mpp_task->ref, mpp_free_task);
1542
+ } else {
1543
+ /* NOTE: stop at the first task that has not finished */
1544
+ break;
1545
+ }
1546
+ }
1547
+
1548
+ mpp_debug_leave();
1549
+ return 0;
1550
+}
1551
+
1552
+static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1553
+ struct rkvdec2_ccu *ccu)
1554
+{
1555
+ int i;
1556
+
1557
+ for (i = queue->core_count - 1; i >= 0; i--) {
1558
+ u32 val;
1559
+
1560
+ struct mpp_dev *mpp = queue->cores[i];
1561
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1562
+
1563
+ if (mpp->disable)
1564
+ continue;
1565
+
1566
+ dev_info(mpp->dev, "resetting...\n");
1567
+ disable_hardirq(mpp->irq);
1568
+
1569
+ /* force idle, disconnect core and ccu */
1570
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1571
+
1572
+ /* soft reset */
1573
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1574
+ udelay(5);
1575
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1576
+ if (!(val & RKVDEC_SOFT_RESET_READY))
1577
+ mpp_err("soft reset fail, int %08x\n", val);
1578
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1579
+
1580
+ /* check bus idle */
1581
+ val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1582
+ if (!(val & RKVDEC_BIT_BUS_IDLE))
1583
+ mpp_err("bus busy\n");
1584
+
1585
+ if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1586
+ /* sip reset */
1587
+ rockchip_dmcfreq_lock();
1588
+ sip_smc_vpu_reset(i, 0, 0);
1589
+ rockchip_dmcfreq_unlock();
1590
+ } else {
1591
+ rkvdec2_reset(mpp);
1592
+ }
1593
+ /* clear error mask */
1594
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1595
+ ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1596
+ /* connect core and ccu */
1597
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1598
+ ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1599
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1600
+ atomic_set(&mpp->reset_request, 0);
1601
+
1602
+ enable_irq(mpp->irq);
1603
+ dev_info(mpp->dev, "reset done\n");
1604
+ }
1605
+ atomic_set(&queue->reset_request, 0);
1606
+
1607
+ return 0;
1608
+}
1609
+
1610
+void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1611
+ struct mpp_task_msgs *msgs)
1612
+{
1613
+ int ret;
1614
+ struct rkvdec2_task *task;
1615
+
1616
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
1617
+ if (!task)
1618
+ return NULL;
1619
+
1620
+ ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1621
+ if (ret) {
1622
+ kfree(task);
1623
+ return NULL;
1624
+ }
1625
+
1626
+ return &task->mpp_task;
1627
+}
1628
+
1629
+static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
1630
+{
1631
+ u32 i = 0;
1632
+
1633
+ for (i = 0; i < mpp->queue->core_count; i++) {
1634
+ struct mpp_dev *core = mpp->queue->cores[i];
1635
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
1636
+ void __iomem *mmu_base = dec->mmu_base;
1637
+ u32 mmu0_st;
1638
+ u32 mmu1_st;
1639
+ u32 mmu0_pta;
1640
+ u32 mmu1_pta;
1641
+
1642
+ if (!mmu_base)
1643
+ return;
1644
+
1645
+ #define FAULT_STATUS 0x7e2
1646
+ rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
1647
+
1648
+ mmu0_st = readl(mmu_base + 0x4);
1649
+ mmu1_st = readl(mmu_base + 0x44);
1650
+ mmu0_pta = readl(mmu_base + 0xc);
1651
+ mmu1_pta = readl(mmu_base + 0x4c);
1652
+
1653
+ dec->mmu0_st = mmu0_st;
1654
+ dec->mmu1_st = mmu1_st;
1655
+ dec->mmu0_pta = mmu0_pta;
1656
+ dec->mmu1_pta = mmu1_pta;
1657
+
1658
+ pr_err("core %d mmu0 %08x %08x mm1 %08x %08x\n",
1659
+ core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
1660
+ if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
1661
+ mmu0_pta || mmu1_pta) {
1662
+ dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
1663
+ dec->mmu_fault = 1;
1664
+ pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
1665
+ rockchip_iommu_mask_irq(core->dev);
1666
+ } else {
1667
+ dec->mmu_fault = 0;
1668
+ dec->fault_iova = 0;
1669
+ }
1670
+ }
1671
+}
1672
+
1673
+int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1674
+ struct device *iommu_dev,
1675
+ unsigned long iova, int status, void *arg)
1676
+{
1677
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1678
+
1679
+ mpp_debug_enter();
1680
+
1681
+ rkvdec2_ccu_check_pagefault_info(mpp);
1682
+
1683
+ mpp->queue->iommu_fault = 1;
1684
+ atomic_inc(&mpp->queue->reset_request);
1685
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1686
+
1687
+ mpp_debug_leave();
1688
+
1689
+ return 0;
1690
+}
1691
+
1692
+irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1693
+{
1694
+ struct mpp_dev *mpp = param;
1695
+ u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1696
+
1697
+ if (irq_status & RKVDEC_IRQ_RAW) {
1698
+ mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1699
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
1700
+ atomic_inc(&mpp->reset_request);
1701
+ atomic_inc(&mpp->queue->reset_request);
1702
+ }
1703
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1704
+ mpp->irq_status = irq_status;
1705
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1706
+ return IRQ_HANDLED;
1707
+ }
1708
+ return IRQ_NONE;
1709
+}
1710
+
1711
+static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1712
+{
1713
+ u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1714
+
1715
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1716
+
1717
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1718
+
1719
+ return 0;
1720
+}
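+
+/*
+ * rkvdec2_set_core_info() packs the session index into the field selected by
+ * RKVDEC_REG_FILM_IDX_MASK (starting at bit 16) of
+ * reg[RKVDEC_REG_CORE_CTRL_INDEX]; both the soft and hard CCU paths call it
+ * right before a task is handed to the hardware.
+ */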
1721
+
1722
+static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1723
+{
1724
+ u32 i, reg_en, reg;
1725
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1726
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1727
+ u32 timing_en = mpp->srv->timing_en;
1728
+
1729
+ mpp_debug_enter();
1730
+
1731
+ /* set reg for link */
1732
+ reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1733
+ writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1734
+
1735
+ /* set reg for ccu */
1736
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1737
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1738
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1739
+
1740
+ /* set cache size */
1741
+ reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1742
+ RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1743
+ if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1744
+ reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1745
+
1746
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1747
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1748
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1749
+ /* clear cache */
1750
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1751
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1752
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1753
+
1754
+ mpp_iommu_flush_tlb(mpp->iommu_info);
1755
+ /* disable multicore pu/colmv offset req timeout reset */
1756
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
1757
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
1758
+ /* set registers for hardware */
1759
+ reg_en = mpp_task->hw_info->reg_en;
1760
+ for (i = 0; i < task->w_req_cnt; i++) {
1761
+ int s, e;
1762
+ struct mpp_request *req = &task->w_reqs[i];
1763
+
1764
+ s = req->offset / sizeof(u32);
1765
+ e = s + req->size / sizeof(u32);
1766
+ mpp_write_req(mpp, task->reg, s, e, reg_en);
1767
+ }
1768
+ /* init current task */
1769
+ mpp->cur_task = mpp_task;
1770
+
1771
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
1772
+
1773
+ mpp->irq_status = 0;
1774
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1775
+ /* Flush the registers before starting the device */
1776
+ wmb();
1777
+ mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1778
+
1779
+ mpp_task_run_end(mpp_task, timing_en);
1780
+
1781
+ mpp_debug_leave();
1782
+
1783
+ return 0;
1784
+}
1785
+
1786
+static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1787
+ struct mpp_task *mpp_task)
1788
+{
1789
+ u32 i = 0;
1790
+ struct rkvdec2_dev *dec = NULL;
1791
+
1792
+ for (i = 0; i < queue->core_count; i++) {
1793
+ struct mpp_dev *mpp = queue->cores[i];
1794
+ struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);
1795
+
1796
+ if (mpp->disable)
1797
+ continue;
1798
+
1799
+ if (test_bit(i, &queue->core_idle)) {
1800
+ if (!dec) {
1801
+ dec = core;
1802
+ continue;
1803
+ }
1804
+ /* prefer the core with fewer queued tasks */
1805
+ if (core->task_index < dec->task_index)
1806
+ dec = core;
1807
+ }
1808
+ }
1809
+ /* if an idle core was found */
1810
+ if (dec) {
1811
+ mpp_task->mpp = &dec->mpp;
1812
+ mpp_task->core_id = dec->mpp.core_id;
1813
+ clear_bit(mpp_task->core_id, &queue->core_idle);
1814
+ dec->task_index++;
1815
+ atomic_inc(&dec->mpp.task_count);
1816
+ mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1817
+ return mpp_task->mpp;
1818
+ }
1819
+
1820
+ return NULL;
1821
+}
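+
+/*
+ * Core selection policy: among the idle, not-disabled cores the one with the
+ * smallest task_index (i.e. the least used so far) wins; the winner is marked
+ * busy in queue->core_idle and its task counters are bumped before returning.
+ */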
1822
+
1823
+static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1824
+{
1825
+ struct mpp_dev *mpp;
1826
+ bool flag = false;
1827
+ u32 i = 0;
1828
+
1829
+ for (i = 0; i < queue->core_count; i++) {
1830
+ mpp = queue->cores[i];
1831
+ if (mpp->disable)
1832
+ continue;
1833
+ if (!test_bit(i, &queue->core_idle)) {
1834
+ flag = true;
1835
+ break;
1836
+ }
1837
+ }
1838
+
1839
+ return flag;
1840
+}
1841
+
1842
+static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
1843
+ struct mpp_taskqueue *queue)
1844
+{
1845
+ mutex_lock(&queue->session_lock);
1846
+ while (atomic_read(&queue->detach_count)) {
1847
+ struct mpp_session *session = NULL;
1848
+
1849
+ session = list_first_entry_or_null(&queue->session_detach,
1850
+ struct mpp_session,
1851
+ session_link);
1852
+ if (session) {
1853
+ list_del_init(&session->session_link);
1854
+ atomic_dec(&queue->detach_count);
1855
+ }
1856
+
1857
+ mutex_unlock(&queue->session_lock);
1858
+
1859
+ if (session) {
1860
+ mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1861
+ atomic_read(&queue->detach_count));
1862
+ mpp_session_deinit(session);
1863
+ }
1864
+
1865
+ mutex_lock(&queue->session_lock);
1866
+ }
1867
+ mutex_unlock(&queue->session_lock);
1868
+
1869
+ return 0;
1870
+}
1871
+
1872
+void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1873
+{
1874
+ struct mpp_task *mpp_task;
1875
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1876
+ struct mpp_taskqueue *queue = mpp->queue;
1877
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1878
+ u32 timing_en = mpp->srv->timing_en;
1879
+
1880
+ mpp_debug_enter();
1881
+
1882
+ /* 1. process all finished tasks in the running list */
1883
+ rkvdec2_soft_ccu_dequeue(queue);
1884
+
1885
+ /* 2. process reset request */
1886
+ if (atomic_read(&queue->reset_request)) {
1887
+ if (!rkvdec2_core_working(queue)) {
1888
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1889
+ rkvdec2_soft_ccu_reset(queue, dec->ccu);
1890
+ }
1891
+ }
1892
+
1893
+ /* 3. process pending task */
1894
+ while (1) {
1895
+ if (atomic_read(&queue->reset_request))
1896
+ break;
1897
+ /* get one task from the pending list */
1898
+ mutex_lock(&queue->pending_lock);
1899
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
1900
+ struct mpp_task, queue_link);
1901
+ mutex_unlock(&queue->pending_lock);
1902
+ if (!mpp_task)
1903
+ break;
1904
+
1905
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1906
+ mutex_lock(&queue->pending_lock);
1907
+ list_del_init(&mpp_task->queue_link);
1908
+
1909
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
1910
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
1911
+
1912
+ mutex_unlock(&queue->pending_lock);
1913
+ wake_up(&mpp_task->wait);
1914
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
1915
+ continue;
1916
+ }
1917
+ /* find an idle core */
1918
+ mpp = rkvdec2_get_idle_core(queue, mpp_task);
1919
+ if (!mpp)
1920
+ break;
1921
+
1922
+ if (timing_en) {
1923
+ mpp_task->on_run = ktime_get();
1924
+ set_bit(TASK_TIMING_RUN, &mpp_task->state);
1925
+ }
1926
+
1927
+ /* set session index */
1928
+ rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1929
+ /* set rcb buffer */
1930
+ mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1931
+
1932
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
1933
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1934
+ rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1935
+ /* pending to running */
1936
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1937
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1938
+ }
1939
+
1940
+ /* 4. power off when both the running and pending lists are empty */
1941
+ if (list_empty(&queue->running_list) &&
1942
+ list_empty(&queue->pending_list))
1943
+ rkvdec2_ccu_power_off(queue, dec->ccu);
1944
+
1945
+ /* 5. handle sessions detaching from the queue */
1946
+ rkvdec2_ccu_link_session_detach(mpp, queue);
1947
+
1948
+ mpp_debug_leave();
1949
+}
1950
+
1951
+int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
1952
+ struct rkvdec_link_dev *link_dec)
1953
+{
1954
+ int ret, i;
1955
+ struct mpp_dma_buffer *table;
1956
+ struct mpp_dev *mpp = &dec->mpp;
1957
+
1958
+ mpp_debug_enter();
1959
+
1960
+ /* alloc table pointer array */
1961
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
1962
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
1963
+ if (!table)
1964
+ return -ENOMEM;
1965
+
1966
+ /* alloc table buffer */
1967
+ ret = rkvdec2_link_alloc_table(mpp, link_dec);
1968
+ if (ret)
1969
+ return ret;
1970
+
1971
+ /* init table array */
1972
+ dec->ccu->table_array = table;
1973
+ for (i = 0; i < mpp->task_capacity; i++) {
1974
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
1975
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
1976
+ table[i].size = link_dec->link_node_size;
1977
+ INIT_LIST_HEAD(&table[i].link);
1978
+ list_add_tail(&table[i].link, &dec->ccu->unused_list);
1979
+ }
1980
+
1981
+ return 0;
1982
+}
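+
+/*
+ * The CCU link table is one DMA buffer allocated by rkvdec2_link_alloc_table()
+ * and sliced here into task_capacity nodes of link_node_size bytes each; every
+ * slice gets its own mpp_dma_buffer descriptor and starts life on the CCU
+ * unused_list, from which the prepare/dequeue paths move it back and forth.
+ */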
1983
+
1984
+static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
1985
+{
1986
+ u32 i;
1987
+
1988
+ for (i = 0; i < 10; i++)
1989
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1990
+
1991
+ for (i = 16; i < 22; i++)
1992
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1993
+}
1994
+
1995
+static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
1996
+{
1997
+ u32 i;
1998
+
1999
+ for (i = 0; i < 10; i++)
2000
+ mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
2001
+}
2002
+
2003
+static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
2004
+{
2005
+ u32 j;
2006
+
2007
+ if (task) {
2008
+ for (j = 0; j < 273; j++)
2009
+ mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j*4), task->reg[j]);
2010
+ } else {
2011
+ for (j = 0; j < 273; j++)
2012
+ mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j*4));
2013
+ }
2014
+}
2015
+
2016
+irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
2017
+{
2018
+ u32 irq_status;
2019
+ struct mpp_dev *mpp = param;
2020
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2021
+
2022
+ irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2023
+ dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2024
+ if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
2025
+ dec->link_dec->irq_status = irq_status;
2026
+ mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
2027
+ mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
2028
+ mpp->core_id, irq_status, mpp->irq_status);
2029
+
2030
+ writel(irq_status & 0xfffff0ff,
2031
+ dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2032
+
2033
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
2034
+ return IRQ_HANDLED;
2035
+ }
2036
+
2037
+ return IRQ_NONE;
2038
+}
2039
+
2040
+static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
2041
+{
2042
+ u32 i, off, s, n;
2043
+ struct rkvdec_link_part *part = hw->part_r;
2044
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2045
+
2046
+ mpp_debug_enter();
2047
+
2048
+ for (i = 0; i < hw->part_r_num; i++) {
2049
+ off = part[i].tb_reg_off;
2050
+ s = part[i].reg_start;
2051
+ n = part[i].reg_num;
2052
+ memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
2053
+ }
2054
+ /* revert hack for irq status */
2055
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
2056
+
2057
+ mpp_debug_leave();
2058
+
2059
+ return 0;
2060
+}
2061
+
2062
+static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
2063
+ struct rkvdec2_ccu *ccu,
2064
+ struct rkvdec_link_info *hw)
2065
+{
2066
+ struct mpp_task *mpp_task = NULL, *n;
2067
+ u32 dump_reg = 0;
2068
+ u32 dequeue_none = 0;
2069
+
2070
+ mpp_debug_enter();
2071
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
2072
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
2073
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
2074
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2075
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2076
+ u32 irq_status = tb_reg[hw->tb_reg_int];
2077
+ u32 ccu_decoded_num, ccu_total_dec_num;
2078
+
2079
+ ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2080
+ ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2081
+ mpp_debug(DEBUG_IRQ_CHECK,
2082
+ "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2083
+ mpp_task->session->index, mpp_task->task_index, task->width,
2084
+ task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2085
+ timeout_flag, abort_flag, (u32)task->table->iova,
2086
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next],
2087
+ ccu_decoded_num, ccu_total_dec_num);
2088
+
2089
+ if (irq_status || timeout_flag || abort_flag) {
2090
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);
2091
+
2092
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2093
+ cancel_delayed_work(&mpp_task->timeout_work);
2094
+ mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2095
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2096
+ task->irq_status = irq_status;
2097
+
2098
+ if (irq_status)
2099
+ rkvdec2_hard_ccu_finish(hw, task);
2100
+
2101
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2102
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2103
+
2104
+ if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2105
+ u32 i;
2106
+
2107
+ mpp_err("###### ccu #####\n");
2108
+ rkvdec2_dump_ccu(ccu);
2109
+ for (i = 0; i < queue->core_count; i++) {
2110
+ mpp_err("###### core %d #####\n", i);
2111
+ rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
2112
+ rkvdec2_dump_core(queue->cores[i], task);
2113
+ }
2114
+ dump_reg = 1;
2115
+ }
2116
+ list_move_tail(&task->table->link, &ccu->unused_list);
2117
+ /* free task */
2118
+ list_del_init(&mpp_task->queue_link);
2119
+ /* Wake up the GET thread */
2120
+ wake_up(&mpp_task->wait);
2121
+ if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2122
+ pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
2123
+ mpp_task->session->index, mpp_task->task_index,
2124
+ irq_status, timeout_flag, abort_flag);
2125
+ atomic_inc(&queue->reset_request);
2126
+ }
2127
+
2128
+ kref_put(&mpp_task->ref, mpp_free_task);
2129
+ } else {
2130
+ dequeue_none++;
2131
+ /*
2132
+ * there are only two cores, so once more than two unfinished
2133
+ * tasks are found the remaining tasks cannot have been started
2134
+ * by the hardware yet and the scan can stop early.
2135
+ */
2136
+ if (dequeue_none > 2)
2137
+ break;
2138
+ }
2139
+ }
2140
+
2141
+ mpp_debug_leave();
2142
+ return 0;
2143
+}
2144
+
2145
+static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
2146
+{
2147
+ int i = 0;
2148
+
2149
+ mpp_debug_enter();
2150
+
2151
+ /* reset and active core */
2152
+ for (i = 0; i < queue->core_count; i++) {
2153
+ u32 val = 0;
2154
+ struct mpp_dev *mpp = queue->cores[i];
2155
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2156
+
2157
+ if (mpp->disable)
2158
+ continue;
2159
+ dev_info(mpp->dev, "resetting...\n");
2160
+ disable_hardirq(mpp->irq);
2161
+ /* force idle */
2162
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
2163
+ writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2164
+
2165
+ {
2166
+ /* soft reset */
2167
+ u32 val;
2168
+
2169
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
2170
+ udelay(5);
2171
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
2172
+ if (!(val & RKVDEC_SOFT_RESET_READY))
2173
+ mpp_err("soft reset fail, int %08x\n", val);
2174
+
2175
+ // /* cru reset */
2176
+ // dev_info(mpp->dev, "cru reset\n");
2177
+ // rkvdec2_reset(mpp);
2178
+ }
2179
+#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
2180
+ rockchip_dmcfreq_lock();
2181
+ sip_smc_vpu_reset(i, 0, 0);
2182
+ rockchip_dmcfreq_unlock();
2183
+#else
2184
+ rkvdec2_reset(mpp);
2185
+#endif
2186
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
2187
+ enable_irq(mpp->irq);
2188
+ atomic_set(&mpp->reset_request, 0);
2189
+ val = mpp_read_relaxed(mpp, 272*4);
2190
+ dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
2191
+ }
2192
+ /* reset ccu */
2193
+ mpp_safe_reset(ccu->rst_a);
2194
+ udelay(5);
2195
+ mpp_safe_unreset(ccu->rst_a);
2196
+
2197
+ mpp_debug_leave();
2198
+ return 0;
2199
+}
2200
+
2201
+static struct mpp_task *
2202
+rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2203
+ struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2204
+{
2205
+ u32 i, off, s, n;
2206
+ u32 *tb_reg;
2207
+ struct mpp_dma_buffer *table = NULL;
2208
+ struct rkvdec_link_part *part;
2209
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2210
+
2211
+ mpp_debug_enter();
2212
+
2213
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2214
+ return mpp_task;
2215
+
2216
+ /* ensure that the current table's iova points to the next link table */
2217
+ {
2218
+ struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2219
+
2220
+ list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2221
+ if (!table0) {
2222
+ table0 = table;
2223
+ continue;
2224
+ }
2225
+ if (!table1)
2226
+ table1 = table;
2227
+ break;
2228
+ }
2229
+ if (!table0 || !table1)
2230
+ return NULL;
2231
+ ((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2232
+ table = table0;
2233
+ }
2234
+
2235
+ /* set session idx */
2236
+ rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2237
+ tb_reg = (u32 *)table->vaddr;
2238
+ part = hw->part_w;
2239
+
2240
+ /* disable multicore pu/colmv offset req timeout reset */
2241
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2242
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2243
+
2244
+ for (i = 0; i < hw->part_w_num; i++) {
2245
+ off = part[i].tb_reg_off;
2246
+ s = part[i].reg_start;
2247
+ n = part[i].reg_num;
2248
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2249
+ }
2250
+
2251
+ /* memset read registers */
2252
+ part = hw->part_r;
2253
+ for (i = 0; i < hw->part_r_num; i++) {
2254
+ off = part[i].tb_reg_off;
2255
+ n = part[i].reg_num;
2256
+ memset(&tb_reg[off], 0, n * sizeof(u32));
2257
+ }
2258
+ list_move_tail(&table->link, &ccu->used_list);
2259
+ task->table = table;
2260
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2261
+ mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2262
+ mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2263
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2264
+
2265
+ mpp_debug_leave();
2266
+
2267
+ return mpp_task;
2268
+}
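+
+/*
+ * Prepare needs two free table nodes: the first one receives the task's
+ * registers and the second one only provides a valid iova for the first
+ * node's "next" pointer (tb_reg_next), so the hardware link chain never
+ * points at a stale descriptor. If fewer than two nodes are free, prepare
+ * returns NULL and the task stays on the pending list until a running task
+ * releases its node.
+ */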
2269
+
2270
+static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
2271
+{
2272
+ int ret = 0;
2273
+ u32 i, val;
2274
+ u32 reg, reg_idx, rcb_size, rcb_offset;
2275
+
2276
+ if (!dec->rcb_iova && !dec->rcb_info_count)
2277
+ goto done;
2278
+ /* check whether fixed */
2279
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2280
+ if (val & RKVDEC_CCU_BIT_FIX_RCB)
2281
+ goto done;
2282
+ /* set registers */
2283
+ rcb_offset = 0;
2284
+ for (i = 0; i < dec->rcb_info_count; i += 2) {
2285
+ reg_idx = dec->rcb_infos[i];
2286
+ rcb_size = dec->rcb_infos[i + 1];
2287
+ mpp_debug(DEBUG_SRAM_INFO,
2288
+ "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
2289
+ reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
2290
+ if ((rcb_offset + rcb_size) > dec->rcb_size) {
2291
+ mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
2292
+ ret = -ENOMEM;
2293
+ goto done;
2294
+ }
2295
+ reg = dec->rcb_iova + rcb_offset;
2296
+ mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
2297
+ rcb_offset += rcb_size;
2298
+ }
2299
+
2300
+ val |= RKVDEC_CCU_BIT_FIX_RCB;
2301
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2302
+done:
2303
+ return ret;
2304
+}
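+
+/*
+ * rcb_infos is consumed as (register index, size) pairs: each register named
+ * by a pair is pointed at the next chunk of the RCB buffer (rcb_iova) and the
+ * running offset advances by that size, failing with -ENOMEM once the chunks
+ * no longer fit in rcb_size. RKVDEC_CCU_BIT_FIX_RCB in the link IRQ register
+ * marks the core as already programmed so this is done only once.
+ */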
2305
+
2306
+static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
2307
+ struct mpp_task *mpp_task,
2308
+ struct mpp_taskqueue *queue,
2309
+ struct mpp_dev *mpp)
2310
+{
2311
+ u32 ccu_en, work_mode, link_mode;
2312
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2313
+ u32 timing_en = mpp->srv->timing_en;
2314
+
2315
+ mpp_debug_enter();
2316
+
2317
+ if (test_bit(TASK_STATE_START, &mpp_task->state))
2318
+ goto done;
2319
+
2320
+ ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2321
+ mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
2322
+ if (!ccu_en) {
2323
+ u32 i;
2324
+
2325
+ /* set work mode */
2326
+ work_mode = 0;
2327
+ for (i = 0; i < queue->core_count; i++) {
2328
+ u32 val;
2329
+ struct mpp_dev *core = queue->cores[i];
2330
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2331
+
2332
+ if (core->disable)
2333
+ continue;
2334
+ work_mode |= dec->core_mask;
2335
+ rkvdec2_ccu_link_fix_rcb_regs(dec);
2336
+ /* control by ccu */
2337
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2338
+ val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
2339
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2340
+ }
2341
+ writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2342
+ ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2343
+ mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
2344
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
2345
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));
2346
+
2347
+ /* set auto gating */
2348
+ writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
2349
+ /* link start base */
2350
+ writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
2351
+ /* enable link */
2352
+ writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2353
+ }
2354
+
2355
+ /* set link mode */
2356
+ link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
2357
+ writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);
2358
+
2359
+ /* flush tlb before starting hardware */
2360
+ mpp_iommu_flush_tlb(mpp->iommu_info);
2361
+ /* wmb */
2362
+ wmb();
2363
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
2364
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
2365
+ /* configure done */
2366
+ writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
2367
+ mpp_task_run_end(mpp_task, timing_en);
2368
+
2369
+ /* pending to running */
2370
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
2371
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
2372
+ mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
2373
+ mpp_task->session->index, mpp_task->task_index,
2374
+ (u32)task->table->iova, mpp_task->state,
2375
+ readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
2376
+done:
2377
+ mpp_debug_leave();
2378
+
2379
+ return 0;
2380
+}
2381
+
2382
+static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
2383
+ struct mpp_task *mpp_task)
2384
+{
2385
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2386
+
2387
+ mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
2388
+ mpp_task->session->index, mpp_task->task_index,
2389
+ task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
2390
+ dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
2391
+
2392
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2393
+ task->irq_status |= BIT(4);
2394
+ cancel_delayed_work(&mpp_task->timeout_work);
2395
+ rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
2396
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2397
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2398
+ list_move_tail(&task->table->link, &dec->ccu->unused_list);
2399
+ list_del_init(&mpp_task->queue_link);
2400
+ /* Wake up the GET thread */
2401
+ wake_up(&mpp_task->wait);
2402
+ kref_put(&mpp_task->ref, mpp_free_task);
2403
+ dec->mmu_fault = 0;
2404
+ dec->fault_iova = 0;
2405
+}
2406
+
2407
+static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
2408
+{
2409
+ struct mpp_task *loop = NULL, *n;
2410
+
2411
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2412
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2413
+ u32 iova = (u32)task->table->iova;
2414
+ u32 i;
2415
+
2416
+ for (i = 0; i < queue->core_count; i++) {
2417
+ struct mpp_dev *core = queue->cores[i];
2418
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2419
+
2420
+ if (!dec->mmu_fault || dec->fault_iova != iova)
2421
+ continue;
2422
+ rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
2423
+ }
2424
+ }
2425
+}
2426
+
2427
+static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2428
+{
2429
+ struct rkvdec2_task *task_pre = NULL;
2430
+ struct mpp_task *loop = NULL, *n;
2431
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2432
+
2433
+ /* re-sort the running list */
2434
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2435
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2436
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2437
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2438
+
2439
+ if (!irq_status) {
2440
+ if (task_pre) {
2441
+ tb_reg = (u32 *)task_pre->table->vaddr;
2442
+ tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
2443
+ }
2444
+ task_pre = task;
2445
+ }
2446
+ }
2447
+
2448
+ if (task_pre) {
2449
+ struct mpp_dma_buffer *tbl;
2450
+ u32 *tb_reg;
2451
+
2452
+ tbl = list_first_entry_or_null(&dec->ccu->unused_list,
2453
+ struct mpp_dma_buffer, link);
2454
+ WARN_ON(!tbl);
2455
+ if (tbl) {
2456
+ tb_reg = (u32 *)task_pre->table->vaddr;
2457
+ tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
2458
+ }
2459
+ }
2460
+
2461
+ /* resend */
2462
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2463
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2464
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2465
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2466
+
2467
+ mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
2468
+ loop->session->index, loop->task_index, (u32)task->table->iova,
2469
+ tb_reg[dec->link_dec->info->tb_reg_next], irq_status);
2470
+
2471
+ if (!irq_status) {
2472
+ cancel_delayed_work(&loop->timeout_work);
2473
+ clear_bit(TASK_STATE_START, &loop->state);
2474
+ rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
2475
+ }
2476
+ }
2477
+}
2478
+
2479
+void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
2480
+{
2481
+ struct mpp_task *mpp_task;
2482
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
2483
+ struct mpp_taskqueue *queue = mpp->queue;
2484
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2485
+
2486
+ mpp_debug_enter();
2487
+
2488
+ /* 1. process all finished tasks in the running list */
2489
+ rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);
2490
+
2491
+ /* 2. process reset request */
2492
+ if (atomic_read(&queue->reset_request) &&
2493
+ (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
2494
+ /*
2495
+ * cancel the timeout work of tasks on the running list to
2496
+ * avoid a software timeout caused by a long reset
2497
+ */
2498
+ struct mpp_task *loop = NULL, *n;
2499
+
2500
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2501
+ cancel_delayed_work(&loop->timeout_work);
2502
+ }
2503
+ /* reset process */
2504
+ rkvdec2_hard_ccu_reset(queue, dec->ccu);
2505
+ atomic_set(&queue->reset_request, 0);
2506
+ /* if iommu pagefault, find the fault task and drop it */
2507
+ if (queue->iommu_fault) {
2508
+ rkvdec2_hard_ccu_pagefault_proc(queue);
2509
+ queue->iommu_fault = 0;
2510
+ }
2511
+
2512
+ /* relink the iova chain of tasks left on the running list and resend them to hw */
2513
+ if (!list_empty(&queue->running_list))
2514
+ rkvdec2_hard_ccu_resend_tasks(mpp, queue);
2515
+ }
2516
+
2517
+ /* 3. process pending task */
2518
+ while (1) {
2519
+ if (atomic_read(&queue->reset_request))
2520
+ break;
2521
+
2522
+ /* get one task from the pending list */
2523
+ mutex_lock(&queue->pending_lock);
2524
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
2525
+ struct mpp_task, queue_link);
2526
+ mutex_unlock(&queue->pending_lock);
2527
+
2528
+ if (!mpp_task)
2529
+ break;
2530
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
2531
+ mutex_lock(&queue->pending_lock);
2532
+ list_del_init(&mpp_task->queue_link);
2533
+ mutex_unlock(&queue->pending_lock);
2534
+ kref_put(&mpp_task->ref, mpp_free_task);
2535
+ continue;
2536
+ }
2537
+
2538
+ mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
2539
+ if (!mpp_task)
2540
+ break;
2541
+
2542
+ rkvdec2_ccu_power_on(queue, dec->ccu);
2543
+ rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
2544
+ }
2545
+
2546
+ /* 4. poweroff when running and pending list are empty */
2547
+ mutex_lock(&queue->pending_lock);
2548
+ if (list_empty(&queue->running_list) &&
2549
+ list_empty(&queue->pending_list))
2550
+ rkvdec2_ccu_power_off(queue, dec->ccu);
2551
+ mutex_unlock(&queue->pending_lock);
2552
+
2553
+ /* 5. handle sessions detaching from the queue */
2554
+ mpp_session_cleanup_detach(queue, work_s);
2555
+
2556
+ mpp_debug_leave();
2557
+}