2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
....@@ -12,62 +12,75 @@
1212 #include <linux/slab.h>
1313 #include <soc/rockchip/pm_domains.h>
1414 #include <soc/rockchip/rockchip_dmc.h>
15
+#include <soc/rockchip/rockchip_iommu.h>
1516
1617 #include "mpp_rkvdec2_link.h"
1718
1819 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
1920
20
-#ifdef CONFIG_PM_DEVFREQ
21
-#include "../../../devfreq/governor.h"
22
-#endif
21
+#define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
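/* Note: this flag value is written to need_hack below to tag the dummy "hack" task queued for rk356x (see mpp_task_queue). */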
2322
24
-#define WAIT_TIMEOUT_MS (2000)
23
+/* vdpu381 link hw info for rk3588 */
24
+struct rkvdec_link_info rkvdec_link_v2_hw_info = {
25
+ .tb_reg_num = 218,
26
+ .tb_reg_next = 0,
27
+ .tb_reg_r = 1,
28
+ .tb_reg_second_en = 8,
2529
26
-#define RKVDEC_MAX_WRITE_PART 6
27
-#define RKVDEC_MAX_READ_PART 2
28
-
29
-struct rkvdec_link_part {
30
- /* register offset of table buffer */
31
- u32 tb_reg_off;
32
- /* start idx of task register */
33
- u32 reg_start;
34
- /* number of task register */
35
- u32 reg_num;
30
+ .part_w_num = 6,
31
+ .part_r_num = 2,
32
+ .part_w[0] = {
33
+ .tb_reg_off = 4,
34
+ .reg_start = 8,
35
+ .reg_num = 28,
36
+ },
37
+ .part_w[1] = {
38
+ .tb_reg_off = 32,
39
+ .reg_start = 64,
40
+ .reg_num = 52,
41
+ },
42
+ .part_w[2] = {
43
+ .tb_reg_off = 84,
44
+ .reg_start = 128,
45
+ .reg_num = 16,
46
+ },
47
+ .part_w[3] = {
48
+ .tb_reg_off = 100,
49
+ .reg_start = 160,
50
+ .reg_num = 48,
51
+ },
52
+ .part_w[4] = {
53
+ .tb_reg_off = 148,
54
+ .reg_start = 224,
55
+ .reg_num = 16,
56
+ },
57
+ .part_w[5] = {
58
+ .tb_reg_off = 164,
59
+ .reg_start = 256,
60
+ .reg_num = 16,
61
+ },
62
+ .part_r[0] = {
63
+ .tb_reg_off = 180,
64
+ .reg_start = 224,
65
+ .reg_num = 10,
66
+ },
67
+ .part_r[1] = {
68
+ .tb_reg_off = 190,
69
+ .reg_start = 258,
70
+ .reg_num = 28,
71
+ },
72
+ .tb_reg_int = 180,
73
+ .tb_reg_cycle = 195,
74
+ .hack_setup = 0,
75
+ .reg_status = {
76
+ .dec_num_mask = 0x3fffffff,
77
+ .err_flag_base = 0x010,
78
+ .err_flag_bit = BIT(31),
79
+ },
3680 };
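/*
 * Note: in each .part_w[n]/.part_r[n] entry above, .reg_start and .reg_num
 * select a run of task registers and .tb_reg_off is the matching offset
 * inside the link table buffer, as described by the rkvdec_link_part field
 * comments removed in this hunk.
 */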
3781
38
-struct rkvdec_link_status {
39
- u32 dec_num_mask;
40
- u32 err_flag_base;
41
- u32 err_flag_bit;
42
-};
43
-
44
-struct rkvdec_link_info {
45
- dma_addr_t iova;
46
- /* total register for link table buffer */
47
- u32 tb_reg_num;
48
- /* next link table addr in table buffer */
49
- u32 tb_reg_next;
50
- /* current read back addr in table buffer */
51
- u32 tb_reg_r;
52
- /* secondary enable in table buffer */
53
- u32 tb_reg_second_en;
54
- u32 part_w_num;
55
- u32 part_r_num;
56
-
57
- struct rkvdec_link_part part_w[RKVDEC_MAX_WRITE_PART];
58
- struct rkvdec_link_part part_r[RKVDEC_MAX_READ_PART];
59
-
60
- /* interrupt read back in table buffer */
61
- u32 tb_reg_int;
62
- bool hack_setup;
63
- u32 tb_reg_cycle;
64
- u32 tb_reg_out;
65
- u32 tb_reg_ref_s;
66
- u32 tb_reg_ref_e;
67
- struct rkvdec_link_status reg_status;
68
-};
69
-
70
-struct rkvdec_link_info rkvdec_link_rk3568_hw_info = {
82
+/* vdpu34x link hw info for rk356x */
83
+struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
7184 .tb_reg_num = 202,
7285 .tb_reg_next = 0,
7386 .tb_reg_r = 1,
....@@ -116,8 +129,8 @@
116129 .reg_num = 28,
117130 },
118131 .tb_reg_int = 164,
119
- .hack_setup = 1,
120132 .tb_reg_cycle = 179,
133
+ .hack_setup = 1,
121134 .reg_status = {
122135 .dec_num_mask = 0x3fffffff,
123136 .err_flag_base = 0x010,
....@@ -126,7 +139,7 @@
126139 };
127140
128141 /* vdpu382 link hw info */
129
-struct rkvdec_link_info rkvdec_link_v2_hw_info = {
142
+struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
130143 .tb_reg_num = 222,
131144 .tb_reg_next = 0,
132145 .tb_reg_r = 1,
....@@ -174,12 +187,9 @@
174187 .reg_start = 258,
175188 .reg_num = 30,
176189 },
177
- .tb_reg_int = 180,
178
- .hack_setup = 0,
179
- .tb_reg_cycle = 197,
180
- .tb_reg_out = 86,
181
- .tb_reg_ref_s = 104,
182
- .tb_reg_ref_e = 119,
190
+ .tb_reg_int = 180,
191
+ .hack_setup = 0,
192
+ .tb_reg_cycle = 197,
183193 .reg_status = {
184194 .dec_num_mask = 0x000fffff,
185195 .err_flag_base = 0x024,
....@@ -188,6 +198,11 @@
188198 };
189199
190200 static void rkvdec2_link_free_task(struct kref *ref);
201
+static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
202
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
203
+ struct device *iommu_dev,
204
+ unsigned long iova,
205
+ int status, void *arg);
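/* Note: the timeout worker and iommu fault handler declared above are hooked up later in this patch (INIT_DELAYED_WORK and mpp->fault_handler). */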
191206
192207 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
193208 {
....@@ -239,7 +254,7 @@
239254 u32 *reg = NULL;
240255 u32 i, j;
241256
242
- for (i = 0; i < dev->task_size; i++) {
257
+ for (i = 0; i < dev->task_capacity; i++) {
243258 reg = table_base + i * reg_count;
244259
245260 mpp_err("slot %d link config iova %08x:\n", i,
....@@ -286,9 +301,8 @@
286301 {
287302 mpp_err("dump link counter from %s\n", func);
288303
289
- mpp_err("task write %d read %d send %d recv %d run %d decoded %d total %d\n",
290
- dev->task_write, dev->task_read, dev->task_send, dev->task_recv,
291
- dev->task_to_run, dev->task_decoded, dev->task_total);
304
+ mpp_err("task pending %d running %d\n",
305
+ atomic_read(&dev->task_pending), dev->task_running);
292306 }
293307
294308 int rkvdec_link_dump(struct mpp_dev *mpp)
....@@ -301,158 +315,6 @@
301315 rkvdec_link_counter(__func__, dev);
302316 rkvdec_core_reg_dump(__func__, dev);
303317 rkvdec_link_node_dump(__func__, dev);
304
-
305
- return 0;
306
-}
307
-
308
-static int rkvdec_link_get_task_write(struct rkvdec_link_dev *dev)
309
-{
310
- int idx = dev->task_write < dev->task_size ? dev->task_write :
311
- dev->task_write - dev->task_size;
312
-
313
- return idx;
314
-}
315
-static int rkvdec_link_inc_task_write(struct rkvdec_link_dev *dev)
316
-{
317
- int task_write = rkvdec_link_get_task_write(dev);
318
-
319
- dev->task_write++;
320
- if (dev->task_write >= dev->task_size * 2)
321
- dev->task_write = 0;
322
-
323
- return task_write;
324
-}
325
-static int rkvdec_link_get_task_read(struct rkvdec_link_dev *dev)
326
-{
327
- int idx = dev->task_read < dev->task_size ? dev->task_read :
328
- dev->task_read - dev->task_size;
329
-
330
- return idx;
331
-}
332
-static int rkvdec_link_inc_task_read(struct rkvdec_link_dev *dev)
333
-{
334
- int task_read = rkvdec_link_get_task_read(dev);
335
-
336
- dev->task_read++;
337
- if (dev->task_read >= dev->task_size * 2)
338
- dev->task_read = 0;
339
-
340
- return task_read;
341
-}
342
-static int rkvdec_link_get_task_hw_queue_length(struct rkvdec_link_dev *dev)
343
-{
344
- int len;
345
-
346
- if (dev->task_send <= dev->task_recv)
347
- len = dev->task_send + dev->task_size - dev->task_recv;
348
- else
349
- len = dev->task_send - dev->task_recv - dev->task_size;
350
-
351
- return len;
352
-}
353
-static int rkvdec_link_get_task_send(struct rkvdec_link_dev *dev)
354
-{
355
- int idx = dev->task_send < dev->task_size ? dev->task_send :
356
- dev->task_send - dev->task_size;
357
-
358
- return idx;
359
-}
360
-static int rkvdec_link_inc_task_send(struct rkvdec_link_dev *dev)
361
-{
362
- int task_send = rkvdec_link_get_task_send(dev);
363
-
364
- dev->task_send++;
365
- if (dev->task_send >= dev->task_size * 2)
366
- dev->task_send = 0;
367
-
368
- return task_send;
369
-}
370
-static int rkvdec_link_inc_task_recv(struct rkvdec_link_dev *dev)
371
-{
372
- int task_recv = dev->task_recv;
373
-
374
- dev->task_recv++;
375
- if (dev->task_recv >= dev->task_size * 2)
376
- dev->task_recv = 0;
377
-
378
- return task_recv;
379
-}
380
-
381
-static int rkvdec_link_get_next_slot(struct rkvdec_link_dev *dev)
382
-{
383
- int next = -1;
384
-
385
- if (dev->task_write == dev->task_read)
386
- return next;
387
-
388
- next = rkvdec_link_get_task_write(dev);
389
-
390
- return next;
391
-}
392
-
393
-static int rkvdec_link_write_task_to_slot(struct rkvdec_link_dev *dev, int idx,
394
- struct mpp_task *mpp_task)
395
-{
396
- u32 i, off, s, n;
397
- struct rkvdec_link_part *part;
398
- struct rkvdec_link_info *info;
399
- struct mpp_dma_buffer *table;
400
- struct rkvdec2_task *task;
401
- int slot_idx;
402
- u32 *tb_reg;
403
-
404
- if (idx < 0 || idx >= dev->task_size) {
405
- mpp_err("send invalid task index %d\n", idx);
406
- return -1;
407
- }
408
-
409
- info = dev->info;
410
- part = info->part_w;
411
- table = dev->table;
412
- task = to_rkvdec2_task(mpp_task);
413
-
414
- slot_idx = rkvdec_link_inc_task_write(dev);
415
- if (idx != slot_idx)
416
- dev_info(dev->dev, "slot index mismatch %d vs %d\n",
417
- idx, slot_idx);
418
-
419
- if (task->need_hack) {
420
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
421
-
422
- rkvdec2_3568_hack_fix_link(tb_reg + 4);
423
-
424
- /* setup error mode flag */
425
- dev->tasks_hw[slot_idx] = NULL;
426
- dev->task_to_run++;
427
- dev->task_prepared++;
428
- slot_idx = rkvdec_link_inc_task_write(dev);
429
- }
430
-
431
- tb_reg = (u32 *)table->vaddr + slot_idx * dev->link_reg_count;
432
-
433
- for (i = 0; i < info->part_w_num; i++) {
434
- off = part[i].tb_reg_off;
435
- s = part[i].reg_start;
436
- n = part[i].reg_num;
437
- memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
438
- }
439
-
440
- tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
441
-
442
- /* memset read registers */
443
- part = info->part_r;
444
- for (i = 0; i < info->part_r_num; i++) {
445
- off = part[i].tb_reg_off;
446
- n = part[i].reg_num;
447
- memset(&tb_reg[off], 0, n * sizeof(u32));
448
- }
449
-
450
- dev->tasks_hw[slot_idx] = mpp_task;
451
- task->slot_idx = slot_idx;
452
- dev->task_to_run++;
453
- dev->task_prepared++;
454
- mpp_dbg_link_flow("slot %d write task %d\n", slot_idx,
455
- mpp_task->task_index);
456318
457319 return 0;
458320 }
....@@ -476,34 +338,20 @@
476338 mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
477339 }
478340
479
-static int rkvdec_link_send_task_to_hw(struct rkvdec_link_dev *dev,
480
- struct mpp_task *mpp_task,
481
- int slot_idx, u32 task_to_run,
482
- int resend)
341
+static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
342
+ struct mpp_task *mpp_task)
483343 {
484
- void __iomem *reg_base = dev->reg_base;
485
- struct mpp_dma_buffer *table = dev->table;
486
- u32 task_total = dev->task_total;
487
- u32 mode_start = 0;
488
- u32 val;
344
+ void __iomem *reg_base = link_dec->reg_base;
345
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
346
+ struct mpp_dma_buffer *table = task->table;
347
+ u32 link_en = 0;
348
+ u32 frame_num = 1;
349
+ u32 link_mode;
350
+ u32 timing_en = link_dec->mpp->srv->timing_en;
489351
490
- /* write address */
491
- if (!task_to_run || task_to_run > dev->task_size ||
492
- slot_idx < 0 || slot_idx >= dev->task_size) {
493
- mpp_err("invalid task send cfg at %d count %d\n",
494
- slot_idx, task_to_run);
495
- rkvdec_link_counter("error on send", dev);
496
- return 0;
497
- }
498
-
499
- val = task_to_run;
500
- if (!task_total || resend)
501
- mode_start = 1;
502
-
503
- if (mode_start) {
504
- u32 iova = table->iova + slot_idx * dev->link_node_size;
505
-
506
- rkvdec2_clear_cache(dev->mpp);
352
+ link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
353
+ if (!link_en) {
354
+ rkvdec2_clear_cache(link_dec->mpp);
507355 /* cleanup counter in hardware */
508356 writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
509357 /* start config before all registers are set */
....@@ -513,55 +361,31 @@
513361 wmb();
514362 /* clear counter and enable link mode hardware */
515363 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
516
-
517
- dev->task_total = 0;
518
- dev->task_decoded = 0;
519
-
520
- writel_relaxed(iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
521
- } else {
522
- val |= RKVDEC_LINK_BIT_ADD_MODE;
523
- }
524
-
525
- if (!resend) {
526
- u32 timing_en = dev->mpp->srv->timing_en;
527
- u32 i;
528
-
529
- for (i = 0; i < task_to_run; i++) {
530
- int next_idx = rkvdec_link_inc_task_send(dev);
531
- struct mpp_task *task_ddr = dev->tasks_hw[next_idx];
532
-
533
- if (!task_ddr)
534
- continue;
535
-
536
- mpp_task_run_begin(task_ddr, timing_en, MPP_WORK_TIMEOUT_DELAY);
537
- mpp_task_run_end(task_ddr, timing_en);
538
- }
539
- } else {
540
- if (task_total)
541
- dev_info(dev->dev, "resend with total %d\n", task_total);
542
- }
364
+ writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
365
+ link_mode = frame_num;
366
+ } else
367
+ link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
543368
544369 /* set link mode */
545
- writel_relaxed(val, reg_base + RKVDEC_LINK_MODE_BASE);
370
+ writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);
546371
547372 /* start config before all registers are set */
548373 wmb();
549374
550
- mpp_iommu_flush_tlb(dev->mpp->iommu_info);
375
+ mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
376
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
551377
378
+ link_dec->task_running++;
552379 /* configure done */
553380 writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
554
-
555
- mpp_dbg_link_flow("slot %d enable task %d mode %s\n", slot_idx,
556
- task_to_run, mode_start ? "start" : "add");
557
- if (mode_start) {
381
+ if (!link_en) {
558382 /* start hardware before all registers are set */
559383 wmb();
560384 /* clear counter and enable link mode hardware */
561385 writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
562386 }
387
+ mpp_task_run_end(mpp_task, timing_en);
563388
564
- dev->task_total += task_to_run;
565389 return 0;
566390 }
567391
....@@ -573,8 +397,7 @@
573397 struct mpp_dma_buffer *table = link_dec->table;
574398 struct rkvdec_link_info *info = link_dec->info;
575399 struct rkvdec_link_part *part = info->part_r;
576
- int slot_idx = task->slot_idx;
577
- u32 *tb_reg = (u32 *)(table->vaddr + slot_idx * link_dec->link_node_size);
400
+ u32 *tb_reg = (u32 *)table->vaddr;
578401 u32 off, s, n;
579402 u32 i;
580403
....@@ -594,161 +417,71 @@
594417 return 0;
595418 }
596419
597
-static int rkvdec_link_isr_recv_task(struct mpp_dev *mpp,
598
- struct rkvdec_link_dev *link_dec,
599
- int count)
600
-{
601
- struct rkvdec_link_info *info = link_dec->info;
602
- u32 *table_base = (u32 *)link_dec->table->vaddr;
603
- int i;
604
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
605
-
606
- for (i = 0; i < count; i++) {
607
- int idx = rkvdec_link_get_task_read(link_dec);
608
- struct mpp_task *mpp_task = link_dec->tasks_hw[idx];
609
- struct rkvdec2_task *task = NULL;
610
- u32 *regs = NULL;
611
- u32 irq_status = 0;
612
-
613
- if (!mpp_task && info->hack_setup) {
614
- regs = table_base + idx * link_dec->link_reg_count;
615
- mpp_dbg_link_flow("slot %d read task stuff\n", idx);
616
-
617
- link_dec->stuff_total++;
618
- if (link_dec->statistic_count &&
619
- regs[info->tb_reg_cycle]) {
620
- link_dec->stuff_cycle_sum +=
621
- regs[info->tb_reg_cycle];
622
- link_dec->stuff_cnt++;
623
- if (link_dec->stuff_cnt >=
624
- link_dec->statistic_count) {
625
- dev_info(
626
- link_dec->dev, "hw cycle %u\n",
627
- (u32)(link_dec->stuff_cycle_sum /
628
- link_dec->statistic_count));
629
- link_dec->stuff_cycle_sum = 0;
630
- link_dec->stuff_cnt = 0;
631
- }
632
- }
633
-
634
- if (link_dec->error && (i == (count - 1))) {
635
- link_dec->stuff_err++;
636
-
637
- irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
638
- dev_info(link_dec->dev, "found stuff task error irq %08x %u/%u\n",
639
- irq_status, link_dec->stuff_err,
640
- link_dec->stuff_total);
641
-
642
- if (link_dec->stuff_on_error) {
643
- dev_info(link_dec->dev, "stuff task error again %u/%u\n",
644
- link_dec->stuff_err,
645
- link_dec->stuff_total);
646
- }
647
-
648
- link_dec->stuff_on_error = 1;
649
- /* resend task */
650
- link_dec->decoded--;
651
- } else {
652
- link_dec->stuff_on_error = 0;
653
- rkvdec_link_inc_task_recv(link_dec);
654
- rkvdec_link_inc_task_read(link_dec);
655
- link_dec->task_running--;
656
- link_dec->task_prepared--;
657
- }
658
-
659
- continue;
660
- }
661
-
662
- if (!mpp_task)
663
- return 0;
664
-
665
- task = to_rkvdec2_task(mpp_task);
666
- regs = table_base + idx * link_dec->link_reg_count;
667
- link_dec->error_iova = regs[info->tb_reg_out];
668
- irq_status = regs[info->tb_reg_int];
669
- mpp_task->hw_cycles = regs[info->tb_reg_cycle];
670
- mpp_time_diff_with_hw_time(mpp_task, dec->aclk_info.real_rate_hz);
671
- mpp_dbg_link_flow("slot %d rd task %d\n", idx,
672
- mpp_task->task_index);
673
-
674
- task->irq_status = irq_status ? irq_status : mpp->irq_status;
675
- mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
676
- cancel_delayed_work_sync(&mpp_task->timeout_work);
677
- set_bit(TASK_STATE_HANDLE, &mpp_task->state);
678
-
679
- if (link_dec->statistic_count &&
680
- regs[info->tb_reg_cycle]) {
681
- link_dec->task_cycle_sum +=
682
- regs[info->tb_reg_cycle];
683
- link_dec->task_cnt++;
684
- if (link_dec->task_cnt >= link_dec->statistic_count) {
685
- dev_info(link_dec->dev, "hw cycle %u\n",
686
- (u32)(link_dec->task_cycle_sum /
687
- link_dec->statistic_count));
688
- link_dec->task_cycle_sum = 0;
689
- link_dec->task_cnt = 0;
690
- }
691
- }
692
-
693
- rkvdec2_link_finish(mpp, mpp_task);
694
-
695
- set_bit(TASK_STATE_FINISH, &mpp_task->state);
696
-
697
- list_del_init(&mpp_task->queue_link);
698
- link_dec->task_running--;
699
- link_dec->task_prepared--;
700
-
701
- rkvdec_link_inc_task_recv(link_dec);
702
- rkvdec_link_inc_task_read(link_dec);
703
-
704
- if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
705
- set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
706
-
707
- set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
708
- /* Wake up the GET thread */
709
- wake_up(&task->wait);
710
- kref_put(&mpp_task->ref, rkvdec2_link_free_task);
711
- link_dec->tasks_hw[idx] = NULL;
712
- }
713
-
714
- return 0;
715
-}
716
-
717420 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
718421 struct mpp_task *mpp_task)
719422 {
720
- struct mpp_task *out_task = NULL;
721423 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
722424 struct rkvdec_link_dev *link_dec = dec->link_dec;
723
- int ret = 0;
724
- int slot_idx;
425
+ struct mpp_dma_buffer *table = NULL;
426
+ struct rkvdec_link_part *part;
427
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
428
+ struct rkvdec_link_info *info = link_dec->info;
429
+ u32 i, off, s, n;
430
+ u32 *tb_reg;
725431
726432 mpp_debug_enter();
727433
728
- slot_idx = rkvdec_link_get_next_slot(link_dec);
729
- if (slot_idx < 0) {
730
- mpp_err("capacity %d running %d\n",
731
- mpp->task_capacity, link_dec->task_running);
732
- dev_err(link_dec->dev, "no slot to write on get next slot\n");
733
- goto done;
434
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
435
+		dev_err(mpp->dev, "task %d has already been prepared\n", mpp_task->task_index);
436
+ return mpp_task;
734437 }
735438
736
- ret = rkvdec_link_write_task_to_slot(link_dec, slot_idx, mpp_task);
737
- if (ret >= 0)
738
- out_task = mpp_task;
739
- else
740
- dev_err(mpp->dev, "no slot to write\n");
439
+ table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);
741440
742
-done:
441
+ if (!table)
442
+ return NULL;
443
+
444
+ /* fill regs value */
445
+ tb_reg = (u32 *)table->vaddr;
446
+ part = info->part_w;
447
+ for (i = 0; i < info->part_w_num; i++) {
448
+ off = part[i].tb_reg_off;
449
+ s = part[i].reg_start;
450
+ n = part[i].reg_num;
451
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
452
+ }
453
+
454
+ /* setup error mode flag */
455
+ tb_reg[9] |= BIT(18) | BIT(9);
456
+ tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
457
+
458
+ /* memset read registers */
459
+ part = info->part_r;
460
+ for (i = 0; i < info->part_r_num; i++) {
461
+ off = part[i].tb_reg_off;
462
+ n = part[i].reg_num;
463
+ memset(&tb_reg[off], 0, n * sizeof(u32));
464
+ }
465
+
466
+ list_move_tail(&table->link, &link_dec->used_list);
467
+ task->table = table;
468
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
469
+
470
+ mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
471
+ mpp_task->session->index, mpp_task->task_index,
472
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
743473 mpp_debug_leave();
744474
745
- return out_task;
475
+ return mpp_task;
746476 }
747477
748478 static int rkvdec2_link_reset(struct mpp_dev *mpp)
749479 {
750480
751481 dev_info(mpp->dev, "resetting...\n");
482
+
483
+ disable_irq(mpp->irq);
484
+ mpp_iommu_disable_irq(mpp->iommu_info);
752485
753486 /* FIXME lock resource lock of the other devices in combo */
754487 mpp_iommu_down_write(mpp->iommu_info);
....@@ -771,52 +504,11 @@
771504 mpp_reset_up_write(mpp->reset_group);
772505 mpp_iommu_up_write(mpp->iommu_info);
773506
507
+ enable_irq(mpp->irq);
508
+ mpp_iommu_enable_irq(mpp->iommu_info);
774509 dev_info(mpp->dev, "reset done\n");
775510
776511 return 0;
777
-}
778
-
779
-static void rkvdec2_check_err_ref(struct mpp_dev *mpp)
780
-{
781
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
782
- struct rkvdec_link_dev *link_dec = dec->link_dec;
783
- struct rkvdec_link_info *link_info = link_dec->info;
784
- struct mpp_taskqueue *queue = mpp->queue;
785
- struct mpp_task *mpp_task = NULL, *n;
786
- struct rkvdec2_task *task;
787
- int i;
788
-
789
- if (!link_dec->error_iova || !dec->err_ref_hack)
790
- return;
791
-
792
- dev_err(mpp->dev, "err task iova %#08x\n", link_dec->error_iova);
793
- list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
794
- if (mpp_task) {
795
- u32 *regs = NULL;
796
- u32 *table_base = (u32 *)link_dec->table->vaddr;
797
-
798
- task = to_rkvdec2_task(mpp_task);
799
- regs = table_base + task->slot_idx * link_dec->link_reg_count;
800
-
801
- for (i = link_info->tb_reg_ref_s; i <= link_info->tb_reg_ref_e; i++) {
802
- if (regs[i] == link_dec->error_iova)
803
- regs[i] = 0;
804
- }
805
- }
806
- }
807
-
808
- mutex_lock(&queue->pending_lock);
809
- list_for_each_entry_safe(mpp_task, n, &queue->pending_list, queue_link) {
810
- task = to_rkvdec2_task(mpp_task);
811
-
812
- /* ref frame reg index start - end */
813
- for (i = 164; i <= 179; i++) {
814
- if (task->reg[i] == link_dec->error_iova)
815
- task->reg[i] = 0;
816
- }
817
- }
818
- mutex_unlock(&queue->pending_lock);
819
- link_dec->error_iova = 0;
820512 }
821513
822514 static int rkvdec2_link_irq(struct mpp_dev *mpp)
....@@ -824,11 +516,6 @@
824516 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
825517 struct rkvdec_link_dev *link_dec = dec->link_dec;
826518 u32 irq_status = 0;
827
-
828
- if (!atomic_read(&link_dec->power_enabled)) {
829
- dev_info(link_dec->dev, "irq on power off\n");
830
- return -1;
831
- }
832519
833520 irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
834521
....@@ -845,122 +532,12 @@
845532
846533 link_dec->irq_status = irq_status;
847534 mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
848
- mpp_dbg_link_flow("core irq %08x\n", mpp->irq_status);
849535
850536 writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
851537 }
852
- mpp_debug((DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE), "irq_status: %08x : %08x\n",
538
+
539
+ mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
853540 irq_status, mpp->irq_status);
854
- return 0;
855
-}
856
-
857
-static int rkvdec2_link_isr(struct mpp_dev *mpp)
858
-{
859
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
860
- struct rkvdec_link_dev *link_dec = dec->link_dec;
861
- struct rkvdec_link_info *link_info = link_dec->info;
862
- /* keep irq_status */
863
- u32 irq_status = link_dec->irq_status;
864
- u32 prev_dec_num;
865
- int count = 0;
866
- u32 len = 0;
867
- u32 need_reset = atomic_read(&mpp->reset_request);
868
- u32 task_timeout = link_dec->task_on_timeout;
869
-
870
- mpp_debug_enter();
871
-
872
- disable_irq(mpp->irq);
873
- mpp_iommu_disable_irq(mpp->iommu_info);
874
- rkvdec_link_status_update(link_dec);
875
- link_dec->irq_status = irq_status;
876
- prev_dec_num = link_dec->task_decoded;
877
-
878
- if (!link_dec->enabled || task_timeout) {
879
- u32 val;
880
-
881
- if (task_timeout) {
882
- rkvdec_link_reg_dump("timeout", link_dec);
883
- link_dec->decoded += task_timeout;
884
- }
885
-
886
- val = mpp_read(mpp, 224 * 4);
887
- if (link_info->hack_setup && !(val & BIT(2))) {
888
- /* only for rk356x */
889
- dev_info(mpp->dev, "frame not complete\n");
890
- link_dec->decoded++;
891
- }
892
- }
893
- count = (int)link_dec->decoded - (int)prev_dec_num;
894
-
895
- /* handle counter wrap */
896
- if (link_dec->enabled && !count && !need_reset) {
897
- /* process extra isr when task is processed */
898
- enable_irq(mpp->irq);
899
- mpp_iommu_enable_irq(mpp->iommu_info);
900
- goto done;
901
- }
902
-
903
- /* get previous ready task */
904
- if (count) {
905
- rkvdec_link_isr_recv_task(mpp, link_dec, count);
906
- link_dec->task_decoded = link_dec->decoded;
907
- }
908
-
909
- if (!link_dec->enabled || need_reset)
910
- goto do_reset;
911
-
912
- enable_irq(mpp->irq);
913
- mpp_iommu_enable_irq(mpp->iommu_info);
914
- goto done;
915
-
916
-do_reset:
917
- rkvdec2_check_err_ref(mpp);
918
- /* NOTE: irq may run with reset */
919
- atomic_inc(&mpp->reset_request);
920
- rkvdec2_link_reset(mpp);
921
- link_dec->task_decoded = 0;
922
- link_dec->task_total = 0;
923
- enable_irq(mpp->irq);
924
- mpp_iommu_enable_irq(mpp->iommu_info);
925
-
926
- if (link_dec->total == link_dec->decoded)
927
- goto done;
928
-
929
- len = rkvdec_link_get_task_hw_queue_length(link_dec);
930
- if (len > link_dec->task_size)
931
- rkvdec_link_counter("invalid len", link_dec);
932
-
933
- if (len) {
934
- int slot_idx = rkvdec_link_get_task_read(link_dec);
935
- struct mpp_task *mpp_task = NULL;
936
-
937
- mpp_task = link_dec->tasks_hw[slot_idx];
938
- rkvdec_link_send_task_to_hw(link_dec, mpp_task,
939
- slot_idx, len, 1);
940
- }
941
-
942
-done:
943
- mpp_debug_leave();
944
-
945
- return IRQ_HANDLED;
946
-}
947
-
948
-static int rkvdec2_link_iommu_handle(struct iommu_domain *iommu,
949
- struct device *iommu_dev,
950
- unsigned long iova,
951
- int status, void *arg)
952
-{
953
- struct mpp_dev *mpp = (struct mpp_dev *)arg;
954
-
955
- dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
956
- iova, status, arg);
957
-
958
- if (!mpp) {
959
- dev_err(iommu_dev, "pagefault without device to handle\n");
960
- return 0;
961
- }
962
-
963
- rk_iommu_mask_irq(mpp->dev);
964541
965542 return 0;
966543 }
....@@ -1015,12 +592,6 @@
1015592 }
1016593
1017594 link_dec->table = table;
1018
- link_dec->task_size = task_capacity;
1019
- link_dec->task_count = 0;
1020
- link_dec->task_write = 0;
1021
- link_dec->task_read = link_dec->task_size;
1022
- link_dec->task_send = 0;
1023
- link_dec->task_recv = link_dec->task_size;
1024595
1025596 return 0;
1026597 err_free_node:
....@@ -1059,18 +630,13 @@
1059630 struct rkvdec_link_dev *link_dec = NULL;
1060631 struct device *dev = &pdev->dev;
1061632 struct mpp_dev *mpp = &dec->mpp;
633
+ struct mpp_dma_buffer *table;
634
+ int i;
1062635
1063636 mpp_debug_enter();
1064637
1065638 link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1066639 if (!link_dec) {
1067
- ret = -ENOMEM;
1068
- goto done;
1069
- }
1070
-
1071
- link_dec->tasks_hw = devm_kzalloc(dev, sizeof(*link_dec->tasks_hw) *
1072
- mpp->task_capacity, GFP_KERNEL);
1073
- if (!link_dec->tasks_hw) {
1074640 ret = -ENOMEM;
1075641 goto done;
1076642 }
....@@ -1096,13 +662,33 @@
1096662 if (ret)
1097663 goto done;
1098664
1099
- if (link_dec->info->hack_setup)
665
+ /* alloc table pointer array */
666
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
667
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
668
+ if (!table)
669
+ return -ENOMEM;
670
+
671
+ /* init table array */
672
+ link_dec->table_array = table;
673
+ INIT_LIST_HEAD(&link_dec->used_list);
674
+ INIT_LIST_HEAD(&link_dec->unused_list);
675
+ for (i = 0; i < mpp->task_capacity; i++) {
676
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
677
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
678
+ table[i].size = link_dec->link_node_size;
679
+ INIT_LIST_HEAD(&table[i].link);
680
+ list_add_tail(&table[i].link, &link_dec->unused_list);
681
+ }
682
+
683
+ if (dec->fix)
1100684 rkvdec2_link_hack_data_setup(dec->fix);
1101
- iommu_set_fault_handler(mpp->iommu_info->domain,
1102
- rkvdec2_link_iommu_handle, mpp);
685
+
686
+ mpp->fault_handler = rkvdec2_link_iommu_fault_handle;
687
+
1103688 link_dec->mpp = mpp;
1104689 link_dec->dev = dev;
1105690 atomic_set(&link_dec->task_timeout, 0);
691
+ atomic_set(&link_dec->task_pending, 0);
1106692 atomic_set(&link_dec->power_enabled, 0);
1107693 link_dec->irq_enabled = 1;
1108694
....@@ -1116,11 +702,6 @@
1116702 devm_iounmap(dev, link_dec->reg_base);
1117703 link_dec->reg_base = NULL;
1118704 }
1119
- if (link_dec->tasks_hw) {
1120
- devm_kfree(dev, link_dec->tasks_hw);
1121
- link_dec->tasks_hw = NULL;
1122
- }
1123
-
1124705 devm_kfree(dev, link_dec);
1125706 link_dec = NULL;
1126707 }
....@@ -1138,15 +719,13 @@
1138719 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
1139720
1140721 if (!task->session) {
1141
- mpp_err("task %d task->session is null.\n", task->task_index);
722
+ mpp_err("task %d task->session is null.\n", task->task_id);
1142723 return;
1143724 }
1144725 session = task->session;
1145726
1146
- mpp_debug_func(DEBUG_TASK_INFO,
1147
- "session %d:%d task %d state 0x%lx abort_request %d\n",
1148
- session->device_type, session->index, task->task_index,
1149
- task->state, atomic_read(&task->abort_request));
727
+ mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
728
+ session->index, task->task_id, task->state);
1150729 if (!session->mpp) {
1151730 mpp_err("session %d session->mpp is null.\n", session->index);
1152731 return;
....@@ -1165,30 +744,16 @@
1165744 kthread_queue_work(&mpp->queue->worker, &mpp->work);
1166745 }
1167746
1168
-static void rkvdec2_link_trigger_timeout(struct mpp_dev *mpp)
1169
-{
1170
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1171
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1172
-
1173
- atomic_inc(&link_dec->task_timeout);
1174
- rkvdec2_link_trigger_work(mpp);
1175
-}
1176
-
1177
-static void rkvdec2_link_trigger_irq(struct mpp_dev *mpp)
1178
-{
1179
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1180
- struct rkvdec_link_dev *link_dec = dec->link_dec;
1181
-
1182
- link_dec->task_irq++;
1183
- rkvdec2_link_trigger_work(mpp);
1184
-}
1185
-
1186
-static void rkvdec2_link_power_on(struct mpp_dev *mpp)
747
+static int rkvdec2_link_power_on(struct mpp_dev *mpp)
1187748 {
1188749 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1189750 struct rkvdec_link_dev *link_dec = dec->link_dec;
1190751
1191752 if (!atomic_xchg(&link_dec->power_enabled, 1)) {
753
+ if (mpp_iommu_attach(mpp->iommu_info)) {
754
+ dev_err(mpp->dev, "mpp_iommu_attach failed\n");
755
+ return -ENODATA;
756
+ }
1192757 pm_runtime_get_sync(mpp->dev);
1193758 pm_stay_awake(mpp->dev);
1194759
....@@ -1204,25 +769,10 @@
1204769 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
1205770 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
1206771 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
1207
-
1208
-#ifdef CONFIG_PM_DEVFREQ
1209
- if (dec->devfreq) {
1210
- unsigned long core_rate_hz;
1211
-
1212
- mutex_lock(&dec->devfreq->lock);
1213
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1214
- CLK_MODE_ADVANCED);
1215
- if (dec->core_rate_hz != core_rate_hz) {
1216
- dec->core_rate_hz = core_rate_hz;
1217
- update_devfreq(dec->devfreq);
1218
- }
1219
- mutex_unlock(&dec->devfreq->lock);
1220
-
1221
- return;
1222
- }
1223
-#endif
1224
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_ADVANCED);
772
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
773
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1225774 }
775
+ return 0;
1226776 }
1227777
1228778 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
....@@ -1241,178 +791,282 @@
1241791 pm_relax(mpp->dev);
1242792 pm_runtime_put_sync_suspend(mpp->dev);
1243793
1244
- link_dec->task_decoded = 0;
1245
- link_dec->task_total = 0;
1246
-
1247794 mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1248795 mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1249796 mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1250
-
1251
-#ifdef CONFIG_PM_DEVFREQ
1252
- if (dec->devfreq) {
1253
- unsigned long core_rate_hz;
1254
-
1255
- mutex_lock(&dec->devfreq->lock);
1256
- core_rate_hz = mpp_get_clk_info_rate_hz(&dec->core_clk_info,
1257
- CLK_MODE_NORMAL);
1258
- if (dec->core_rate_hz != core_rate_hz) {
1259
- dec->core_rate_hz = core_rate_hz;
1260
- update_devfreq(dec->devfreq);
1261
- }
1262
- mutex_unlock(&dec->devfreq->lock);
1263
-
1264
- return;
1265
- }
1266
-#endif
1267
- mpp_clk_set_rate(&dec->core_clk_info, CLK_MODE_NORMAL);
797
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
798
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1268799 }
1269800 }
1270801
1271802 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
1272803 {
1273804 struct mpp_dev *mpp;
805
+ struct rkvdec2_dev *dec;
1274806 struct mpp_session *session;
1275807 struct mpp_task *task = container_of(to_delayed_work(work_s),
1276808 struct mpp_task, timeout_work);
1277809
1278810 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1279811 mpp_err("task %d state %lx has been handled\n",
1280
- task->task_index, task->state);
812
+ task->task_id, task->state);
1281813 return;
1282814 }
1283815
1284816 if (!task->session) {
1285
- mpp_err("task %d session is null.\n", task->task_index);
817
+ mpp_err("task %d session is null.\n", task->task_id);
1286818 return;
1287819 }
1288820 session = task->session;
1289821
1290822 if (!session->mpp) {
1291823 mpp_err("task %d:%d mpp is null.\n", session->index,
1292
- task->task_index);
824
+ task->task_id);
1293825 return;
1294826 }
1295827 mpp = session->mpp;
1296
- rkvdec2_link_trigger_timeout(mpp);
828
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
829
+
830
+ dec = to_rkvdec2_dev(mpp);
831
+ atomic_inc(&dec->link_dec->task_timeout);
832
+
833
+ dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
834
+ session->index, task->task_index, task->state,
835
+ atomic_read(&dec->link_dec->task_timeout));
836
+
837
+ rkvdec2_link_trigger_work(mpp);
1297838 }
1298839
1299
-static void mpp_taskqueue_scan_pending_abort_task(struct mpp_taskqueue *queue)
840
+static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
841
+ struct device *iommu_dev,
842
+ unsigned long iova,
843
+ int status, void *arg)
1300844 {
1301
- struct mpp_task *task, *n;
845
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
846
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
847
+ struct mpp_task *mpp_task = NULL, *n;
848
+ struct mpp_taskqueue *queue;
1302849
1303
- mutex_lock(&queue->pending_lock);
1304
- /* Check and pop all timeout task */
1305
- list_for_each_entry_safe(task, n, &queue->pending_list, queue_link) {
1306
- struct mpp_session *session = task->session;
850
+ dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
851
+ iova, status, arg);
1307852
1308
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1309
- mutex_lock(&session->pending_lock);
1310
- /* wait and signal */
1311
- list_del_init(&task->queue_link);
1312
- mutex_unlock(&session->pending_lock);
1313
- kref_put(&task->ref, rkvdec2_link_free_task);
853
+ if (!mpp) {
854
+ dev_err(iommu_dev, "pagefault without device to handle\n");
855
+ return 0;
856
+ }
857
+ queue = mpp->queue;
858
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
859
+ struct rkvdec_link_info *info = dec->link_dec->info;
860
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
861
+ u32 *tb_reg = (u32 *)task->table->vaddr;
862
+ u32 irq_status = tb_reg[info->tb_reg_int];
863
+
864
+ if (!irq_status) {
865
+ mpp_task_dump_mem_region(mpp, mpp_task);
866
+ break;
1314867 }
1315868 }
1316
- mutex_unlock(&queue->pending_lock);
869
+
870
+ mpp_task_dump_hw_reg(mpp);
871
+ /*
872
+ * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
873
+ * Until the pagefault task finish by hw timeout.
874
+ */
875
+ rockchip_iommu_mask_irq(mpp->dev);
876
+ dec->mmu_fault = 1;
877
+
878
+ return 0;
879
+}
880
+
881
+static void rkvdec2_link_resend(struct mpp_dev *mpp)
882
+{
883
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
884
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
885
+ struct mpp_taskqueue *queue = mpp->queue;
886
+ struct mpp_task *mpp_task, *n;
887
+
888
+ link_dec->task_running = 0;
889
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
890
+ dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
891
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
892
+ clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
893
+ clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
894
+ rkvdec2_link_enqueue(link_dec, mpp_task);
895
+ }
1317896 }
1318897
1319898 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
1320899 {
1321900 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1322901 struct rkvdec_link_dev *link_dec = dec->link_dec;
1323
- struct mpp_task *task;
1324902 struct mpp_taskqueue *queue = mpp->queue;
1325
- int task_irq = link_dec->task_irq;
1326
- int task_irq_prev = link_dec->task_irq_prev;
1327
- int task_timeout = atomic_read(&link_dec->task_timeout);
903
+ struct mpp_task *mpp_task = NULL, *n;
904
+ struct rkvdec_link_info *info = link_dec->info;
905
+ u32 reset_flag = 0;
906
+ u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
907
+ u32 link_en = atomic_read(&link_dec->power_enabled) ?
908
+ readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
909
+ u32 force_dequeue = iommu_fault || !link_en;
910
+ u32 dequeue_cnt = 0;
1328911
1329
- if (!link_dec->task_running)
1330
- goto done;
912
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
913
+ /*
914
+ * Because there are multiple tasks enqueue at the same time,
915
+ * soft timeout may be triggered at the same time, but in reality only
916
+ * first task is being timeout because of the hardware stuck,
917
+ * so only process the first task.
918
+ */
919
+ u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
920
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
921
+ u32 *tb_reg = (u32 *)task->table->vaddr;
922
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
923
+ u32 irq_status = tb_reg[info->tb_reg_int];
924
+ u32 task_done = irq_status || timeout_flag || abort_flag;
1331925
1332
- if (task_timeout != link_dec->task_timeout_prev) {
1333
- dev_info(link_dec->dev, "process task timeout\n");
1334
- atomic_inc(&mpp->reset_request);
1335
- link_dec->task_on_timeout =
1336
- task_timeout - link_dec->task_timeout_prev;
1337
- goto proc;
926
+ /*
927
+ * there are some cases will cause hw cannot write reg to ddr:
928
+ * 1. iommu pagefault
929
+ * 2. link stop(link_en == 0) because of err task, it is a rk356x issue.
930
+ * so need force dequeue one task.
931
+ */
932
+ if (force_dequeue)
933
+ task_done = 1;
934
+
935
+ if (!task_done)
936
+ break;
937
+
938
+ dequeue_cnt++;
939
+ /* check hack task only for rk356x*/
940
+ if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
941
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
942
+ list_move_tail(&task->table->link, &link_dec->unused_list);
943
+ list_del_init(&mpp_task->queue_link);
944
+ link_dec->task_running--;
945
+ link_dec->hack_task_running--;
946
+ kfree(task);
947
+ mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
948
+ link_dec->hack_task_running, irq_status,
949
+ timeout_flag, abort_flag);
950
+ continue;
951
+ }
952
+
953
+ /*
954
+ * if timeout/abort/force dequeue found, reset and stop hw first.
955
+ */
956
+ if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
957
+ dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
958
+ mpp_task->session->index, mpp_task->task_index,
959
+ timeout_flag, abort_flag, force_dequeue);
960
+ rkvdec2_link_reset(mpp);
961
+ reset_flag = 1;
962
+ dec->mmu_fault = 0;
963
+ mpp->irq_status = 0;
964
+ force_dequeue = 0;
965
+ }
966
+
967
+ cancel_delayed_work_sync(&mpp_task->timeout_work);
968
+
969
+ task->irq_status = irq_status;
970
+ mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
971
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
972
+ rkvdec2_link_finish(mpp, mpp_task);
973
+
974
+ list_move_tail(&task->table->link, &link_dec->unused_list);
975
+ list_del_init(&mpp_task->queue_link);
976
+ link_dec->task_running--;
977
+
978
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
979
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
980
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
981
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
982
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
983
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
984
+
985
+ mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
986
+ mpp_task->session->index, mpp_task->task_index,
987
+ irq_status, timeout_flag, abort_flag);
988
+
989
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
990
+ dev_err(mpp->dev,
991
+ "session %d task %d irq_status %#08x timeout %u abort %u\n",
992
+ mpp_task->session->index, mpp_task->task_index,
993
+ irq_status, timeout_flag, abort_flag);
994
+ if (!reset_flag)
995
+ atomic_inc(&mpp->reset_request);
996
+ }
997
+
998
+ wake_up(&mpp_task->wait);
999
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
13381000 }
13391001
1340
- if (task_irq == task_irq_prev)
1341
- goto done;
1342
-
1343
- if (!atomic_read(&link_dec->power_enabled)) {
1344
- dev_info(link_dec->dev, "dequeue on power off\n");
1345
- goto done;
1346
- }
1347
-
1348
-proc:
1349
- task = list_first_entry_or_null(&queue->running_list, struct mpp_task,
1350
- queue_link);
1351
- if (!task) {
1352
- mpp_err("can found task on trydequeue with %d running task\n",
1353
- link_dec->task_running);
1354
- goto done;
1355
- }
1356
-
1357
- /* Check and process all finished task */
1358
- rkvdec2_link_isr(mpp);
1359
-
1360
-done:
1361
- link_dec->task_irq_prev = task_irq;
1362
- link_dec->task_timeout_prev = task_timeout;
1363
- link_dec->task_on_timeout = 0;
1364
-
1365
- mpp_taskqueue_scan_pending_abort_task(queue);
1366
-
1367
- /* TODO: if reset is needed do reset here */
1002
+ /* resend running task after reset */
1003
+ if (reset_flag && !list_empty(&queue->running_list))
1004
+ rkvdec2_link_resend(mpp);
13681005 }
13691006
1370
-static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *task)
1007
+static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
13711008 {
13721009 struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
13731010 struct rkvdec_link_dev *link_dec = dec->link_dec;
1374
- u32 task_to_run = 0;
1375
- int slot_idx = 0;
1376
- int ret;
1377
- struct mpp_session *session = task->session;
1011
+ struct mpp_taskqueue *queue = mpp->queue;
1012
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
13781013
13791014 mpp_debug_enter();
13801015
1381
- /*
1382
- * for iommu share hardware, should attach to ensure
1383
- * working in current device
1384
- */
1385
- ret = mpp_iommu_attach(mpp->iommu_info);
1386
- if (ret) {
1387
- dev_err(mpp->dev, "mpp_iommu_attach failed\n");
1388
- return -ENODATA;
1389
- }
1390
-
13911016 rkvdec2_link_power_on(mpp);
1392
- mpp_debug_func(DEBUG_TASK_INFO,
1393
- "%s session %d:%d task=%d state=0x%lx\n",
1394
- dev_name(mpp->dev), session->device_type,
1395
- session->index, task->task_index, task->state);
13961017
1397
- /* prepare the task for running */
1398
- if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
1399
- mpp_err("task %d has been prepare twice\n", task->task_index);
1018
+ /* hack for rk356x */
1019
+ if (task->need_hack) {
1020
+ u32 *tb_reg;
1021
+ struct mpp_dma_buffer *table;
1022
+ struct rkvdec2_task *hack_task;
1023
+ struct rkvdec_link_info *info = link_dec->info;
14001024
1401
- rkvdec2_link_prepare(mpp, task);
1025
+ /* need reserved 2 unused task for need hack task */
1026
+ if (link_dec->task_running > (link_dec->task_capacity - 2))
1027
+ return -EBUSY;
14021028
1403
- task_to_run = link_dec->task_to_run;
1404
- if (!task_to_run) {
1405
- dev_err(link_dec->dev, "nothing to run\n");
1406
- goto done;
1029
+ table = list_first_entry_or_null(&link_dec->unused_list,
1030
+ struct mpp_dma_buffer,
1031
+ link);
1032
+ if (!table)
1033
+ return -EBUSY;
1034
+
1035
+ hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
1036
+
1037
+ if (!hack_task)
1038
+ return -ENOMEM;
1039
+
1040
+ mpp_task_init(mpp_task->session, &hack_task->mpp_task);
1041
+ INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
1042
+ rkvdec2_link_timeout_proc);
1043
+
1044
+ tb_reg = (u32 *)table->vaddr;
1045
+ memset(tb_reg + info->part_r[0].tb_reg_off, 0, info->part_r[0].reg_num);
1046
+ rkvdec2_3568_hack_fix_link(tb_reg + 4);
1047
+ list_move_tail(&table->link, &link_dec->used_list);
1048
+ hack_task->table = table;
1049
+ hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
1050
+ rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
1051
+ mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
1052
+ link_dec->hack_task_running++;
1053
+ mpp_dbg_link("hack task send to hw, hack running %d\n",
1054
+ link_dec->hack_task_running);
14071055 }
14081056
1409
- mpp_reset_down_read(mpp->reset_group);
1410
- link_dec->task_to_run = 0;
1411
- slot_idx = rkvdec_link_get_task_send(link_dec);
1412
- link_dec->task_running += task_to_run;
1413
- rkvdec_link_send_task_to_hw(link_dec, task, slot_idx, task_to_run, 0);
1057
+ /* process normal */
1058
+ if (!rkvdec2_link_prepare(mpp, mpp_task))
1059
+ return -EBUSY;
14141060
1415
-done:
1061
+ rkvdec2_link_enqueue(link_dec, mpp_task);
1062
+
1063
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1064
+ atomic_dec(&link_dec->task_pending);
1065
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1066
+
1067
+ mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
1068
+ mpp_task->session->index, mpp_task->task_index,
1069
+ atomic_read(&link_dec->task_pending), link_dec->task_running);
14161070 mpp_debug_leave();
14171071
14181072 return 0;
....@@ -1424,7 +1078,7 @@
14241078 int ret = rkvdec2_link_irq(mpp);
14251079
14261080 if (!ret)
1427
- rkvdec2_link_trigger_irq(mpp);
1081
+ rkvdec2_link_trigger_work(mpp);
14281082
14291083 return IRQ_HANDLED;
14301084 }
....@@ -1470,10 +1124,10 @@
14701124 struct mpp_task_msgs *msgs)
14711125 {
14721126 struct mpp_task *task = NULL;
1473
- struct rkvdec2_task *dec_task = NULL;
14741127 struct mpp_dev *mpp = session->mpp;
1475
- u32 fmt;
14761128 struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
1129
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1130
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
14771131
14781132 task = rkvdec2_alloc_task(session, msgs);
14791133 if (!task) {
....@@ -1482,6 +1136,9 @@
14821136 }
14831137
14841138 if (link_info->hack_setup) {
1139
+ u32 fmt;
1140
+ struct rkvdec2_task *dec_task = NULL;
1141
+
14851142 dec_task = to_rkvdec2_task(task);
14861143 fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
14871144 dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
....@@ -1490,6 +1147,7 @@
14901147 kref_init(&task->ref);
14911148 atomic_set(&task->abort_request, 0);
14921149 task->task_index = atomic_fetch_inc(&mpp->task_index);
1150
+ task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
14931151 INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
14941152
14951153 atomic_inc(&session->task_count);
....@@ -1503,6 +1161,7 @@
15031161 mutex_lock(&mpp->queue->pending_lock);
15041162 list_add_tail(&task->queue_link, &mpp->queue->pending_list);
15051163 mutex_unlock(&mpp->queue->pending_lock);
1164
+ atomic_inc(&link_dec->task_pending);
15061165
15071166 /* push current task to queue */
15081167 atomic_inc(&mpp->task_count);
....@@ -1519,7 +1178,6 @@
15191178 {
15201179 struct mpp_dev *mpp = session->mpp;
15211180 struct mpp_task *mpp_task;
1522
- struct rkvdec2_task *task;
15231181 int ret;
15241182
15251183 mpp_task = mpp_session_get_pending_task(session);
....@@ -1528,20 +1186,16 @@
15281186 return -EIO;
15291187 }
15301188
1531
- task = to_rkvdec2_task(mpp_task);
1532
- ret = wait_event_timeout(task->wait, task_is_done(mpp_task),
1533
- msecs_to_jiffies(WAIT_TIMEOUT_MS));
1534
- if (ret) {
1535
- ret = rkvdec2_result(mpp, mpp_task, msgs);
1189
+ ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task));
1190
+ if (ret == -ERESTARTSYS)
1191
+ mpp_err("wait task break by signal\n");
15361192
1537
- mpp_session_pop_done(session, mpp_task);
1538
- } else {
1539
- mpp_err("task %d:%d statue %lx timeout -> abort\n",
1540
- session->index, mpp_task->task_index, mpp_task->state);
1193
+ ret = rkvdec2_result(mpp, mpp_task, msgs);
15411194
1542
- atomic_inc(&mpp_task->abort_request);
1543
- set_bit(TASK_STATE_ABORT, &mpp_task->state);
1544
- }
1195
+ mpp_session_pop_done(session, mpp_task);
1196
+ mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
1197
+ session->device_type, session->index, atomic_read(&session->task_count),
1198
+ mpp_task->task_index, mpp_task->state);
15451199
15461200 mpp_session_pop_pending(session, mpp_task);
15471201 return ret;
....@@ -1550,34 +1204,25 @@
15501204 void rkvdec2_link_worker(struct kthread_work *work_s)
15511205 {
15521206 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1553
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1554
- struct rkvdec_link_dev *link_dec = dec->link_dec;
15551207 struct mpp_task *task;
15561208 struct mpp_taskqueue *queue = mpp->queue;
1209
+ u32 all_done;
15571210
15581211 mpp_debug_enter();
15591212
1560
- /*
1561
- * process timeout and finished task.
1562
- */
1213
+ /* dequeue running task */
15631214 rkvdec2_link_try_dequeue(mpp);
15641215
1565
-again:
1216
+ /* process reset */
15661217 if (atomic_read(&mpp->reset_request)) {
1567
- if (link_dec->task_running || link_dec->task_prepared)
1568
- goto done;
1569
-
1570
- disable_irq(mpp->irq);
1571
- mpp_iommu_disable_irq(mpp->iommu_info);
15721218 rkvdec2_link_reset(mpp);
1573
- link_dec->task_decoded = 0;
1574
- link_dec->task_total = 0;
1575
- enable_irq(mpp->irq);
1576
- mpp_iommu_enable_irq(mpp->iommu_info);
1219
+ /* resend running task after reset */
1220
+ if (!list_empty(&queue->running_list))
1221
+ rkvdec2_link_resend(mpp);
15771222 }
1578
- /*
1579
- * process pending queue to find the task to accept.
1580
- */
1223
+
1224
+again:
1225
+ /* get pending task to process */
15811226 mutex_lock(&queue->pending_lock);
15821227 task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
15831228 queue_link);
....@@ -1585,9 +1230,8 @@
15851230 if (!task)
15861231 goto done;
15871232
1588
- if (test_bit(TASK_STATE_ABORT, &task->state)) {
1589
- struct rkvdec2_task *dec_task = to_rkvdec2_task(task);
1590
-
1233
+ /* check abort task */
1234
+ if (atomic_read(&task->abort_request)) {
15911235 mutex_lock(&queue->pending_lock);
15921236 list_del_init(&task->queue_link);
15931237
....@@ -1595,48 +1239,28 @@
15951239 set_bit(TASK_STATE_PROC_DONE, &task->state);
15961240
15971241 mutex_unlock(&queue->pending_lock);
1598
- wake_up(&dec_task->wait);
1242
+ wake_up(&task->wait);
15991243 kref_put(&task->ref, rkvdec2_link_free_task);
16001244 goto again;
16011245 }
16021246
1603
- /*
1604
- * if target device can accept more task send the task to run.
1605
- */
1606
- if (link_dec->task_running >= link_dec->task_capacity - 2)
1607
- goto done;
1608
-
1609
- if (mpp_task_queue(mpp, task)) {
1610
- /* failed to run */
1611
- mpp_err("%p failed to process task %p:%d\n",
1612
- mpp, task, task->task_index);
1613
- } else {
1614
- mutex_lock(&queue->pending_lock);
1615
- set_bit(TASK_STATE_RUNNING, &task->state);
1616
- list_move_tail(&task->queue_link, &queue->running_list);
1617
- mutex_unlock(&queue->pending_lock);
1247
+ /* queue task to hw */
1248
+ if (!mpp_task_queue(mpp, task))
16181249 goto again;
1619
- }
1250
+
16201251 done:
1621
- mpp_debug_leave();
16221252
1623
- if (link_dec->task_irq != link_dec->task_irq_prev ||
1624
- atomic_read(&link_dec->task_timeout) != link_dec->task_timeout_prev)
1625
- rkvdec2_link_trigger_work(mpp);
1253
+ /* if no task in pending and running list, power off device */
1254
+ mutex_lock(&queue->pending_lock);
1255
+ all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
1256
+ mutex_unlock(&queue->pending_lock);
16261257
1627
- /* if no task for running power off device */
1628
- {
1629
- u32 all_done = 0;
1630
-
1631
- mutex_lock(&queue->pending_lock);
1632
- all_done = list_empty(&queue->pending_list);
1633
- mutex_unlock(&queue->pending_lock);
1634
-
1635
- if (all_done && !link_dec->task_running && !link_dec->task_prepared)
1636
- rkvdec2_link_power_off(mpp);
1637
- }
1258
+ if (all_done)
1259
+ rkvdec2_link_power_off(mpp);
16381260
16391261 mpp_session_cleanup_detach(queue, work_s);
1262
+
1263
+ mpp_debug_leave();
16401264 }
16411265
16421266 void rkvdec2_link_session_deinit(struct mpp_session *session)
....@@ -1667,3 +1291,1221 @@
16671291
16681292 mpp_debug_leave();
16691293 }
1294
+
1295
+#define RKVDEC2_1080P_PIXELS (1920*1080)
1296
+#define RKVDEC2_4K_PIXELS (4096*2304)
1297
+#define RKVDEC2_8K_PIXELS (7680*4320)
1298
+#define RKVDEC2_CCU_TIMEOUT_20MS (0xefffff)
1299
+#define RKVDEC2_CCU_TIMEOUT_50MS (0x2cfffff)
1300
+#define RKVDEC2_CCU_TIMEOUT_100MS (0x4ffffff)
1301
+
1302
+static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
1303
+{
1304
+ u32 pixels = task->pixels;
1305
+
1306
+ if (pixels < RKVDEC2_1080P_PIXELS)
1307
+ return RKVDEC2_CCU_TIMEOUT_20MS;
1308
+ else if (pixels < RKVDEC2_4K_PIXELS)
1309
+ return RKVDEC2_CCU_TIMEOUT_50MS;
1310
+ else
1311
+ return RKVDEC2_CCU_TIMEOUT_100MS;
1312
+}
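/* Note: the ccu timeout threshold scales with frame size: below 1080p use the 20 ms value, below 4K the 50 ms value, otherwise the 100 ms value. */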
1313
+
1314
+int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1315
+{
1316
+ int ret;
1317
+ struct device_node *np;
1318
+ struct platform_device *pdev;
1319
+ struct rkvdec2_ccu *ccu;
1320
+
1321
+ mpp_debug_enter();
1322
+
1323
+ np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1324
+ if (!np || !of_device_is_available(np))
1325
+ return -ENODEV;
1326
+
1327
+ pdev = of_find_device_by_node(np);
1328
+ of_node_put(np);
1329
+ if (!pdev)
1330
+ return -ENODEV;
1331
+
1332
+ ccu = platform_get_drvdata(pdev);
1333
+ if (!ccu)
1334
+ return -ENOMEM;
1335
+
1336
+ ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1337
+ if (ret)
1338
+ return ret;
1339
+ dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1340
+
1341
+	/* if not the main core, attach the main core's iommu domain to the current core */
1342
+ if (dec->mpp.core_id != 0) {
1343
+ struct mpp_taskqueue *queue;
1344
+ struct mpp_iommu_info *ccu_info, *cur_info;
1345
+
1346
+ queue = dec->mpp.queue;
1347
+ /* set the ccu-domain for current device */
1348
+ ccu_info = queue->cores[0]->iommu_info;
1349
+ cur_info = dec->mpp.iommu_info;
1350
+ if (cur_info)
1351
+ cur_info->domain = ccu_info->domain;
1352
+ mpp_iommu_attach(cur_info);
1353
+ }
1354
+
1355
+ dec->ccu = ccu;
1356
+
1357
+ dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1358
+	mpp_debug_leave();
1359
+
1360
+ return 0;
1361
+}
1362
+
1363
+static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
1364
+{
1365
+ struct mpp_dev *mpp;
1366
+ struct mpp_task *task = container_of(to_delayed_work(work_s),
1367
+ struct mpp_task, timeout_work);
1368
+
1369
+ if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1370
+ mpp_err("task %d state %lx has been handled\n",
1371
+ task->task_id, task->state);
1372
+ return;
1373
+ }
1374
+
1375
+ if (!task->session) {
1376
+ mpp_err("task %d session is null.\n", task->task_id);
1377
+ return;
1378
+ }
1379
+ mpp = mpp_get_task_used_device(task, task->session);
1380
+ mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
1381
+ task->task_index, task->state);
1382
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
1383
+ atomic_inc(&mpp->reset_request);
1384
+ atomic_inc(&mpp->queue->reset_request);
1385
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1386
+}
1387
+
1388
+int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1389
+{
1390
+ struct resource *res;
1391
+ struct rkvdec_link_dev *link_dec;
1392
+ struct device *dev = &pdev->dev;
1393
+
1394
+ mpp_debug_enter();
1395
+
1396
+ /* link structure */
1397
+ link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1398
+ if (!link_dec)
1399
+ return -ENOMEM;
1400
+
1401
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1402
+ if (!res)
1403
+ return -ENOMEM;
1404
+
1405
+ link_dec->info = dec->mpp.var->hw_info->link_info;
1406
+ link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1407
+ if (!link_dec->reg_base) {
1408
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
1409
+ return -ENOMEM;
1410
+ }
1411
+
1412
+ dec->link_dec = link_dec;
1413
+
1414
+ mpp_debug_leave();
1415
+
1416
+ return 0;
1417
+}
1418
+
1419
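+/*
+ * Power on the ccu and every core in the queue exactly once (guarded by
+ * ccu->power_enabled): runtime pm and clocks on, clock rates restored to
+ * normal and the per-core iommus activated.
+ */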
+static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1420
+ struct rkvdec2_ccu *ccu)
1421
+{
1422
+ if (!atomic_xchg(&ccu->power_enabled, 1)) {
1423
+ u32 i;
1424
+ struct mpp_dev *mpp;
1425
+
1426
+ /* ccu pd and clk on */
1427
+ pm_runtime_get_sync(ccu->dev);
1428
+ pm_stay_awake(ccu->dev);
1429
+ mpp_clk_safe_enable(ccu->aclk_info.clk);
1430
+ /* core pd and clk on */
1431
+ for (i = 0; i < queue->core_count; i++) {
1432
+ struct rkvdec2_dev *dec;
1433
+
1434
+ mpp = queue->cores[i];
1435
+ dec = to_rkvdec2_dev(mpp);
1436
+ pm_runtime_get_sync(mpp->dev);
1437
+ pm_stay_awake(mpp->dev);
1438
+ if (mpp->hw_ops->clk_on)
1439
+ mpp->hw_ops->clk_on(mpp);
1440
+
1441
+ mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1442
+ mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1443
+ mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1444
+ mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
1445
+ mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1446
+ }
1447
+ mpp_debug(DEBUG_CCU, "power on\n");
1448
+ }
1449
+
1450
+ return 0;
1451
+}
1452
+
1453
+static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1454
+ struct rkvdec2_ccu *ccu)
1455
+{
1456
+ if (atomic_xchg(&ccu->power_enabled, 0)) {
1457
+ u32 i;
1458
+ struct mpp_dev *mpp;
1459
+
1460
+ /* ccu pd and clk off */
1461
+ mpp_clk_safe_disable(ccu->aclk_info.clk);
1462
+ pm_relax(ccu->dev);
1463
+ pm_runtime_mark_last_busy(ccu->dev);
1464
+ pm_runtime_put_autosuspend(ccu->dev);
1465
+ /* core pd and clk off */
1466
+ for (i = 0; i < queue->core_count; i++) {
1467
+ mpp = queue->cores[i];
1468
+
1469
+ if (mpp->hw_ops->clk_off)
1470
+ mpp->hw_ops->clk_off(mpp);
1471
+ pm_relax(mpp->dev);
1472
+ pm_runtime_mark_last_busy(mpp->dev);
1473
+ pm_runtime_put_autosuspend(mpp->dev);
1474
+ mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1475
+ }
1476
+ mpp_debug(DEBUG_CCU, "power off\n");
1477
+ }
1478
+
1479
+ return 0;
1480
+}
1481
+
1482
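+/*
+ * Retire tasks from the head of the running list that have finished,
+ * timed out or been aborted; stop at the first task that is still busy.
+ */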
+static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1483
+{
1484
+ struct mpp_task *mpp_task = NULL, *n;
1485
+
1486
+ mpp_debug_enter();
1487
+
1488
+ list_for_each_entry_safe(mpp_task, n,
1489
+ &queue->running_list,
1490
+ queue_link) {
1491
+ struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1492
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1493
+ u32 irq_status = mpp->irq_status;
1494
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1495
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1496
+ u32 timing_en = mpp->srv->timing_en;
1497
+
1498
+ if (irq_status || timeout_flag || abort_flag) {
1499
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1500
+
1501
+ if (timing_en) {
1502
+ mpp_task->on_irq = ktime_get();
1503
+ set_bit(TASK_TIMING_IRQ, &mpp_task->state);
1504
+
1505
+ mpp_task->on_cancel_timeout = mpp_task->on_irq;
1506
+ set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
1507
+
1508
+ mpp_task->on_isr = mpp_task->on_irq;
1509
+ set_bit(TASK_TIMING_ISR, &mpp_task->state);
1510
+ }
1511
+
1512
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1513
+ cancel_delayed_work(&mpp_task->timeout_work);
1514
+ mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
1515
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
1516
+ task->irq_status = irq_status;
1517
+ mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1518
+ irq_status, timeout_flag, abort_flag);
1519
+ if (irq_status && mpp->dev_ops->finish)
1520
+ mpp->dev_ops->finish(mpp, mpp_task);
1521
+ else
1522
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1523
+
1524
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
1525
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
1526
+
1527
+ set_bit(mpp->core_id, &queue->core_idle);
1528
+ mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1529
+ /* Wake up the GET thread */
1530
+ wake_up(&mpp_task->wait);
1531
+ /* free task */
1532
+ list_del_init(&mpp_task->queue_link);
1533
+ kref_put(&mpp_task->ref, mpp_free_task);
1534
+ } else {
1535
+			/* NOTE: break at the first unfinished task */
1536
+ break;
1537
+ }
1538
+ }
1539
+
1540
+ mpp_debug_leave();
1541
+ return 0;
1542
+}
1543
+
1544
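+/*
+ * Reset every enabled core: detach it from the ccu, soft reset it (via
+ * sip when available), refresh its iommu and reconnect it to the ccu.
+ */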
+static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1545
+ struct rkvdec2_ccu *ccu)
1546
+{
1547
+ int i;
1548
+
1549
+ for (i = queue->core_count - 1; i >= 0; i--) {
1550
+ u32 val;
1551
+
1552
+ struct mpp_dev *mpp = queue->cores[i];
1553
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1554
+
1555
+ if (mpp->disable)
1556
+ continue;
1557
+
1558
+ dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status);
1559
+ disable_hardirq(mpp->irq);
1560
+
1561
+		/* force idle, disconnect core and ccu */
1562
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1563
+
1564
+ /* soft reset */
1565
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1566
+ udelay(5);
1567
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1568
+ if (!(val & RKVDEC_SOFT_RESET_READY))
1569
+ mpp_err("soft reset fail, int %08x\n", val);
1570
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1571
+
1572
+ /* check bus idle */
1573
+ val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1574
+ if (!(val & RKVDEC_BIT_BUS_IDLE))
1575
+ mpp_err("bus busy\n");
1576
+
1577
+ if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1578
+ /* sip reset */
1579
+ rockchip_dmcfreq_lock();
1580
+ sip_smc_vpu_reset(i, 0, 0);
1581
+ rockchip_dmcfreq_unlock();
1582
+ } else {
1583
+ rkvdec2_reset(mpp);
1584
+ }
1585
+ /* clear error mask */
1586
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1587
+ ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1588
+ /* connect core and ccu */
1589
+ writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1590
+ ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1591
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1592
+ atomic_set(&mpp->reset_request, 0);
1593
+
1594
+ enable_irq(mpp->irq);
1595
+ dev_info(mpp->dev, "reset done\n");
1596
+ }
1597
+ atomic_set(&queue->reset_request, 0);
1598
+
1599
+ return 0;
1600
+}
1601
+
1602
+void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1603
+ struct mpp_task_msgs *msgs)
1604
+{
1605
+ int ret;
1606
+ struct rkvdec2_task *task;
1607
+
1608
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
1609
+ if (!task)
1610
+ return NULL;
1611
+
1612
+ ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1613
+ if (ret) {
1614
+ kfree(task);
1615
+ return NULL;
1616
+ }
1617
+
1618
+ return &task->mpp_task;
1619
+}
1620
+
1621
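+/* find the core that owns the faulting iommu and dump both mmu status sets */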
+static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue,
1622
+ struct device *iommu_dev)
1623
+{
1624
+ struct mpp_dev *mpp = NULL;
1625
+ struct rkvdec2_dev *dec = NULL;
1626
+ u32 mmu[2] = {0, 0x40};
1627
+ u32 i;
1628
+
1629
+ for (i = 0; i < queue->core_count; i++) {
1630
+ struct mpp_dev *core = queue->cores[i];
1631
+
1632
+ if (&core->iommu_info->pdev->dev == iommu_dev) {
1633
+ mpp = core;
1634
+ dec = to_rkvdec2_dev(mpp);
1635
+ }
1636
+ }
1637
+
1638
+ if (!dec || !dec->mmu_base)
1639
+ goto out;
1640
+
1641
+ /* there are two iommus */
1642
+ for (i = 0; i < 2; i++) {
1643
+ u32 status = readl(dec->mmu_base + mmu[i] + 0x4);
1644
+ u32 iova = readl(dec->mmu_base + mmu[i] + 0xc);
1645
+ u32 is_write = (status & BIT(5)) ? 1 : 0;
1646
+
1647
+ if (status && iova)
1648
+			dev_err(iommu_dev, "core %d pagefault at iova %#08x type %s status %#x\n",
1649
+ mpp->core_id, iova, is_write ? "write" : "read", status);
1650
+ }
1651
+out:
1652
+ return mpp;
1653
+}
1654
+
1655
+int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1656
+ struct device *iommu_dev,
1657
+ unsigned long iova, int status, void *arg)
1658
+{
1659
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1660
+ struct mpp_taskqueue *queue = mpp->queue;
1661
+ struct mpp_task *mpp_task;
1662
+
1663
+ mpp_debug_enter();
1664
+
1665
+ mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
1666
+ if (!mpp) {
1667
+ dev_err(iommu_dev, "iommu fault, but no dev match\n");
1668
+ return 0;
1669
+ }
1670
+ mpp_task = mpp->cur_task;
1671
+ if (mpp_task)
1672
+ mpp_task_dump_mem_region(mpp, mpp_task);
1673
+
1674
+ /*
1675
+	 * Mask the iommu irq so the iommu does not keep triggering pagefaults
1676
+	 * until the faulting task is finished by the hw timeout.
1677
+ */
1678
+ rockchip_iommu_mask_irq(mpp->dev);
1679
+ atomic_inc(&mpp->queue->reset_request);
1680
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1681
+
1682
+ mpp_debug_leave();
1683
+
1684
+ return 0;
1685
+}
1686
+
1687
+int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1688
+ struct device *iommu_dev,
1689
+ unsigned long iova, int status, void *arg)
1690
+{
1691
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
1692
+ struct mpp_taskqueue *queue = mpp->queue;
1693
+ struct mpp_task *mpp_task = NULL, *n;
1694
+ struct rkvdec2_dev *dec;
1695
+ u32 err_task_iova;
1696
+
1697
+ mpp_debug_enter();
1698
+
1699
+ mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
1700
+ if (!mpp) {
1701
+ dev_err(iommu_dev, "iommu fault, but no dev match\n");
1702
+ return 0;
1703
+ }
1704
+
1705
+ dec = to_rkvdec2_dev(mpp);
1706
+ err_task_iova = readl(dec->link_dec->reg_base + 0x4);
1707
+ dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova);
1708
+ rockchip_iommu_mask_irq(mpp->dev);
1709
+
1710
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
1711
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1712
+
1713
+ if ((u32)task->table->iova == err_task_iova) {
1714
+ mpp_task_dump_mem_region(mpp, mpp_task);
1715
+ set_bit(TASK_STATE_ABORT, &mpp_task->state);
1716
+ break;
1717
+ }
1718
+ }
1719
+ atomic_inc(&mpp->queue->reset_request);
1720
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1721
+
1722
+ mpp_debug_leave();
1723
+
1724
+ return 0;
1725
+}
1726
+
1727
+irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1728
+{
1729
+ struct mpp_dev *mpp = param;
1730
+ u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1731
+
1732
+ if (irq_status & RKVDEC_IRQ_RAW) {
1733
+ mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1734
+ if (irq_status & RKVDEC_INT_ERROR_MASK) {
1735
+ atomic_inc(&mpp->reset_request);
1736
+ atomic_inc(&mpp->queue->reset_request);
1737
+ }
1738
+ mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1739
+ mpp->irq_status = irq_status;
1740
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
1741
+ return IRQ_HANDLED;
1742
+ }
1743
+ return IRQ_NONE;
1744
+}
1745
+
1746
+static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1747
+{
1748
+ u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1749
+
1750
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1751
+
1752
+ reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1753
+
1754
+ return 0;
1755
+}
1756
+
1757
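+/*
+ * Soft-ccu dispatch: put the link block and ccu into work mode, set up the
+ * caches, write the task registers directly to the core and hit start.
+ */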
+static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1758
+{
1759
+ u32 i, reg_en, reg;
1760
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1761
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1762
+ u32 timing_en = mpp->srv->timing_en;
1763
+
1764
+ mpp_debug_enter();
1765
+
1766
+ /* set reg for link */
1767
+ reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1768
+ writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1769
+
1770
+ /* set reg for ccu */
1771
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1772
+ writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1773
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1774
+
1775
+ /* set cache size */
1776
+ reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1777
+ RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1778
+ if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1779
+ reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1780
+
1781
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1782
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1783
+ mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1784
+ /* clear cache */
1785
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1786
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1787
+ mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1788
+
1789
+ mpp_iommu_flush_tlb(mpp->iommu_info);
1790
+ /* disable multicore pu/colmv offset req timeout reset */
1791
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
1792
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
1793
+ /* set registers for hardware */
1794
+ reg_en = mpp_task->hw_info->reg_en;
1795
+ for (i = 0; i < task->w_req_cnt; i++) {
1796
+ int s, e;
1797
+ struct mpp_request *req = &task->w_reqs[i];
1798
+
1799
+ s = req->offset / sizeof(u32);
1800
+ e = s + req->size / sizeof(u32);
1801
+ mpp_write_req(mpp, task->reg, s, e, reg_en);
1802
+ }
1803
+ /* init current task */
1804
+ mpp->cur_task = mpp_task;
1805
+
1806
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
1807
+
1808
+ mpp->irq_status = 0;
1809
+ writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1810
+	/* Flush the registers before starting the device */
1811
+ wmb();
1812
+ mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1813
+
1814
+ mpp_task_run_end(mpp_task, timing_en);
1815
+
1816
+ mpp_debug_leave();
1817
+
1818
+ return 0;
1819
+}
1820
+
1821
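+/* pick the idle, enabled core with the fewest dispatched tasks and claim it */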
+static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1822
+ struct mpp_task *mpp_task)
1823
+{
1824
+ u32 i = 0;
1825
+ struct rkvdec2_dev *dec = NULL;
1826
+
1827
+ for (i = 0; i < queue->core_count; i++) {
1828
+ struct mpp_dev *mpp = queue->cores[i];
1829
+ struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);
1830
+
1831
+ if (mpp->disable)
1832
+ continue;
1833
+
1834
+ if (test_bit(i, &queue->core_idle)) {
1835
+ if (!dec) {
1836
+ dec = core;
1837
+ continue;
1838
+ }
1839
+			/* prefer the core with fewer dispatched tasks */
1840
+ if (core->task_index < dec->task_index)
1841
+ dec = core;
1842
+ }
1843
+ }
1844
+	/* an idle core was found */
1845
+ if (dec) {
1846
+ mpp_task->mpp = &dec->mpp;
1847
+ mpp_task->core_id = dec->mpp.core_id;
1848
+ clear_bit(mpp_task->core_id, &queue->core_idle);
1849
+ dec->task_index++;
1850
+ atomic_inc(&dec->mpp.task_count);
1851
+ mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1852
+ return mpp_task->mpp;
1853
+ }
1854
+
1855
+ return NULL;
1856
+}
1857
+
1858
+static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1859
+{
1860
+ struct mpp_dev *mpp;
1861
+ bool flag = false;
1862
+ u32 i = 0;
1863
+
1864
+ for (i = 0; i < queue->core_count; i++) {
1865
+ mpp = queue->cores[i];
1866
+ if (mpp->disable)
1867
+ continue;
1868
+ if (!test_bit(i, &queue->core_idle)) {
1869
+ flag = true;
1870
+ break;
1871
+ }
1872
+ }
1873
+
1874
+ return flag;
1875
+}
1876
+
1877
+void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1878
+{
1879
+ struct mpp_task *mpp_task;
1880
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1881
+ struct mpp_taskqueue *queue = mpp->queue;
1882
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1883
+ u32 timing_en = mpp->srv->timing_en;
1884
+
1885
+ mpp_debug_enter();
1886
+
1887
+ /* 1. process all finished task in running list */
1888
+ rkvdec2_soft_ccu_dequeue(queue);
1889
+
1890
+ /* 2. process reset request */
1891
+ if (atomic_read(&queue->reset_request)) {
1892
+ if (!rkvdec2_core_working(queue)) {
1893
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1894
+ rkvdec2_soft_ccu_reset(queue, dec->ccu);
1895
+ }
1896
+ }
1897
+
1898
+ /* 3. process pending task */
1899
+ while (1) {
1900
+ if (atomic_read(&queue->reset_request))
1901
+ break;
1902
+		/* get one task from the pending list */
1903
+ mutex_lock(&queue->pending_lock);
1904
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
1905
+ struct mpp_task, queue_link);
1906
+ mutex_unlock(&queue->pending_lock);
1907
+ if (!mpp_task)
1908
+ break;
1909
+
1910
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1911
+ mutex_lock(&queue->pending_lock);
1912
+ list_del_init(&mpp_task->queue_link);
1913
+
1914
+ set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
1915
+ set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
1916
+
1917
+ mutex_unlock(&queue->pending_lock);
1918
+ wake_up(&mpp_task->wait);
1919
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
1920
+ continue;
1921
+ }
1922
+		/* find an idle core */
1923
+ mpp = rkvdec2_get_idle_core(queue, mpp_task);
1924
+ if (!mpp)
1925
+ break;
1926
+
1927
+ if (timing_en) {
1928
+ mpp_task->on_run = ktime_get();
1929
+ set_bit(TASK_TIMING_RUN, &mpp_task->state);
1930
+ }
1931
+
1932
+ /* set session index */
1933
+ rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1934
+ /* set rcb buffer */
1935
+ mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1936
+
1937
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
1938
+ rkvdec2_ccu_power_on(queue, dec->ccu);
1939
+ rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1940
+ /* pending to running */
1941
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
1942
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1943
+ }
1944
+
1945
+ /* 4. poweroff when running and pending list are empty */
1946
+ if (list_empty(&queue->running_list) &&
1947
+ list_empty(&queue->pending_list))
1948
+ rkvdec2_ccu_power_off(queue, dec->ccu);
1949
+
1950
+ /* 5. check session detach out of queue */
1951
+ mpp_session_cleanup_detach(queue, work_s);
1952
+
1953
+ mpp_debug_leave();
1954
+}
1955
+
1956
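+/*
+ * Carve the shared link table buffer into task_capacity fixed-size nodes
+ * and park them all on the ccu unused_list.
+ */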
+int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
1957
+ struct rkvdec_link_dev *link_dec)
1958
+{
1959
+ int ret, i;
1960
+ struct mpp_dma_buffer *table;
1961
+ struct mpp_dev *mpp = &dec->mpp;
1962
+
1963
+ mpp_debug_enter();
1964
+
1965
+ /* alloc table pointer array */
1966
+ table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
1967
+ sizeof(*table), GFP_KERNEL | __GFP_ZERO);
1968
+ if (!table)
1969
+ return -ENOMEM;
1970
+
1971
+ /* alloc table buffer */
1972
+ ret = rkvdec2_link_alloc_table(mpp, link_dec);
1973
+ if (ret)
1974
+ return ret;
1975
+
1976
+ /* init table array */
1977
+ dec->ccu->table_array = table;
1978
+ for (i = 0; i < mpp->task_capacity; i++) {
1979
+ table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
1980
+ table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
1981
+ table[i].size = link_dec->link_node_size;
1982
+ INIT_LIST_HEAD(&table[i].link);
1983
+ list_add_tail(&table[i].link, &dec->ccu->unused_list);
1984
+ }
1985
+
1986
+ return 0;
1987
+}
1988
+
1989
+static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
1990
+{
1991
+ u32 i;
1992
+
1993
+ for (i = 0; i < 10; i++)
1994
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1995
+
1996
+ for (i = 16; i < 22; i++)
1997
+ mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1998
+}
1999
+
2000
+static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
2001
+{
2002
+ u32 i;
2003
+
2004
+ for (i = 0; i < 10; i++)
2005
+ mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
2006
+}
2007
+
2008
+static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
2009
+{
2010
+ u32 j;
2011
+
2012
+ if (task) {
2013
+ for (j = 0; j < 273; j++)
2014
+ mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j*4), task->reg[j]);
2015
+ } else {
2016
+ for (j = 0; j < 273; j++)
2017
+ mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j*4));
2018
+ }
2019
+}
2020
+
2021
+irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
2022
+{
2023
+ u32 irq_status;
2024
+ struct mpp_dev *mpp = param;
2025
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2026
+
2027
+ irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2028
+ dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2029
+ if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
2030
+ dec->link_dec->irq_status = irq_status;
2031
+ mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
2032
+ mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
2033
+ mpp->core_id, irq_status, mpp->irq_status);
2034
+
2035
+ writel(irq_status & 0xfffff0ff,
2036
+ dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2037
+
2038
+ kthread_queue_work(&mpp->queue->worker, &mpp->work);
2039
+ return IRQ_HANDLED;
2040
+ }
2041
+
2042
+ return IRQ_NONE;
2043
+}
2044
+
2045
+static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
2046
+{
2047
+ u32 i, off, s, n;
2048
+ struct rkvdec_link_part *part = hw->part_r;
2049
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2050
+
2051
+ mpp_debug_enter();
2052
+
2053
+ for (i = 0; i < hw->part_r_num; i++) {
2054
+ off = part[i].tb_reg_off;
2055
+ s = part[i].reg_start;
2056
+ n = part[i].reg_num;
2057
+ memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
2058
+ }
2059
+ /* revert hack for irq status */
2060
+ task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
2061
+
2062
+ mpp_debug_leave();
2063
+
2064
+ return 0;
2065
+}
2066
+
2067
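+/*
+ * Hard-ccu variant of dequeue: completion status is read back from each
+ * task's link table (tb_reg_int) instead of the core interrupt register.
+ */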
+static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
2068
+ struct rkvdec2_ccu *ccu,
2069
+ struct rkvdec_link_info *hw)
2070
+{
2071
+ struct mpp_task *mpp_task = NULL, *n;
2072
+ u32 dump_reg = 0;
2073
+ u32 dequeue_none = 0;
2074
+
2075
+ mpp_debug_enter();
2076
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
2077
+ u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
2078
+ u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
2079
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2080
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2081
+ u32 irq_status = tb_reg[hw->tb_reg_int];
2082
+ u32 ccu_decoded_num, ccu_total_dec_num;
2083
+
2084
+ ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2085
+ ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2086
+ mpp_debug(DEBUG_IRQ_CHECK,
2087
+ "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2088
+ mpp_task->session->index, mpp_task->task_index, task->width,
2089
+ task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2090
+ timeout_flag, abort_flag, (u32)task->table->iova,
2091
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next],
2092
+ ccu_decoded_num, ccu_total_dec_num);
2093
+
2094
+ if (irq_status || timeout_flag || abort_flag) {
2095
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);
2096
+
2097
+ set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2098
+ cancel_delayed_work(&mpp_task->timeout_work);
2099
+ mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2100
+ mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2101
+ task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA;
2102
+
2103
+ if (irq_status)
2104
+ rkvdec2_hard_ccu_finish(hw, task);
2105
+
2106
+ set_bit(TASK_STATE_FINISH, &mpp_task->state);
2107
+ set_bit(TASK_STATE_DONE, &mpp_task->state);
2108
+
2109
+ if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2110
+ u32 i;
2111
+
2112
+ mpp_err("###### ccu #####\n");
2113
+ rkvdec2_dump_ccu(ccu);
2114
+ for (i = 0; i < queue->core_count; i++) {
2115
+ mpp_err("###### core %d #####\n", i);
2116
+ rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
2117
+ rkvdec2_dump_core(queue->cores[i], task);
2118
+ }
2119
+ dump_reg = 1;
2120
+ }
2121
+ list_move_tail(&task->table->link, &ccu->unused_list);
2122
+ /* free task */
2123
+ list_del_init(&mpp_task->queue_link);
2124
+ /* Wake up the GET thread */
2125
+ wake_up(&mpp_task->wait);
2126
+ if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2127
+ pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n",
2128
+ mpp_task->session->index, mpp_task->task_index,
2129
+ irq_status, timeout_flag, abort_flag);
2130
+ atomic_inc(&queue->reset_request);
2131
+ }
2132
+
2133
+ kref_put(&mpp_task->ref, mpp_free_task);
2134
+ } else {
2135
+ dequeue_none++;
2136
+ /*
2137
+			 * there are only 2 cores, so once more than two unfinished
2138
+			 * tasks have been seen the remaining tasks cannot have been
2139
+			 * started by the hw yet and we can break early.
2140
+ */
2141
+ if (dequeue_none > 2)
2142
+ break;
2143
+ }
2144
+ }
2145
+
2146
+ mpp_debug_leave();
2147
+ return 0;
2148
+}
2149
+
2150
+static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
2151
+{
2152
+ int i = 0;
2153
+
2154
+ mpp_debug_enter();
2155
+
2156
+	/* reset and re-activate each core */
2157
+ for (i = 0; i < queue->core_count; i++) {
2158
+ u32 val = 0;
2159
+ struct mpp_dev *mpp = queue->cores[i];
2160
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2161
+
2162
+ if (mpp->disable)
2163
+ continue;
2164
+ dev_info(mpp->dev, "resetting...\n");
2165
+ disable_hardirq(mpp->irq);
2166
+ /* force idle */
2167
+ writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
2168
+ writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2169
+
2170
+ {
2171
+ /* soft reset */
2172
+ u32 val;
2173
+
2174
+ mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
2175
+ udelay(5);
2176
+ val = mpp_read(mpp, RKVDEC_REG_INT_EN);
2177
+ if (!(val & RKVDEC_SOFT_RESET_READY))
2178
+ mpp_err("soft reset fail, int %08x\n", val);
2179
+
2180
+ // /* cru reset */
2181
+ // dev_info(mpp->dev, "cru reset\n");
2182
+ // rkvdec2_reset(mpp);
2183
+ }
2184
+#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
2185
+ rockchip_dmcfreq_lock();
2186
+ sip_smc_vpu_reset(i, 0, 0);
2187
+ rockchip_dmcfreq_unlock();
2188
+#else
2189
+ rkvdec2_reset(mpp);
2190
+#endif
2191
+ mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
2192
+ enable_irq(mpp->irq);
2193
+ atomic_set(&mpp->reset_request, 0);
2194
+ val = mpp_read_relaxed(mpp, 272*4);
2195
+ dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
2196
+ }
2197
+ /* reset ccu */
2198
+ mpp_safe_reset(ccu->rst_a);
2199
+ udelay(5);
2200
+ mpp_safe_unreset(ccu->rst_a);
2201
+
2202
+ mpp_debug_leave();
2203
+ return 0;
2204
+}
2205
+
2206
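+/*
+ * Fill a free link table node with the task's write registers, chain it
+ * to the next free node and move it to the used list.
+ */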
+static struct mpp_task *
2207
+rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2208
+ struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2209
+{
2210
+ u32 i, off, s, n;
2211
+ u32 *tb_reg;
2212
+ struct mpp_dma_buffer *table = NULL;
2213
+ struct rkvdec_link_part *part;
2214
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2215
+
2216
+ mpp_debug_enter();
2217
+
2218
+ if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2219
+ return mpp_task;
2220
+
2221
+	/* ensure that the current table's iova points to the next link table */
2222
+ {
2223
+ struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2224
+
2225
+ list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2226
+ if (!table0) {
2227
+ table0 = table;
2228
+ continue;
2229
+ }
2230
+ if (!table1)
2231
+ table1 = table;
2232
+ break;
2233
+ }
2234
+ if (!table0 || !table1)
2235
+ return NULL;
2236
+ ((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2237
+ table = table0;
2238
+ }
2239
+
2240
+ /* set session idx */
2241
+ rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2242
+ tb_reg = (u32 *)table->vaddr;
2243
+ part = hw->part_w;
2244
+
2245
+ /* disable multicore pu/colmv offset req timeout reset */
2246
+ task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2247
+ task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2248
+
2249
+ for (i = 0; i < hw->part_w_num; i++) {
2250
+ off = part[i].tb_reg_off;
2251
+ s = part[i].reg_start;
2252
+ n = part[i].reg_num;
2253
+ memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2254
+ }
2255
+
2256
+ /* memset read registers */
2257
+ part = hw->part_r;
2258
+ for (i = 0; i < hw->part_r_num; i++) {
2259
+ off = part[i].tb_reg_off;
2260
+ n = part[i].reg_num;
2261
+ memset(&tb_reg[off], 0, n * sizeof(u32));
2262
+ }
2263
+ list_move_tail(&table->link, &ccu->used_list);
2264
+ task->table = table;
2265
+ set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2266
+ mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2267
+ mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2268
+ ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2269
+
2270
+ mpp_debug_leave();
2271
+
2272
+ return mpp_task;
2273
+}
2274
+
2275
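+/*
+ * Program the per-core rcb base addresses into the registers listed in
+ * rcb_infos; skipped once RKVDEC_CCU_BIT_FIX_RCB has been set in the link block.
+ */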
+static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
2276
+{
2277
+ int ret = 0;
2278
+ u32 i, val;
2279
+ u32 reg, reg_idx, rcb_size, rcb_offset;
2280
+
2281
+ if (!dec->rcb_iova && !dec->rcb_info_count)
2282
+ goto done;
2283
+	/* check whether the rcb registers have already been fixed up */
2284
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2285
+ if (val & RKVDEC_CCU_BIT_FIX_RCB)
2286
+ goto done;
2287
+ /* set registers */
2288
+ rcb_offset = 0;
2289
+ for (i = 0; i < dec->rcb_info_count; i += 2) {
2290
+ reg_idx = dec->rcb_infos[i];
2291
+ rcb_size = dec->rcb_infos[i + 1];
2292
+ mpp_debug(DEBUG_SRAM_INFO,
2293
+ "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
2294
+ reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
2295
+ if ((rcb_offset + rcb_size) > dec->rcb_size) {
2296
+ mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
2297
+ ret = -ENOMEM;
2298
+ goto done;
2299
+ }
2300
+ reg = dec->rcb_iova + rcb_offset;
2301
+ mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
2302
+ rcb_offset += rcb_size;
2303
+ }
2304
+
2305
+ val |= RKVDEC_CCU_BIT_FIX_RCB;
2306
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2307
+done:
2308
+ return ret;
2309
+}
2310
+
2311
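+/*
+ * Queue one prepared link table to the ccu: on first use enable the ccu
+ * and point it at the table, otherwise append the table in add mode.
+ */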
+static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
2312
+ struct mpp_task *mpp_task,
2313
+ struct mpp_taskqueue *queue,
2314
+ struct mpp_dev *mpp)
2315
+{
2316
+ u32 ccu_en, work_mode, link_mode;
2317
+ struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2318
+ u32 timing_en = mpp->srv->timing_en;
2319
+
2320
+ mpp_debug_enter();
2321
+
2322
+ if (test_bit(TASK_STATE_START, &mpp_task->state))
2323
+ goto done;
2324
+
2325
+ ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2326
+ mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
2327
+ if (!ccu_en) {
2328
+ u32 i;
2329
+
2330
+ /* set work mode */
2331
+ work_mode = 0;
2332
+ for (i = 0; i < queue->core_count; i++) {
2333
+ u32 val;
2334
+ struct mpp_dev *core = queue->cores[i];
2335
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2336
+
2337
+ if (mpp->disable)
2338
+ continue;
2339
+ work_mode |= dec->core_mask;
2340
+ rkvdec2_ccu_link_fix_rcb_regs(dec);
2341
+ /* control by ccu */
2342
+ val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2343
+ val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
2344
+ writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2345
+ }
2346
+ writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2347
+ ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2348
+ mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
2349
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
2350
+ readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));
2351
+
2352
+ /* set auto gating */
2353
+ writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
2354
+ /* link start base */
2355
+ writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
2356
+ /* enable link */
2357
+ writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2358
+ }
2359
+
2360
+ /* set link mode */
2361
+ link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
2362
+ writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);
2363
+
2364
+ /* flush tlb before starting hardware */
2365
+ mpp_iommu_flush_tlb(mpp->iommu_info);
2366
+	/* order the link table and register writes before the cfg_done write below */
2367
+ wmb();
2368
+ INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
2369
+ mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
2370
+ /* configure done */
2371
+ writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
2372
+ mpp_task_run_end(mpp_task, timing_en);
2373
+
2374
+ set_bit(TASK_STATE_RUNNING, &mpp_task->state);
2375
+ mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
2376
+ mpp_task->session->index, mpp_task->task_index,
2377
+ (u32)task->table->iova, mpp_task->state,
2378
+ readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
2379
+done:
2380
+ mpp_debug_leave();
2381
+
2382
+ return 0;
2383
+}
2384
+
2385
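+/*
+ * After a reset, re-link the still-unfinished tables in running list
+ * order and resend them to the ccu.
+ */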
+static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2386
+{
2387
+ struct rkvdec2_task *task_pre = NULL;
2388
+ struct mpp_task *loop = NULL, *n;
2389
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2390
+
2391
+	/* re-link unfinished tasks in running list order */
2392
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2393
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2394
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2395
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2396
+
2397
+ if (!irq_status) {
2398
+ if (task_pre) {
2399
+ tb_reg = (u32 *)task_pre->table->vaddr;
2400
+ tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
2401
+ }
2402
+ task_pre = task;
2403
+ }
2404
+ }
2405
+
2406
+ if (task_pre) {
2407
+ struct mpp_dma_buffer *tbl;
2408
+ u32 *tb_reg;
2409
+
2410
+ tbl = list_first_entry_or_null(&dec->ccu->unused_list,
2411
+ struct mpp_dma_buffer, link);
2412
+ WARN_ON(!tbl);
2413
+ if (tbl) {
2414
+ tb_reg = (u32 *)task_pre->table->vaddr;
2415
+ tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
2416
+ }
2417
+ }
2418
+
2419
+ /* resend */
2420
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2421
+ struct rkvdec2_task *task = to_rkvdec2_task(loop);
2422
+ u32 *tb_reg = (u32 *)task->table->vaddr;
2423
+ u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2424
+
2425
+ mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
2426
+ loop->session->index, loop->task_index, (u32)task->table->iova,
2427
+ tb_reg[dec->link_dec->info->tb_reg_next], irq_status);
2428
+
2429
+ if (!irq_status) {
2430
+ cancel_delayed_work(&loop->timeout_work);
2431
+ clear_bit(TASK_STATE_START, &loop->state);
2432
+ rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
2433
+ }
2434
+ }
2435
+}
2436
+
2437
+void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
2438
+{
2439
+ struct mpp_task *mpp_task;
2440
+ struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
2441
+ struct mpp_taskqueue *queue = mpp->queue;
2442
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2443
+
2444
+ mpp_debug_enter();
2445
+
2446
+ /* 1. process all finished task in running list */
2447
+ rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);
2448
+
2449
+ /* 2. process reset request */
2450
+ if (atomic_read(&queue->reset_request) &&
2451
+ (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
2452
+ /*
2453
+		 * cancel the timeout work of tasks on the running list to
2454
+		 * avoid a sw timeout caused by a long reset
2455
+ */
2456
+ struct mpp_task *loop = NULL, *n;
2457
+
2458
+ list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2459
+ cancel_delayed_work(&loop->timeout_work);
2460
+ }
2461
+ /* reset process */
2462
+ rkvdec2_hard_ccu_reset(queue, dec->ccu);
2463
+ atomic_set(&queue->reset_request, 0);
2464
+
2465
+		/* re-link the iova chain of the running tasks and resend them to the hw */
2466
+ if (!list_empty(&queue->running_list))
2467
+ rkvdec2_hard_ccu_resend_tasks(mpp, queue);
2468
+ }
2469
+
2470
+ /* 3. process pending task */
2471
+ while (1) {
2472
+ if (atomic_read(&queue->reset_request))
2473
+ break;
2474
+
2475
+		/* get one task from the pending list */
2476
+ mutex_lock(&queue->pending_lock);
2477
+ mpp_task = list_first_entry_or_null(&queue->pending_list,
2478
+ struct mpp_task, queue_link);
2479
+ mutex_unlock(&queue->pending_lock);
2480
+
2481
+ if (!mpp_task)
2482
+ break;
2483
+ if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
2484
+ mutex_lock(&queue->pending_lock);
2485
+ list_del_init(&mpp_task->queue_link);
2486
+ mutex_unlock(&queue->pending_lock);
2487
+ kref_put(&mpp_task->ref, mpp_free_task);
2488
+ continue;
2489
+ }
2490
+
2491
+ mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
2492
+ if (!mpp_task)
2493
+ break;
2494
+
2495
+ rkvdec2_ccu_power_on(queue, dec->ccu);
2496
+ rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
2497
+ mpp_taskqueue_pending_to_run(queue, mpp_task);
2498
+ }
2499
+
2500
+ /* 4. poweroff when running and pending list are empty */
2501
+ mutex_lock(&queue->pending_lock);
2502
+ if (list_empty(&queue->running_list) &&
2503
+ list_empty(&queue->pending_list))
2504
+ rkvdec2_ccu_power_off(queue, dec->ccu);
2505
+ mutex_unlock(&queue->pending_lock);
2506
+
2507
+ /* 5. check session detach out of queue */
2508
+ mpp_session_cleanup_detach(queue, work_s);
2509
+
2510
+ mpp_debug_leave();
2511
+}