.. | .. |
1 | 1 | // SPDX-License-Identifier: (GPL-2.0+ OR MIT) |
2 | 2 | /* |
3 | | - * Copyright (c) 2022 Rockchip Electronics Co., Ltd |
| 3 | + * Copyright (c) 2021 Rockchip Electronics Co., Ltd |
4 | 4 | * |
5 | 5 | * author: |
6 | 6 | * Ding Wei, leo.ding@rock-chips.com |
.. | .. |
28 | 28 | #include <linux/nospec.h> |
29 | 29 | #include <linux/workqueue.h> |
30 | 30 | #include <linux/dma-iommu.h> |
31 | | -#include <linux/mfd/syscon.h> |
32 | | -#include <linux/rockchip/cpu.h> |
33 | 31 | #include <soc/rockchip/pm_domains.h> |
34 | 32 | #include <soc/rockchip/rockchip_ipa.h> |
35 | 33 | #include <soc/rockchip/rockchip_opp_select.h> |
.. | .. |
43 | 41 | |
44 | 42 | #define RKVENC_SESSION_MAX_BUFFERS 40 |
45 | 43 | #define RKVENC_MAX_CORE_NUM 4 |
| 44 | +#define RKVENC_MAX_DCHS_ID 4 |
| 45 | +#define RKVENC_MAX_SLICE_FIFO_LEN 256 |
46 | 46 | #define RKVENC_SCLR_DONE_STA BIT(2) |
47 | 47 | |
48 | 48 | #define to_rkvenc_info(info) \ |
.. | .. |
119 | 119 | u32 err_mask; |
120 | 120 | }; |
121 | 121 | |
| 122 | +#define INT_STA_ENC_DONE_STA BIT(0) |
| 123 | +#define INT_STA_SCLR_DONE_STA BIT(2) |
| 124 | +#define INT_STA_SLC_DONE_STA BIT(3) |
| 125 | +#define INT_STA_BSF_OFLW_STA BIT(4) |
| 126 | +#define INT_STA_BRSP_OTSD_STA BIT(5) |
| 127 | +#define INT_STA_WBUS_ERR_STA BIT(6) |
| 128 | +#define INT_STA_RBUS_ERR_STA BIT(7) |
| 129 | +#define INT_STA_WDG_STA BIT(8) |
| 130 | + |
| 131 | +#define DCHS_REG_OFFSET (0x304) |
| 132 | +#define DCHS_CLASS_OFFSET (33) |
| 133 | +#define DCHS_TXE (0x10) |
| 134 | +#define DCHS_RXE (0x20) |
| 135 | + |
| 136 | +/* dual core hand-shake info */ |
| 137 | +union rkvenc2_dual_core_handshake_id { |
| 138 | + u64 val; |
| 139 | + struct { |
| 140 | + u32 txid : 2; |
| 141 | + u32 rxid : 2; |
| 142 | + u32 txe : 1; |
| 143 | + u32 rxe : 1; |
| 144 | + u32 working : 1; |
| 145 | + u32 reserve0 : 1; |
| 146 | + u32 txid_orig : 2; |
| 147 | + u32 rxid_orig : 2; |
| 148 | + u32 txid_map : 2; |
| 149 | + u32 rxid_map : 2; |
| 150 | + u32 offset : 11; |
| 151 | + u32 reserve1 : 1; |
| 152 | + u32 txe_orig : 1; |
| 153 | + u32 rxe_orig : 1; |
| 154 | + u32 txe_map : 1; |
| 155 | + u32 rxe_map : 1; |
| 156 | + u32 session_id; |
| 157 | + }; |
| 158 | +}; |
| 159 | + |
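Note on the layout added above: the low 32 bits of val mirror the dual-core handshake register (class PIC word 33, i.e. offset 0x304), and the high 32 bits carry the session id, which is exactly how rkvenc2_setup_task_id() later builds dchs_id.val. A minimal standalone sketch of that packing (illustration only, not part of the patch; it assumes little-endian bit-field layout as on the target SoCs, and collapses the fields not needed here into a reserved run):

/*
 * Illustration only: pack a session id and the DCHS register word into
 * the 64-bit handshake value, then read back the hardware bits.
 */
#include <stdint.h>
#include <stdio.h>

union dchs_id_sketch {
	uint64_t val;
	struct {
		uint32_t txid : 2;
		uint32_t rxid : 2;
		uint32_t txe  : 1;
		uint32_t rxe  : 1;
		uint32_t rest : 26;	/* working/orig/map/offset bits elided */
		uint32_t session_id;
	};
};

int main(void)
{
	union dchs_id_sketch id = { 0 };
	uint32_t pic_reg_33 = 0x10;	/* DCHS_TXE set, as the driver forces */
	uint32_t session_id = 7;	/* hypothetical session index */

	id.val = ((uint64_t)session_id << 32) | pic_reg_33;
	printf("session %u tx %u:%u rx %u:%u\n",
	       id.session_id, id.txid, id.txe, id.rxid, id.rxe);
	return 0;
}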
| 160 | +#define RKVENC2_REG_INT_EN (8) |
| 161 | +#define RKVENC2_BIT_SLICE_DONE_EN BIT(3) |
| 162 | + |
| 163 | +#define RKVENC2_REG_INT_MASK (9) |
| 164 | +#define RKVENC2_BIT_SLICE_DONE_MASK BIT(3) |
| 165 | + |
| 166 | +#define RKVENC2_REG_EXT_LINE_BUF_BASE (22) |
| 167 | + |
| 168 | +#define RKVENC2_REG_ENC_PIC (32) |
| 169 | +#define RKVENC2_BIT_ENC_STND BIT(0) |
| 170 | +#define RKVENC2_BIT_VAL_H264 0 |
| 171 | +#define RKVENC2_BIT_VAL_H265 1 |
| 172 | +#define RKVENC2_BIT_SLEN_FIFO BIT(30) |
| 173 | + |
| 174 | +#define RKVENC2_REG_SLI_SPLIT (56) |
| 175 | +#define RKVENC2_BIT_SLI_SPLIT BIT(0) |
| 176 | +#define RKVENC2_BIT_SLI_FLUSH BIT(15) |
| 177 | + |
| 178 | +#define RKVENC2_REG_SLICE_NUM_BASE (0x4034) |
| 179 | +#define RKVENC2_REG_SLICE_LEN_BASE (0x4038) |
| 180 | + |
| 181 | +#define RKVENC2_REG_ST_BSB (0x402c) |
| 182 | +#define RKVENC2_REG_ADR_BSBT (0x2b0) |
| 183 | +#define RKVENC2_REG_ADR_BSBB (0x2b4) |
| 184 | +#define RKVENC2_REG_ADR_BSBR (0x2b8) |
| 185 | +#define RKVENC2_REG_ADR_BSBS (0x2bc) |
| 186 | + |
| 187 | +union rkvenc2_slice_len_info { |
| 188 | + u32 val; |
| 189 | + |
| 190 | + struct { |
| 191 | + u32 slice_len : 31; |
| 192 | + u32 last : 1; |
| 193 | + }; |
| 194 | +}; |
| 195 | + |
| 196 | +struct rkvenc_poll_slice_cfg { |
| 197 | + s32 poll_type; |
| 198 | + s32 poll_ret; |
| 199 | + s32 count_max; |
| 200 | + s32 count_ret; |
| 201 | + union rkvenc2_slice_len_info slice_info[]; |
| 202 | +}; |
| 203 | + |
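rkvenc2_wait_result() further down consumes this structure from the poll request: the caller sizes slice_info[] via count_max, and the driver writes back packed slice words (length in bits [30:0], bit 31 as the "last" flag) together with count_ret. A hypothetical userspace-side sketch of sizing and decoding that buffer (illustration only; the ioctl plumbing that carries the struct is assumed, not shown):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct poll_slice_cfg_sketch {
	int32_t poll_type;
	int32_t poll_ret;
	int32_t count_max;	/* capacity of slice_info[], set by caller */
	int32_t count_ret;	/* entries written back by the driver      */
	uint32_t slice_info[];	/* packed slice length words               */
};

int main(void)
{
	int32_t count_max = 8;
	struct poll_slice_cfg_sketch *cfg =
		calloc(1, sizeof(*cfg) + count_max * sizeof(uint32_t));
	int i;

	if (!cfg)
		return 1;
	cfg->count_max = count_max;

	/* pretend the driver returned two slices, the second marked last */
	cfg->slice_info[0] = 1200;
	cfg->slice_info[1] = 800 | (1u << 31);
	cfg->count_ret = 2;

	for (i = 0; i < cfg->count_ret; i++)
		printf("slice %d len %u%s\n", i,
		       cfg->slice_info[i] & 0x7fffffff,
		       (cfg->slice_info[i] >> 31) ? " (last)" : "");

	free(cfg);
	return 0;
}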
122 | 204 | struct rkvenc_task { |
123 | 205 | struct mpp_task mpp_task; |
124 | 206 | int fmt; |
.. | .. |
141 | 223 | u32 r_req_cnt; |
142 | 224 | struct mpp_request r_reqs[MPP_MAX_MSG_NUM]; |
143 | 225 | struct mpp_dma_buffer *table; |
144 | | - u32 task_no; |
| 226 | + |
| 227 | + union rkvenc2_dual_core_handshake_id dchs_id; |
| 228 | + |
| 229 | + /* split output / slice mode info */ |
| 230 | + u32 task_split; |
| 231 | + u32 task_split_done; |
| 232 | + u32 last_slice_found; |
| 233 | + u32 slice_wr_cnt; |
| 234 | + u32 slice_rd_cnt; |
| 235 | + DECLARE_KFIFO(slice_info, union rkvenc2_slice_len_info, RKVENC_MAX_SLICE_FIFO_LEN); |
| 236 | + |
| 237 | + /* jpege bitstream */ |
| 238 | + struct mpp_dma_buffer *bs_buf; |
| 239 | + u32 offset_bs; |
145 | 240 | }; |
146 | 241 | |
147 | 242 | #define RKVENC_MAX_RCB_NUM (4) |
.. | .. |
183 | 278 | struct reset_control *rst_a; |
184 | 279 | struct reset_control *rst_h; |
185 | 280 | struct reset_control *rst_core; |
| 281 | + /* for ccu */ |
| 282 | + struct rkvenc_ccu *ccu; |
| 283 | + struct list_head core_link; |
186 | 284 | |
187 | 285 | /* internal rcb-memory */ |
188 | 286 | u32 sram_size; |
.. | .. |
190 | 288 | dma_addr_t sram_iova; |
191 | 289 | u32 sram_enabled; |
192 | 290 | struct page *rcb_page; |
193 | | - struct regmap *grf; |
| 291 | + |
| 292 | + u32 bs_overflow; |
| 293 | + |
| 294 | +#ifdef CONFIG_PM_DEVFREQ |
| 295 | + struct rockchip_opp_info opp_info; |
| 296 | + struct monitor_dev_info *mdev_info; |
| 297 | +#endif |
194 | 298 | }; |
195 | 299 | |
| 300 | +struct rkvenc_ccu { |
| 301 | + u32 core_num; |
| 302 | + /* lock for core attach */ |
| 303 | + struct mutex lock; |
| 304 | + struct list_head core_list; |
| 305 | + struct mpp_dev *main_core; |
| 306 | + |
| 307 | + spinlock_t lock_dchs; |
| 308 | + union rkvenc2_dual_core_handshake_id dchs[RKVENC_MAX_CORE_NUM]; |
| 309 | +}; |
196 | 310 | |
197 | 311 | static struct rkvenc_hw_info rkvenc_v2_hw_info = { |
---|
| 312 | + .hw = { |
---|
| 313 | + .reg_num = 254, |
---|
| 314 | + .reg_id = 0, |
---|
| 315 | + .reg_en = 4, |
---|
| 316 | + .reg_start = 160, |
---|
| 317 | + .reg_end = 253, |
---|
| 318 | + }, |
---|
| 319 | + .reg_class = RKVENC_CLASS_BUTT, |
---|
| 320 | + .reg_msg[RKVENC_CLASS_BASE] = { |
---|
| 321 | + .base_s = 0x0000, |
---|
| 322 | + .base_e = 0x0058, |
---|
| 323 | + }, |
---|
| 324 | + .reg_msg[RKVENC_CLASS_PIC] = { |
---|
| 325 | + .base_s = 0x0280, |
---|
| 326 | + .base_e = 0x03f4, |
---|
| 327 | + }, |
---|
| 328 | + .reg_msg[RKVENC_CLASS_RC] = { |
---|
| 329 | + .base_s = 0x1000, |
---|
| 330 | + .base_e = 0x10e0, |
---|
| 331 | + }, |
---|
| 332 | + .reg_msg[RKVENC_CLASS_PAR] = { |
---|
| 333 | + .base_s = 0x1700, |
---|
| 334 | + .base_e = 0x1cd4, |
---|
| 335 | + }, |
---|
| 336 | + .reg_msg[RKVENC_CLASS_SQI] = { |
---|
| 337 | + .base_s = 0x2000, |
---|
| 338 | + .base_e = 0x21e4, |
---|
| 339 | + }, |
---|
| 340 | + .reg_msg[RKVENC_CLASS_SCL] = { |
---|
| 341 | + .base_s = 0x2200, |
---|
| 342 | + .base_e = 0x2c98, |
---|
| 343 | + }, |
---|
| 344 | + .reg_msg[RKVENC_CLASS_OSD] = { |
---|
| 345 | + .base_s = 0x3000, |
---|
| 346 | + .base_e = 0x347c, |
---|
| 347 | + }, |
---|
| 348 | + .reg_msg[RKVENC_CLASS_ST] = { |
---|
| 349 | + .base_s = 0x4000, |
---|
| 350 | + .base_e = 0x42cc, |
---|
| 351 | + }, |
---|
| 352 | + .reg_msg[RKVENC_CLASS_DEBUG] = { |
---|
| 353 | + .base_s = 0x5000, |
---|
| 354 | + .base_e = 0x5354, |
---|
| 355 | + }, |
---|
| 356 | + .fd_class = RKVENC_CLASS_FD_BUTT, |
---|
| 357 | + .fd_reg[RKVENC_CLASS_FD_BASE] = { |
---|
| 358 | + .class = RKVENC_CLASS_PIC, |
---|
| 359 | + .base_fmt = RKVENC_FMT_BASE, |
---|
| 360 | + }, |
---|
| 361 | + .fd_reg[RKVENC_CLASS_FD_OSD] = { |
---|
| 362 | + .class = RKVENC_CLASS_OSD, |
---|
| 363 | + .base_fmt = RKVENC_FMT_OSD_BASE, |
---|
| 364 | + }, |
---|
| 365 | + .fmt_reg = { |
---|
| 366 | + .class = RKVENC_CLASS_PIC, |
---|
| 367 | + .base = 0x0300, |
---|
| 368 | + .bitpos = 0, |
---|
| 369 | + .bitlen = 1, |
---|
| 370 | + }, |
---|
| 371 | + .enc_start_base = 0x0010, |
---|
| 372 | + .enc_clr_base = 0x0014, |
---|
| 373 | + .int_en_base = 0x0020, |
---|
| 374 | + .int_mask_base = 0x0024, |
---|
| 375 | + .int_clr_base = 0x0028, |
---|
| 376 | + .int_sta_base = 0x002c, |
---|
| 377 | + .enc_wdg_base = 0x0038, |
---|
| 378 | + .err_mask = 0x03f0, |
---|
| 379 | +}; |
---|
| 380 | + |
---|
| 381 | +static struct rkvenc_hw_info rkvenc_540c_hw_info = { |
---|
198 | 382 | .hw = { |
---|
199 | 383 | .reg_num = 254, |
---|
200 | 384 | .reg_id = 0, |
---|
.. | .. |
---|
263 | 447 | .enc_wdg_base = 0x0038, |
---|
264 | 448 | .err_mask = 0x27d0, |
---|
265 | 449 | }; |
---|
266 | | - |
---|
267 | 450 | /* |
---|
268 | 451 | * file handle translate information for v2 |
---|
269 | 452 | */ |
---|
270 | 453 | static const u16 trans_tbl_h264e_v2[] = { |
---|
| 454 | + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, |
---|
| 455 | + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, |
---|
| 456 | + 20, 21, 22, 23, |
---|
| 457 | +}; |
---|
| 458 | + |
---|
| 459 | +static const u16 trans_tbl_h264e_v2_osd[] = { |
---|
| 460 | + 20, 21, 22, 23, 24, 25, 26, 27, |
---|
| 461 | +}; |
---|
| 462 | + |
---|
| 463 | +static const u16 trans_tbl_h265e_v2[] = { |
---|
| 464 | + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, |
---|
| 465 | + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, |
---|
| 466 | + 20, 21, 22, 23, |
---|
| 467 | +}; |
---|
| 468 | + |
---|
| 469 | +static const u16 trans_tbl_h265e_v2_osd[] = { |
---|
| 470 | + 20, 21, 22, 23, 24, 25, 26, 27, |
---|
| 471 | +}; |
---|
| 472 | + |
---|
| 473 | +/* |
---|
| 474 | + * file handle translate information for 540c |
---|
| 475 | + */ |
---|
| 476 | +static const u16 trans_tbl_h264e_540c[] = { |
---|
271 | 477 | 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, |
---|
272 | 478 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, |
---|
273 | 479 | // /* renc and ref wrap */ |
---|
274 | 480 | // 24, 25, 26, 27, |
---|
275 | 481 | }; |
---|
276 | 482 | |
---|
277 | | -static const u16 trans_tbl_h264e_v2_osd[] = { |
---|
| 483 | +static const u16 trans_tbl_h264e_540c_osd[] = { |
---|
278 | 484 | 3, 4, 12, 13, 21, 22, 30, 31, |
---|
279 | 485 | 39, 40, 48, 49, 57, 58, 66, 67, |
---|
280 | 486 | }; |
---|
281 | 487 | |
---|
282 | | -static const u16 trans_tbl_h265e_v2[] = { |
---|
| 488 | +static const u16 trans_tbl_h265e_540c[] = { |
---|
283 | 489 | 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, |
---|
284 | 490 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 |
---|
285 | 491 | }; |
---|
286 | 492 | |
---|
287 | | -static const u16 trans_tbl_h265e_v2_osd[] = { |
---|
| 493 | +static const u16 trans_tbl_h265e_540c_osd[] = { |
---|
288 | 494 | 3, 4, 12, 13, 21, 22, 30, 31, |
---|
289 | 495 | 39, 40, 48, 49, 57, 58, 66, 67, |
---|
290 | 496 | }; |
---|
.. | .. |
---|
315 | 521 | [RKVENC_FMT_H265E_OSD] = { |
---|
316 | 522 | .count = ARRAY_SIZE(trans_tbl_h265e_v2_osd), |
---|
317 | 523 | .table = trans_tbl_h265e_v2_osd, |
---|
| 524 | + }, |
---|
| 525 | +}; |
---|
| 526 | + |
---|
| 527 | +static struct mpp_trans_info trans_rkvenc_540c[] = { |
---|
| 528 | + [RKVENC_FMT_H264E] = { |
---|
| 529 | + .count = ARRAY_SIZE(trans_tbl_h264e_540c), |
---|
| 530 | + .table = trans_tbl_h264e_540c, |
---|
| 531 | + }, |
---|
| 532 | + [RKVENC_FMT_H264E_OSD] = { |
---|
| 533 | + .count = ARRAY_SIZE(trans_tbl_h264e_540c_osd), |
---|
| 534 | + .table = trans_tbl_h264e_540c_osd, |
---|
| 535 | + }, |
---|
| 536 | + [RKVENC_FMT_H265E] = { |
---|
| 537 | + .count = ARRAY_SIZE(trans_tbl_h265e_540c), |
---|
| 538 | + .table = trans_tbl_h265e_540c, |
---|
| 539 | + }, |
---|
| 540 | + [RKVENC_FMT_H265E_OSD] = { |
---|
| 541 | + .count = ARRAY_SIZE(trans_tbl_h265e_540c_osd), |
---|
| 542 | + .table = trans_tbl_h265e_540c_osd, |
---|
318 | 543 | }, |
---|
319 | 544 | [RKVENC_FMT_JPEGE] = { |
---|
320 | 545 | .count = ARRAY_SIZE(trans_tbl_jpege), |
---|
.. | .. |
---|
349 | 574 | |
---|
350 | 575 | for (i = 0; i < reg_class; i++) { |
---|
351 | 576 | kfree(task->reg[i].data); |
---|
| 577 | + task->reg[i].data = NULL; |
---|
352 | 578 | task->reg[i].size = 0; |
---|
353 | 579 | } |
---|
354 | 580 | |
---|
.. | .. |
---|
574 | 800 | return 0; |
---|
575 | 801 | } |
---|
576 | 802 | |
---|
577 | | - |
---|
578 | 803 | static int rkvenc2_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session, |
---|
579 | 804 | struct rkvenc_task *task) |
---|
580 | 805 | { |
---|
.. | .. |
---|
620 | 845 | return 0; |
---|
621 | 846 | } |
---|
622 | 847 | |
---|
| 848 | +static void rkvenc2_setup_task_id(u32 session_id, struct rkvenc_task *task) |
---|
| 849 | +{ |
---|
| 850 | + u32 val = task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET]; |
---|
| 851 | + |
---|
| 852 | + /* always enable tx */ |
---|
| 853 | + val |= DCHS_TXE; |
---|
| 854 | + |
---|
| 855 | + task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = val; |
---|
| 856 | + task->dchs_id.val = (((u64)session_id << 32) | val); |
---|
| 857 | + |
---|
| 858 | + task->dchs_id.txid_orig = task->dchs_id.txid; |
---|
| 859 | + task->dchs_id.rxid_orig = task->dchs_id.rxid; |
---|
| 860 | + task->dchs_id.txid_map = task->dchs_id.txid; |
---|
| 861 | + task->dchs_id.rxid_map = task->dchs_id.rxid; |
---|
| 862 | + |
---|
| 863 | + task->dchs_id.txe_orig = task->dchs_id.txe; |
---|
| 864 | + task->dchs_id.rxe_orig = task->dchs_id.rxe; |
---|
| 865 | + task->dchs_id.txe_map = task->dchs_id.txe; |
---|
| 866 | + task->dchs_id.rxe_map = task->dchs_id.rxe; |
---|
| 867 | +} |
---|
| 868 | + |
---|
| 869 | +static void rkvenc2_check_split_task(struct rkvenc_task *task) |
---|
| 870 | +{ |
---|
| 871 | + u32 slen_fifo_en = 0; |
---|
| 872 | + u32 sli_split_en = 0; |
---|
| 873 | + |
---|
| 874 | + if (task->reg[RKVENC_CLASS_PIC].valid) { |
---|
| 875 | + u32 *reg = task->reg[RKVENC_CLASS_PIC].data; |
---|
| 876 | + u32 enc_stnd = reg[RKVENC2_REG_ENC_PIC] & RKVENC2_BIT_ENC_STND; |
---|
| 877 | + |
---|
| 878 | + slen_fifo_en = (reg[RKVENC2_REG_ENC_PIC] & RKVENC2_BIT_SLEN_FIFO) ? 1 : 0; |
---|
| 879 | + sli_split_en = (reg[RKVENC2_REG_SLI_SPLIT] & RKVENC2_BIT_SLI_SPLIT) ? 1 : 0; |
---|
| 880 | + |
---|
| 881 | + /* |
---|
| 882 | + * FIXUP: rkvenc2 hardware bug: |
---|
| 883 | + * H.264 encoding has bug when external line buffer and slice flush both |
---|
| 884 | + * are enabled. |
---|
| 885 | + */ |
---|
| 886 | + if (sli_split_en && slen_fifo_en && |
---|
| 887 | + enc_stnd == RKVENC2_BIT_VAL_H264 && |
---|
| 888 | + reg[RKVENC2_REG_EXT_LINE_BUF_BASE]) |
---|
| 889 | + reg[RKVENC2_REG_SLI_SPLIT] &= ~RKVENC2_BIT_SLI_FLUSH; |
---|
| 890 | + } |
---|
| 891 | + |
---|
| 892 | + task->task_split = sli_split_en && slen_fifo_en; |
---|
| 893 | + |
---|
| 894 | + if (task->task_split) |
---|
| 895 | + INIT_KFIFO(task->slice_info); |
---|
| 896 | +} |
---|
| 897 | + |
---|
623 | 898 | static void *rkvenc_alloc_task(struct mpp_session *session, |
---|
624 | 899 | struct mpp_task_msgs *msgs) |
---|
625 | 900 | { |
---|
.. | .. |
---|
654 | 929 | u32 off; |
---|
655 | 930 | const u16 *tbl; |
---|
656 | 931 | struct rkvenc_hw_info *hw = task->hw_info; |
---|
| 932 | + int fd_bs = -1; |
---|
657 | 933 | |
---|
658 | 934 | for (i = 0; i < hw->fd_class; i++) { |
---|
659 | 935 | u32 class = hw->fd_reg[i].class; |
---|
.. | .. |
---|
663 | 939 | |
---|
664 | 940 | if (!reg) |
---|
665 | 941 | continue; |
---|
| 942 | + |
---|
| 943 | + if (fmt == RKVENC_FMT_JPEGE && class == RKVENC_CLASS_PIC && fd_bs == -1) { |
---|
| 944 | + int bs_index; |
---|
| 945 | + |
---|
| 946 | + bs_index = mpp->var->trans_info[fmt].table[2]; |
---|
| 947 | + fd_bs = reg[bs_index]; |
---|
| 948 | + task->offset_bs = mpp_query_reg_offset_info(&task->off_inf, |
---|
| 949 | + bs_index + ss); |
---|
| 950 | + } |
---|
666 | 951 | |
---|
667 | 952 | ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL); |
---|
668 | 953 | if (ret) |
---|
.. | .. |
---|
676 | 961 | reg[tbl[j]] += off; |
---|
677 | 962 | } |
---|
678 | 963 | } |
---|
| 964 | + |
---|
| 965 | + if (fd_bs >= 0) { |
---|
| 966 | + struct mpp_dma_buffer *bs_buf = |
---|
| 967 | + mpp_dma_find_buffer_fd(session->dma, fd_bs); |
---|
| 968 | + |
---|
| 969 | + if (bs_buf && task->offset_bs > 0) |
---|
| 970 | + mpp_dma_buf_sync(bs_buf, 0, task->offset_bs, DMA_TO_DEVICE, false); |
---|
| 971 | + task->bs_buf = bs_buf; |
---|
| 972 | + } |
---|
679 | 973 | } |
---|
680 | | - rkvenc2_set_rcbbuf(mpp, session, task); |
---|
| 974 | + rkvenc2_setup_task_id(session->index, task); |
---|
681 | 975 | task->clk_mode = CLK_MODE_NORMAL; |
---|
| 976 | + rkvenc2_check_split_task(task); |
---|
682 | 977 | |
---|
683 | 978 | mpp_debug_leave(); |
---|
684 | 979 | |
---|
.. | .. |
---|
694 | 989 | kfree(task); |
---|
695 | 990 | |
---|
696 | 991 | return NULL; |
---|
| 992 | +} |
---|
| 993 | + |
---|
| 994 | +static void *rkvenc2_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task) |
---|
| 995 | +{ |
---|
| 996 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 997 | + unsigned long core_idle; |
---|
| 998 | + unsigned long flags; |
---|
| 999 | + u32 core_id_max; |
---|
| 1000 | + s32 core_id; |
---|
| 1001 | + u32 i; |
---|
| 1002 | + |
---|
| 1003 | + spin_lock_irqsave(&queue->running_lock, flags); |
---|
| 1004 | + |
---|
| 1005 | + core_idle = queue->core_idle; |
---|
| 1006 | + core_id_max = queue->core_id_max; |
---|
| 1007 | + |
---|
| 1008 | + for (i = 0; i <= core_id_max; i++) { |
---|
| 1009 | + struct mpp_dev *mpp = queue->cores[i]; |
---|
| 1010 | + |
---|
| 1011 | + if (mpp && mpp->disable) |
---|
| 1012 | + clear_bit(i, &core_idle); |
---|
| 1013 | + } |
---|
| 1014 | + |
---|
| 1015 | + core_id = find_first_bit(&core_idle, core_id_max + 1); |
---|
| 1016 | + |
---|
| 1017 | + if (core_id >= core_id_max + 1 || !queue->cores[core_id]) { |
---|
| 1018 | + mpp_task = NULL; |
---|
| 1019 | + mpp_dbg_core("core %d all busy %lx\n", core_id, core_idle); |
---|
| 1020 | + } else { |
---|
| 1021 | + struct rkvenc_task *task = to_rkvenc_task(mpp_task); |
---|
| 1022 | + |
---|
| 1023 | + clear_bit(core_id, &queue->core_idle); |
---|
| 1024 | + mpp_task->mpp = queue->cores[core_id]; |
---|
| 1025 | + mpp_task->core_id = core_id; |
---|
| 1026 | + rkvenc2_set_rcbbuf(mpp_task->mpp, mpp_task->session, task); |
---|
| 1027 | + mpp_dbg_core("core %d set idle %lx -> %lx\n", core_id, |
---|
| 1028 | + core_idle, queue->core_idle); |
---|
| 1029 | + } |
---|
| 1030 | + |
---|
| 1031 | + spin_unlock_irqrestore(&queue->running_lock, flags); |
---|
| 1032 | + |
---|
| 1033 | + return mpp_task; |
---|
| 1034 | +} |
---|
| 1035 | + |
---|
| 1036 | +static void rkvenc2_patch_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task) |
---|
| 1037 | +{ |
---|
| 1038 | + struct rkvenc_ccu *ccu; |
---|
| 1039 | + union rkvenc2_dual_core_handshake_id *dchs; |
---|
| 1040 | + union rkvenc2_dual_core_handshake_id *task_dchs = &task->dchs_id; |
---|
| 1041 | + int core_num; |
---|
| 1042 | + int core_id = enc->mpp.core_id; |
---|
| 1043 | + unsigned long flags; |
---|
| 1044 | + int i; |
---|
| 1045 | + |
---|
| 1046 | + if (!enc->ccu) |
---|
| 1047 | + return; |
---|
| 1048 | + |
---|
| 1049 | + if (core_id >= RKVENC_MAX_CORE_NUM) { |
---|
| 1050 | + dev_err(enc->mpp.dev, "invalid core id %d max %d\n", |
---|
| 1051 | + core_id, RKVENC_MAX_CORE_NUM); |
---|
| 1052 | + return; |
---|
| 1053 | + } |
---|
| 1054 | + |
---|
| 1055 | + ccu = enc->ccu; |
---|
| 1056 | + dchs = ccu->dchs; |
---|
| 1057 | + core_num = ccu->core_num; |
---|
| 1058 | + |
---|
| 1059 | + spin_lock_irqsave(&ccu->lock_dchs, flags); |
---|
| 1060 | + |
---|
| 1061 | + if (dchs[core_id].working) { |
---|
| 1062 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1063 | + |
---|
| 1064 | + mpp_err("can not config when core %d is still working\n", core_id); |
---|
| 1065 | + return; |
---|
| 1066 | + } |
---|
| 1067 | + |
---|
| 1068 | + if (mpp_debug_unlikely(DEBUG_CORE)) |
---|
| 1069 | + pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n", |
---|
| 1070 | + dchs[0].working ? "work" : "idle", |
---|
| 1071 | + dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe, |
---|
| 1072 | + dchs[1].working ? "work" : "idle", |
---|
| 1073 | + dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe, |
---|
| 1074 | + core_id, task_dchs->txid, task_dchs->txe, |
---|
| 1075 | + task_dchs->rxid, task_dchs->rxe); |
---|
| 1076 | + |
---|
| 1077 | + /* always use new id as */ |
---|
| 1078 | + { |
---|
| 1079 | + struct mpp_task *mpp_task = &task->mpp_task; |
---|
| 1080 | + unsigned long id_valid = (unsigned long)-1; |
---|
| 1081 | + int txid_map = -1; |
---|
| 1082 | + int rxid_map = -1; |
---|
| 1083 | + |
---|
| 1084 | + /* scan all used id */ |
---|
| 1085 | + for (i = 0; i < core_num; i++) { |
---|
| 1086 | + if (!dchs[i].working) |
---|
| 1087 | + continue; |
---|
| 1088 | + |
---|
| 1089 | + clear_bit(dchs[i].txid_map, &id_valid); |
---|
| 1090 | + clear_bit(dchs[i].rxid_map, &id_valid); |
---|
| 1091 | + } |
---|
| 1092 | + |
---|
| 1093 | + if (task_dchs->rxe) { |
---|
| 1094 | + for (i = 0; i < core_num; i++) { |
---|
| 1095 | + if (i == core_id) |
---|
| 1096 | + continue; |
---|
| 1097 | + |
---|
| 1098 | + if (!dchs[i].working) |
---|
| 1099 | + continue; |
---|
| 1100 | + |
---|
| 1101 | + if (task_dchs->session_id != dchs[i].session_id) |
---|
| 1102 | + continue; |
---|
| 1103 | + |
---|
| 1104 | + if (task_dchs->rxid_orig != dchs[i].txid_orig) |
---|
| 1105 | + continue; |
---|
| 1106 | + |
---|
| 1107 | + rxid_map = dchs[i].txid_map; |
---|
| 1108 | + break; |
---|
| 1109 | + } |
---|
| 1110 | + } |
---|
| 1111 | + |
---|
| 1112 | + txid_map = find_first_bit(&id_valid, RKVENC_MAX_DCHS_ID); |
---|
| 1113 | + if (txid_map == RKVENC_MAX_DCHS_ID) { |
---|
| 1114 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1115 | + |
---|
| 1116 | + mpp_err("task %d:%d on core %d failed to find a txid\n", |
---|
| 1117 | + mpp_task->session->pid, mpp_task->task_id, |
---|
| 1118 | + mpp_task->core_id); |
---|
| 1119 | + return; |
---|
| 1120 | + } |
---|
| 1121 | + |
---|
| 1122 | + clear_bit(txid_map, &id_valid); |
---|
| 1123 | + task_dchs->txid_map = txid_map; |
---|
| 1124 | + |
---|
| 1125 | + if (rxid_map < 0) { |
---|
| 1126 | + rxid_map = find_first_bit(&id_valid, RKVENC_MAX_DCHS_ID); |
---|
| 1127 | + if (rxid_map == RKVENC_MAX_DCHS_ID) { |
---|
| 1128 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1129 | + |
---|
| 1130 | + mpp_err("task %d:%d on core %d failed to find a rxid\n", |
---|
| 1131 | + mpp_task->session->pid, mpp_task->task_id, |
---|
| 1132 | + mpp_task->core_id); |
---|
| 1133 | + return; |
---|
| 1134 | + } |
---|
| 1135 | + |
---|
| 1136 | + task_dchs->rxe_map = 0; |
---|
| 1137 | + } |
---|
| 1138 | + |
---|
| 1139 | + task_dchs->rxid_map = rxid_map; |
---|
| 1140 | + } |
---|
| 1141 | + |
---|
| 1142 | + task_dchs->txid = task_dchs->txid_map; |
---|
| 1143 | + task_dchs->rxid = task_dchs->rxid_map; |
---|
| 1144 | + task_dchs->rxe = task_dchs->rxe_map; |
---|
| 1145 | + |
---|
| 1146 | + dchs[core_id].val = task_dchs->val; |
---|
| 1147 | + task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = task_dchs->val; |
---|
| 1148 | + |
---|
| 1149 | + dchs[core_id].working = 1; |
---|
| 1150 | + |
---|
| 1151 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1152 | +} |
---|
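rkvenc2_patch_dchs() above hands out tx/rx handshake ids from a small bitmap: ids still held by working cores are cleared from id_valid, and find_first_bit() picks the lowest free one out of RKVENC_MAX_DCHS_ID (4). A standalone sketch of that allocation idea (illustration only, not driver code; the "ids in use" values are made up):

#include <stdio.h>

int main(void)
{
	unsigned long id_valid = ~0UL;	/* every handshake id starts free */
	int ids_in_use[] = { 1, 2 };	/* hypothetical ids held by busy cores */
	int i, txid = -1;

	for (i = 0; i < 2; i++)
		id_valid &= ~(1UL << ids_in_use[i]);

	for (i = 0; i < 4; i++) {	/* find_first_bit over the 4 DCHS ids */
		if (id_valid & (1UL << i)) {
			txid = i;
			break;
		}
	}

	printf("allocated tx id %d\n", txid);	/* prints 0 */
	return 0;
}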
| 1153 | + |
---|
| 1154 | +static void rkvenc2_update_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task) |
---|
| 1155 | +{ |
---|
| 1156 | + struct rkvenc_ccu *ccu = enc->ccu; |
---|
| 1157 | + int core_id = enc->mpp.core_id; |
---|
| 1158 | + unsigned long flags; |
---|
| 1159 | + |
---|
| 1160 | + if (!ccu) |
---|
| 1161 | + return; |
---|
| 1162 | + |
---|
| 1163 | + if (core_id >= RKVENC_MAX_CORE_NUM) { |
---|
| 1164 | + dev_err(enc->mpp.dev, "invalid core id %d max %d\n", |
---|
| 1165 | + core_id, RKVENC_MAX_CORE_NUM); |
---|
| 1166 | + return; |
---|
| 1167 | + } |
---|
| 1168 | + |
---|
| 1169 | + spin_lock_irqsave(&ccu->lock_dchs, flags); |
---|
| 1170 | + ccu->dchs[core_id].val = 0; |
---|
| 1171 | + |
---|
| 1172 | + if (mpp_debug_unlikely(DEBUG_CORE)) { |
---|
| 1173 | + union rkvenc2_dual_core_handshake_id *dchs = ccu->dchs; |
---|
| 1174 | + union rkvenc2_dual_core_handshake_id *task_dchs = &task->dchs_id; |
---|
| 1175 | + |
---|
| 1176 | + pr_info("core %d task done\n", core_id); |
---|
| 1177 | + pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n", |
---|
| 1178 | + dchs[0].working ? "work" : "idle", |
---|
| 1179 | + dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe, |
---|
| 1180 | + dchs[1].working ? "work" : "idle", |
---|
| 1181 | + dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe, |
---|
| 1182 | + core_id, task_dchs->txid, task_dchs->txe, |
---|
| 1183 | + task_dchs->rxid, task_dchs->rxe); |
---|
| 1184 | + } |
---|
| 1185 | + |
---|
| 1186 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
697 | 1187 | } |
---|
698 | 1188 | |
---|
699 | 1189 | static int rkvenc_run(struct mpp_dev *mpp, struct mpp_task *mpp_task) |
---|
.. | .. |
---|
714 | 1204 | |
---|
715 | 1205 | /* clear hardware counter */ |
---|
716 | 1206 | mpp_write_relaxed(mpp, 0x5300, 0x2); |
---|
| 1207 | + |
---|
| 1208 | + rkvenc2_patch_dchs(enc, task); |
---|
717 | 1209 | |
---|
718 | 1210 | for (i = 0; i < task->w_req_cnt; i++) { |
---|
719 | 1211 | int ret; |
---|
.. | .. |
---|
740 | 1232 | } |
---|
741 | 1233 | } |
---|
742 | 1234 | |
---|
| 1235 | + if (mpp_debug_unlikely(DEBUG_CORE)) |
---|
| 1236 | + dev_info(mpp->dev, "core %d dchs %08x\n", mpp->core_id, |
---|
| 1237 | + mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET)); |
---|
| 1238 | + |
---|
743 | 1239 | /* flush tlb before starting hardware */ |
---|
744 | 1240 | mpp_iommu_flush_tlb(mpp->iommu_info); |
---|
745 | 1241 | |
---|
.. | .. |
---|
760 | 1256 | return 0; |
---|
761 | 1257 | } |
---|
762 | 1258 | |
---|
| 1259 | +static void rkvenc2_read_slice_len(struct mpp_dev *mpp, struct rkvenc_task *task) |
---|
| 1260 | +{ |
---|
| 1261 | + u32 last = mpp_read_relaxed(mpp, 0x002c) & INT_STA_ENC_DONE_STA; |
---|
| 1262 | + u32 sli_num = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_NUM_BASE); |
---|
| 1263 | + union rkvenc2_slice_len_info slice_info; |
---|
| 1264 | + u32 task_id = task->mpp_task.task_id; |
---|
| 1265 | + u32 i; |
---|
| 1266 | + |
---|
| 1267 | + mpp_dbg_slice("task %d wr %3d len start %s\n", task_id, |
---|
| 1268 | + sli_num, last ? "last" : ""); |
---|
| 1269 | + |
---|
| 1270 | + for (i = 0; i < sli_num; i++) { |
---|
| 1271 | + slice_info.val = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_LEN_BASE); |
---|
| 1272 | + |
---|
| 1273 | + if (last && i == sli_num - 1) { |
---|
| 1274 | + task->last_slice_found = 1; |
---|
| 1275 | + slice_info.last = 1; |
---|
| 1276 | + } |
---|
| 1277 | + |
---|
| 1278 | + mpp_dbg_slice("task %d wr %3d len %d %s\n", task_id, |
---|
| 1279 | + task->slice_wr_cnt, slice_info.slice_len, |
---|
| 1280 | + slice_info.last ? "last" : ""); |
---|
| 1281 | + |
---|
| 1282 | + kfifo_in(&task->slice_info, &slice_info, 1); |
---|
| 1283 | + task->slice_wr_cnt++; |
---|
| 1284 | + } |
---|
| 1285 | + |
---|
| 1286 | + /* Fixup for async between last flag and slice number register */ |
---|
| 1287 | + if (last && !task->last_slice_found) { |
---|
| 1288 | + mpp_dbg_slice("task %d mark last slice\n", task_id); |
---|
| 1289 | + slice_info.last = 1; |
---|
| 1290 | + slice_info.slice_len = 0; |
---|
| 1291 | + kfifo_in(&task->slice_info, &slice_info, 1); |
---|
| 1292 | + } |
---|
| 1293 | +} |
---|
| 1294 | + |
---|
763 | 1295 | static int rkvenc_irq(struct mpp_dev *mpp) |
---|
764 | 1296 | { |
---|
765 | 1297 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
766 | 1298 | struct rkvenc_hw_info *hw = enc->hw_info; |
---|
| 1299 | + struct mpp_task *mpp_task = NULL; |
---|
| 1300 | + struct rkvenc_task *task = NULL; |
---|
| 1301 | + u32 int_clear = 1; |
---|
| 1302 | + u32 irq_mask = 0; |
---|
| 1303 | + int ret = IRQ_NONE; |
---|
767 | 1304 | |
---|
768 | 1305 | mpp_debug_enter(); |
---|
769 | 1306 | |
---|
770 | 1307 | mpp->irq_status = mpp_read(mpp, hw->int_sta_base); |
---|
771 | 1308 | if (!mpp->irq_status) |
---|
772 | | - return IRQ_NONE; |
---|
773 | | - mpp_write(mpp, hw->int_mask_base, 0x100); |
---|
774 | | - mpp_write(mpp, hw->int_clr_base, 0xffffffff); |
---|
775 | | - udelay(5); |
---|
776 | | - mpp_write(mpp, hw->int_sta_base, 0); |
---|
| 1309 | + return ret; |
---|
| 1310 | + |
---|
| 1311 | + if (mpp->cur_task) { |
---|
| 1312 | + mpp_task = mpp->cur_task; |
---|
| 1313 | + task = to_rkvenc_task(mpp_task); |
---|
| 1314 | + } |
---|
| 1315 | + |
---|
| 1316 | + if (mpp->irq_status & INT_STA_ENC_DONE_STA) { |
---|
| 1317 | + if (task) { |
---|
| 1318 | + if (task->task_split) |
---|
| 1319 | + rkvenc2_read_slice_len(mpp, task); |
---|
| 1320 | + |
---|
| 1321 | + wake_up(&mpp_task->wait); |
---|
| 1322 | + } |
---|
| 1323 | + |
---|
| 1324 | + irq_mask = INT_STA_ENC_DONE_STA; |
---|
| 1325 | + ret = IRQ_WAKE_THREAD; |
---|
| 1326 | + if (enc->bs_overflow) { |
---|
| 1327 | + mpp->irq_status |= INT_STA_BSF_OFLW_STA; |
---|
| 1328 | + enc->bs_overflow = 0; |
---|
| 1329 | + } |
---|
| 1330 | + } else if (mpp->irq_status & INT_STA_SLC_DONE_STA) { |
---|
| 1331 | + if (task && task->task_split) { |
---|
| 1332 | + mpp_time_part_diff(mpp_task); |
---|
| 1333 | + |
---|
| 1334 | + rkvenc2_read_slice_len(mpp, task); |
---|
| 1335 | + wake_up(&mpp_task->wait); |
---|
| 1336 | + } |
---|
| 1337 | + |
---|
| 1338 | + irq_mask = INT_STA_ENC_DONE_STA; |
---|
| 1339 | + int_clear = 0; |
---|
| 1340 | + } else if (mpp->irq_status & INT_STA_BSF_OFLW_STA) { |
---|
| 1341 | + u32 bs_rd = mpp_read(mpp, RKVENC2_REG_ADR_BSBR); |
---|
| 1342 | + u32 bs_wr = mpp_read(mpp, RKVENC2_REG_ST_BSB); |
---|
| 1343 | + u32 bs_top = mpp_read(mpp, RKVENC2_REG_ADR_BSBT); |
---|
| 1344 | + u32 bs_bot = mpp_read(mpp, RKVENC2_REG_ADR_BSBB); |
---|
| 1345 | + |
---|
| 1346 | + if (mpp_task) |
---|
| 1347 | + dev_err(mpp->dev, "task %d found bitstream overflow [%#08x %#08x %#08x %#08x]\n", |
---|
| 1348 | + mpp_task->task_index, bs_top, bs_bot, bs_wr, bs_rd); |
---|
| 1349 | + bs_wr += 128; |
---|
| 1350 | + if (bs_wr >= bs_top) |
---|
| 1351 | + bs_wr = bs_bot; |
---|
| 1352 | + /* clear int first */ |
---|
| 1353 | + mpp_write(mpp, hw->int_clr_base, mpp->irq_status); |
---|
| 1354 | + /* update write addr for enc continue */ |
---|
| 1355 | + mpp_write(mpp, RKVENC2_REG_ADR_BSBS, bs_wr); |
---|
| 1356 | + enc->bs_overflow = 1; |
---|
| 1357 | + irq_mask = 0; |
---|
| 1358 | + int_clear = 0; |
---|
| 1359 | + ret = IRQ_HANDLED; |
---|
| 1360 | + } else { |
---|
| 1361 | + dev_err(mpp->dev, "found error status %08x\n", mpp->irq_status); |
---|
| 1362 | + |
---|
| 1363 | + irq_mask = mpp->irq_status; |
---|
| 1364 | + ret = IRQ_WAKE_THREAD; |
---|
| 1365 | + } |
---|
| 1366 | + |
---|
| 1367 | + if (irq_mask) |
---|
| 1368 | + mpp_write(mpp, hw->int_mask_base, irq_mask); |
---|
| 1369 | + |
---|
| 1370 | + if (int_clear) { |
---|
| 1371 | + mpp_write(mpp, hw->int_clr_base, mpp->irq_status); |
---|
| 1372 | + udelay(5); |
---|
| 1373 | + mpp_write(mpp, hw->int_sta_base, 0); |
---|
| 1374 | + } |
---|
777 | 1375 | |
---|
778 | 1376 | mpp_debug_leave(); |
---|
779 | 1377 | |
---|
780 | | - return IRQ_WAKE_THREAD; |
---|
| 1378 | + return ret; |
---|
781 | 1379 | } |
---|
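The bitstream-overflow branch above bumps the hardware write pointer 128 bytes past the stall point, wraps it back to the buffer bottom when it crosses the top address, and writes it to ADR_BSBS so encoding can continue. A worked example of that pointer arithmetic (illustration only, with made-up addresses):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bs_bot = 0x10000000;	/* hypothetical buffer bottom address */
	uint32_t bs_top = 0x10100000;	/* hypothetical buffer top address    */
	uint32_t bs_wr  = bs_top - 0x40;	/* write pointer stalled near the top */

	bs_wr += 128;			/* skip 128 bytes past the overflow point */
	if (bs_wr >= bs_top)
		bs_wr = bs_bot;		/* wrap back to the bottom of the ring */

	printf("restart write pointer at %#x\n", bs_wr);	/* 0x10000000 */
	return 0;
}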
782 | 1380 | |
---|
783 | 1381 | static int rkvenc_isr(struct mpp_dev *mpp) |
---|
.. | .. |
---|
785 | 1383 | struct rkvenc_task *task; |
---|
786 | 1384 | struct mpp_task *mpp_task; |
---|
787 | 1385 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1386 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 1387 | + unsigned long core_idle; |
---|
788 | 1388 | |
---|
789 | 1389 | mpp_debug_enter(); |
---|
790 | 1390 | |
---|
.. | .. |
---|
797 | 1397 | mpp_task = mpp->cur_task; |
---|
798 | 1398 | mpp_time_diff(mpp_task); |
---|
799 | 1399 | mpp->cur_task = NULL; |
---|
| 1400 | + |
---|
| 1401 | + if (mpp_task->mpp && mpp_task->mpp != mpp) |
---|
| 1402 | + dev_err(mpp->dev, "mismatch core dev %p:%p\n", mpp_task->mpp, mpp); |
---|
| 1403 | + |
---|
800 | 1404 | task = to_rkvenc_task(mpp_task); |
---|
801 | 1405 | task->irq_status = mpp->irq_status; |
---|
802 | | - mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status); |
---|
| 1406 | + |
---|
| 1407 | + rkvenc2_update_dchs(enc, task); |
---|
| 1408 | + |
---|
| 1409 | + mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n", |
---|
| 1410 | + dev_name(mpp->dev), task->irq_status); |
---|
803 | 1411 | |
---|
804 | 1412 | if (task->irq_status & enc->hw_info->err_mask) { |
---|
805 | 1413 | atomic_inc(&mpp->reset_request); |
---|
| 1414 | + |
---|
806 | 1415 | /* dump register */ |
---|
807 | 1416 | if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) |
---|
808 | | - mpp_task_dump_hw_reg(mpp, mpp_task); |
---|
| 1417 | + mpp_task_dump_hw_reg(mpp); |
---|
809 | 1418 | } |
---|
810 | 1419 | |
---|
811 | 1420 | mpp_task_finish(mpp_task->session, mpp_task); |
---|
| 1421 | + |
---|
| 1422 | + core_idle = queue->core_idle; |
---|
| 1423 | + set_bit(mpp->core_id, &queue->core_idle); |
---|
| 1424 | + |
---|
| 1425 | + mpp_dbg_core("core %d isr idle %lx -> %lx\n", mpp->core_id, core_idle, |
---|
| 1426 | + queue->core_idle); |
---|
812 | 1427 | |
---|
813 | 1428 | mpp_debug_leave(); |
---|
814 | 1429 | |
---|
.. | .. |
---|
839 | 1454 | reg[j] = mpp_read_relaxed(mpp, msg.offset + j * sizeof(u32)); |
---|
840 | 1455 | |
---|
841 | 1456 | } |
---|
| 1457 | + |
---|
| 1458 | + if (task->bs_buf) { |
---|
| 1459 | + u32 bs_size = mpp_read(mpp, 0x4064); |
---|
| 1460 | + |
---|
| 1461 | + mpp_dma_buf_sync(task->bs_buf, 0, bs_size / 8 + task->offset_bs, |
---|
| 1462 | + DMA_FROM_DEVICE, true); |
---|
| 1463 | + } |
---|
| 1464 | + |
---|
842 | 1465 | /* revert hack for irq status */ |
---|
843 | 1466 | reg = rkvenc_get_class_reg(task, task->hw_info->int_sta_base); |
---|
844 | 1467 | if (reg) |
---|
.. | .. |
---|
990 | 1613 | } |
---|
991 | 1614 | seq_puts(seq, "\n"); |
---|
992 | 1615 | /* item data*/ |
---|
993 | | - seq_printf(seq, "|%8p|", session); |
---|
| 1616 | + seq_printf(seq, "|%8d|", session->index); |
---|
994 | 1617 | seq_printf(seq, "%8s|", mpp_device_name[session->device_type]); |
---|
995 | 1618 | for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) { |
---|
996 | 1619 | u32 flag = priv->codec_info[i].flag; |
---|
.. | .. |
---|
1023 | 1646 | mutex_lock(&mpp->srv->session_lock); |
---|
1024 | 1647 | list_for_each_entry_safe(session, n, |
---|
1025 | 1648 | &mpp->srv->session_list, |
---|
1026 | | - session_link) { |
---|
| 1649 | + service_link) { |
---|
1027 | 1650 | if (session->device_type != MPP_DEVICE_RKVENC) |
---|
1028 | 1651 | continue; |
---|
1029 | 1652 | if (!session->priv) |
---|
.. | .. |
---|
1039 | 1662 | static int rkvenc_procfs_init(struct mpp_dev *mpp) |
---|
1040 | 1663 | { |
---|
1041 | 1664 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1665 | + char name[32]; |
---|
1042 | 1666 | |
---|
1043 | | - enc->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs); |
---|
| 1667 | + if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name || |
---|
| 1668 | + !mpp->srv || !mpp->srv->procfs) |
---|
| 1669 | + return -EINVAL; |
---|
| 1670 | + |
---|
| 1671 | + snprintf(name, sizeof(name) - 1, "%s%d", |
---|
| 1672 | + mpp->dev->of_node->name, mpp->core_id); |
---|
| 1673 | + |
---|
| 1674 | + enc->procfs = proc_mkdir(name, mpp->srv->procfs); |
---|
1044 | 1675 | if (IS_ERR_OR_NULL(enc->procfs)) { |
---|
1045 | 1676 | mpp_err("failed on open procfs\n"); |
---|
1046 | 1677 | enc->procfs = NULL; |
---|
.. | .. |
---|
1064 | 1695 | return 0; |
---|
1065 | 1696 | } |
---|
1066 | 1697 | |
---|
| 1698 | +static int rkvenc_procfs_ccu_init(struct mpp_dev *mpp) |
---|
| 1699 | +{ |
---|
| 1700 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1701 | + |
---|
| 1702 | + if (!enc->procfs) |
---|
| 1703 | + goto done; |
---|
| 1704 | + |
---|
| 1705 | +done: |
---|
| 1706 | + return 0; |
---|
| 1707 | +} |
---|
1067 | 1708 | #else |
---|
1068 | 1709 | static inline int rkvenc_procfs_remove(struct mpp_dev *mpp) |
---|
1069 | 1710 | { |
---|
.. | .. |
---|
1075 | 1716 | return 0; |
---|
1076 | 1717 | } |
---|
1077 | 1718 | |
---|
| 1719 | +static inline int rkvenc_procfs_ccu_init(struct mpp_dev *mpp) |
---|
| 1720 | +{ |
---|
| 1721 | + return 0; |
---|
| 1722 | +} |
---|
| 1723 | +#endif |
---|
| 1724 | + |
---|
| 1725 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1726 | +static int rk3588_venc_set_read_margin(struct device *dev, |
---|
| 1727 | + struct rockchip_opp_info *opp_info, |
---|
| 1728 | + u32 rm) |
---|
| 1729 | +{ |
---|
| 1730 | + if (!opp_info->grf || !opp_info->volt_rm_tbl) |
---|
| 1731 | + return 0; |
---|
| 1732 | + |
---|
| 1733 | + if (rm == opp_info->current_rm || rm == UINT_MAX) |
---|
| 1734 | + return 0; |
---|
| 1735 | + |
---|
| 1736 | + dev_dbg(dev, "set rm to %d\n", rm); |
---|
| 1737 | + |
---|
| 1738 | + regmap_write(opp_info->grf, 0x214, 0x001c0000 | (rm << 2)); |
---|
| 1739 | + regmap_write(opp_info->grf, 0x218, 0x001c0000 | (rm << 2)); |
---|
| 1740 | + regmap_write(opp_info->grf, 0x220, 0x003c0000 | (rm << 2)); |
---|
| 1741 | + regmap_write(opp_info->grf, 0x224, 0x003c0000 | (rm << 2)); |
---|
| 1742 | + |
---|
| 1743 | + opp_info->current_rm = rm; |
---|
| 1744 | + |
---|
| 1745 | + return 0; |
---|
| 1746 | +} |
---|
| 1747 | + |
---|
| 1748 | +static const struct rockchip_opp_data rk3588_venc_opp_data = { |
---|
| 1749 | + .set_read_margin = rk3588_venc_set_read_margin, |
---|
| 1750 | +}; |
---|
| 1751 | + |
---|
| 1752 | +static const struct of_device_id rockchip_rkvenc_of_match[] = { |
---|
| 1753 | + { |
---|
| 1754 | + .compatible = "rockchip,rk3588", |
---|
| 1755 | + .data = (void *)&rk3588_venc_opp_data, |
---|
| 1756 | + }, |
---|
| 1757 | + {}, |
---|
| 1758 | +}; |
---|
| 1759 | + |
---|
| 1760 | +static struct monitor_dev_profile venc_mdevp = { |
---|
| 1761 | + .type = MONITOR_TYPE_DEV, |
---|
| 1762 | + .update_volt = rockchip_monitor_check_rate_volt, |
---|
| 1763 | +}; |
---|
| 1764 | + |
---|
| 1765 | +static int rkvenc_devfreq_init(struct mpp_dev *mpp) |
---|
| 1766 | +{ |
---|
| 1767 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1768 | + struct clk *clk_core = enc->core_clk_info.clk; |
---|
| 1769 | + struct device *dev = mpp->dev; |
---|
| 1770 | + struct opp_table *reg_table = NULL; |
---|
| 1771 | + struct opp_table *clk_table = NULL; |
---|
| 1772 | + const char *const reg_names[] = { "venc", "mem" }; |
---|
| 1773 | + int ret = 0; |
---|
| 1774 | + |
---|
| 1775 | + if (!clk_core) |
---|
| 1776 | + return 0; |
---|
| 1777 | + |
---|
| 1778 | + if (of_find_property(dev->of_node, "venc-supply", NULL) && |
---|
| 1779 | + of_find_property(dev->of_node, "mem-supply", NULL)) { |
---|
| 1780 | + reg_table = dev_pm_opp_set_regulators(dev, reg_names, 2); |
---|
| 1781 | + if (IS_ERR(reg_table)) |
---|
| 1782 | + return PTR_ERR(reg_table); |
---|
| 1783 | + } else { |
---|
| 1784 | + reg_table = dev_pm_opp_set_regulators(dev, reg_names, 1); |
---|
| 1785 | + if (IS_ERR(reg_table)) |
---|
| 1786 | + return PTR_ERR(reg_table); |
---|
| 1787 | + } |
---|
| 1788 | + |
---|
| 1789 | + clk_table = dev_pm_opp_set_clkname(dev, "clk_core"); |
---|
| 1790 | + if (IS_ERR(clk_table)) |
---|
| 1791 | + return PTR_ERR(clk_table); |
---|
| 1792 | + |
---|
| 1793 | + rockchip_get_opp_data(rockchip_rkvenc_of_match, &enc->opp_info); |
---|
| 1794 | + ret = rockchip_init_opp_table(dev, &enc->opp_info, "leakage", "venc"); |
---|
| 1795 | + if (ret) { |
---|
| 1796 | + dev_err(dev, "failed to init_opp_table\n"); |
---|
| 1797 | + return ret; |
---|
| 1798 | + } |
---|
| 1799 | + |
---|
| 1800 | + enc->mdev_info = rockchip_system_monitor_register(dev, &venc_mdevp); |
---|
| 1801 | + if (IS_ERR(enc->mdev_info)) { |
---|
| 1802 | + dev_dbg(dev, "without system monitor\n"); |
---|
| 1803 | + enc->mdev_info = NULL; |
---|
| 1804 | + } |
---|
| 1805 | + |
---|
| 1806 | + return ret; |
---|
| 1807 | +} |
---|
| 1808 | + |
---|
| 1809 | +static int rkvenc_devfreq_remove(struct mpp_dev *mpp) |
---|
| 1810 | +{ |
---|
| 1811 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1812 | + |
---|
| 1813 | + if (enc->mdev_info) |
---|
| 1814 | + rockchip_system_monitor_unregister(enc->mdev_info); |
---|
| 1815 | + |
---|
| 1816 | + return 0; |
---|
| 1817 | +} |
---|
1078 | 1818 | #endif |
---|
1079 | 1819 | |
---|
1080 | 1820 | static int rkvenc_init(struct mpp_dev *mpp) |
---|
.. | .. |
---|
1082 | 1822 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
1083 | 1823 | int ret = 0; |
---|
1084 | 1824 | |
---|
1085 | | - mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC2]; |
---|
| 1825 | + mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC]; |
---|
1086 | 1826 | |
---|
1087 | 1827 | /* Get clock info from dtsi */ |
---|
1088 | 1828 | ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec"); |
---|
.. | .. |
---|
1113 | 1853 | if (!enc->rst_core) |
---|
1114 | 1854 | mpp_err("No core reset resource define\n"); |
---|
1115 | 1855 | |
---|
| 1856 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1857 | + ret = rkvenc_devfreq_init(mpp); |
---|
| 1858 | + if (ret) |
---|
| 1859 | + mpp_err("failed to add venc devfreq\n"); |
---|
| 1860 | +#endif |
---|
| 1861 | + |
---|
| 1862 | + return 0; |
---|
| 1863 | +} |
---|
| 1864 | + |
---|
| 1865 | +static int rkvenc_exit(struct mpp_dev *mpp) |
---|
| 1866 | +{ |
---|
| 1867 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1868 | + rkvenc_devfreq_remove(mpp); |
---|
| 1869 | +#endif |
---|
| 1870 | + |
---|
1116 | 1871 | return 0; |
---|
1117 | 1872 | } |
---|
1118 | 1873 | |
---|
.. | .. |
---|
1141 | 1896 | { |
---|
1142 | 1897 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
1143 | 1898 | int ret = 0; |
---|
| 1899 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
1144 | 1900 | |
---|
1145 | 1901 | mpp_debug_enter(); |
---|
1146 | 1902 | |
---|
.. | .. |
---|
1160 | 1916 | mpp_safe_unreset(enc->rst_core); |
---|
1161 | 1917 | mpp_pmu_idle_request(mpp, false); |
---|
1162 | 1918 | } |
---|
| 1919 | + |
---|
| 1920 | + set_bit(mpp->core_id, &queue->core_idle); |
---|
| 1921 | + if (enc->ccu) |
---|
| 1922 | + enc->ccu->dchs[mpp->core_id].val = 0; |
---|
| 1923 | + |
---|
| 1924 | + mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, queue->core_idle); |
---|
1163 | 1925 | |
---|
1164 | 1926 | mpp_debug_leave(); |
---|
1165 | 1927 | |
---|
.. | .. |
---|
1199 | 1961 | return 0; |
---|
1200 | 1962 | } |
---|
1201 | 1963 | |
---|
| 1964 | +#define RKVENC2_WORK_TIMEOUT_DELAY (200) |
---|
| 1965 | +#define RKVENC2_WAIT_TIMEOUT_DELAY (2000) |
---|
| 1966 | + |
---|
| 1967 | +static void rkvenc2_task_pop_pending(struct mpp_task *task) |
---|
| 1968 | +{ |
---|
| 1969 | + struct mpp_session *session = task->session; |
---|
| 1970 | + |
---|
| 1971 | + mutex_lock(&session->pending_lock); |
---|
| 1972 | + list_del_init(&task->pending_link); |
---|
| 1973 | + mutex_unlock(&session->pending_lock); |
---|
| 1974 | + |
---|
| 1975 | + kref_put(&task->ref, mpp_free_task); |
---|
| 1976 | +} |
---|
| 1977 | + |
---|
| 1978 | +static int rkvenc2_task_default_process(struct mpp_dev *mpp, |
---|
| 1979 | + struct mpp_task *task) |
---|
| 1980 | +{ |
---|
| 1981 | + int ret = 0; |
---|
| 1982 | + |
---|
| 1983 | + if (mpp->dev_ops && mpp->dev_ops->result) |
---|
| 1984 | + ret = mpp->dev_ops->result(mpp, task, NULL); |
---|
| 1985 | + |
---|
| 1986 | + mpp_debug_func(DEBUG_TASK_INFO, "kref_read %d, ret %d\n", |
---|
| 1987 | + kref_read(&task->ref), ret); |
---|
| 1988 | + |
---|
| 1989 | + rkvenc2_task_pop_pending(task); |
---|
| 1990 | + |
---|
| 1991 | + return ret; |
---|
| 1992 | +} |
---|
| 1993 | + |
---|
| 1994 | +#define RKVENC2_TIMEOUT_DUMP_REG_START (0x5100) |
---|
| 1995 | +#define RKVENC2_TIMEOUT_DUMP_REG_END (0x5160) |
---|
| 1996 | + |
---|
| 1997 | +static void rkvenc2_task_timeout_process(struct mpp_session *session, |
---|
| 1998 | + struct mpp_task *task) |
---|
| 1999 | +{ |
---|
| 2000 | + atomic_inc(&task->abort_request); |
---|
| 2001 | + set_bit(TASK_STATE_ABORT, &task->state); |
---|
| 2002 | + |
---|
| 2003 | + mpp_err("session %d:%d count %d task %d ref %d timeout\n", |
---|
| 2004 | + session->pid, session->index, atomic_read(&session->task_count), |
---|
| 2005 | + task->task_id, kref_read(&task->ref)); |
---|
| 2006 | + |
---|
| 2007 | + if (task->mpp) { |
---|
| 2008 | + struct mpp_dev *mpp = task->mpp; |
---|
| 2009 | + u32 start = RKVENC2_TIMEOUT_DUMP_REG_START; |
---|
| 2010 | + u32 end = RKVENC2_TIMEOUT_DUMP_REG_END; |
---|
| 2011 | + u32 offset; |
---|
| 2012 | + |
---|
| 2013 | + dev_err(mpp->dev, "core %d dump timeout status:\n", mpp->core_id); |
---|
| 2014 | + |
---|
| 2015 | + for (offset = start; offset < end; offset += sizeof(u32)) |
---|
| 2016 | + mpp_reg_show(mpp, offset); |
---|
| 2017 | + } |
---|
| 2018 | + |
---|
| 2019 | + rkvenc2_task_pop_pending(task); |
---|
| 2020 | +} |
---|
| 2021 | + |
---|
| 2022 | +static int rkvenc2_wait_result(struct mpp_session *session, |
---|
| 2023 | + struct mpp_task_msgs *msgs) |
---|
| 2024 | +{ |
---|
| 2025 | + struct rkvenc_poll_slice_cfg cfg; |
---|
| 2026 | + struct rkvenc_task *enc_task; |
---|
| 2027 | + struct mpp_request *req; |
---|
| 2028 | + struct mpp_task *task; |
---|
| 2029 | + struct mpp_dev *mpp; |
---|
| 2030 | + union rkvenc2_slice_len_info slice_info; |
---|
| 2031 | + u32 task_id; |
---|
| 2032 | + int ret = 0; |
---|
| 2033 | + |
---|
| 2034 | + mutex_lock(&session->pending_lock); |
---|
| 2035 | + task = list_first_entry_or_null(&session->pending_list, |
---|
| 2036 | + struct mpp_task, |
---|
| 2037 | + pending_link); |
---|
| 2038 | + mutex_unlock(&session->pending_lock); |
---|
| 2039 | + if (!task) { |
---|
| 2040 | + mpp_err("session %p pending list is empty!\n", session); |
---|
| 2041 | + return -EIO; |
---|
| 2042 | + } |
---|
| 2043 | + |
---|
| 2044 | + mpp = mpp_get_task_used_device(task, session); |
---|
| 2045 | + enc_task = to_rkvenc_task(task); |
---|
| 2046 | + task_id = task->task_id; |
---|
| 2047 | + |
---|
| 2048 | + req = cmpxchg(&msgs->poll_req, msgs->poll_req, NULL); |
---|
| 2049 | + |
---|
| 2050 | + if (!enc_task->task_split || enc_task->task_split_done) { |
---|
| 2051 | +task_done_ret: |
---|
| 2052 | + ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state)); |
---|
| 2053 | + if (ret == -ERESTARTSYS) |
---|
| 2054 | + mpp_err("wait task break by signal in normal mode\n"); |
---|
| 2055 | + |
---|
| 2056 | + return rkvenc2_task_default_process(mpp, task); |
---|
| 2057 | + |
---|
| 2058 | + } |
---|
| 2059 | + |
---|
| 2060 | + /* not slice return just wait all slice length */ |
---|
| 2061 | + if (!req) { |
---|
| 2062 | + do { |
---|
| 2063 | + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info, |
---|
| 2064 | + &slice_info, 1)); |
---|
| 2065 | + if (ret == -ERESTARTSYS) { |
---|
| 2066 | + mpp_err("wait task break by signal in slice all mode\n"); |
---|
| 2067 | + return 0; |
---|
| 2068 | + } |
---|
| 2069 | + mpp_dbg_slice("task %d rd %3d len %d %s\n", |
---|
| 2070 | + task_id, enc_task->slice_rd_cnt, slice_info.slice_len, |
---|
| 2071 | + slice_info.last ? "last" : ""); |
---|
| 2072 | + |
---|
| 2073 | + enc_task->slice_rd_cnt++; |
---|
| 2074 | + |
---|
| 2075 | + if (slice_info.last) |
---|
| 2076 | + goto task_done_ret; |
---|
| 2077 | + } while (1); |
---|
| 2078 | + } |
---|
| 2079 | + |
---|
| 2080 | + if (copy_from_user(&cfg, req->data, sizeof(cfg))) { |
---|
| 2081 | + mpp_err("copy_from_user failed\n"); |
---|
| 2082 | + return -EINVAL; |
---|
| 2083 | + } |
---|
| 2084 | + |
---|
| 2085 | + mpp_dbg_slice("task %d poll irq %d:%d\n", task->task_id, |
---|
| 2086 | + cfg.count_max, cfg.count_ret); |
---|
| 2087 | + cfg.count_ret = 0; |
---|
| 2088 | + |
---|
| 2089 | + /* handle slice mode poll return */ |
---|
| 2090 | + do { |
---|
| 2091 | + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info, |
---|
| 2092 | + &slice_info, 1)); |
---|
| 2093 | + if (ret == -ERESTARTSYS) { |
---|
| 2094 | + mpp_err("wait task break by signal in slice one mode\n"); |
---|
| 2095 | + return 0; |
---|
| 2096 | + } |
---|
| 2097 | + mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", mpp->core_id, |
---|
| 2098 | + task_id, enc_task->slice_rd_cnt, slice_info.slice_len, |
---|
| 2099 | + slice_info.last ? "last" : ""); |
---|
| 2100 | + enc_task->slice_rd_cnt++; |
---|
| 2101 | + if (cfg.count_ret < cfg.count_max) { |
---|
| 2102 | + struct rkvenc_poll_slice_cfg __user *ucfg = |
---|
| 2103 | + (struct rkvenc_poll_slice_cfg __user *)(req->data); |
---|
| 2104 | + u32 __user *dst = (u32 __user *)(ucfg + 1); |
---|
| 2105 | + |
---|
| 2106 | + /* Do NOT return here when put_user error. Just continue */ |
---|
| 2107 | + if (put_user(slice_info.val, dst + cfg.count_ret)) |
---|
| 2108 | + ret = -EFAULT; |
---|
| 2109 | + |
---|
| 2110 | + cfg.count_ret++; |
---|
| 2111 | + if (put_user(cfg.count_ret, &ucfg->count_ret)) |
---|
| 2112 | + ret = -EFAULT; |
---|
| 2113 | + } |
---|
| 2114 | + |
---|
| 2115 | + if (slice_info.last) { |
---|
| 2116 | + enc_task->task_split_done = 1; |
---|
| 2117 | + goto task_done_ret; |
---|
| 2118 | + } |
---|
| 2119 | + |
---|
| 2120 | + if (cfg.count_ret >= cfg.count_max) |
---|
| 2121 | + return 0; |
---|
| 2122 | + |
---|
| 2123 | + if (ret < 0) |
---|
| 2124 | + return ret; |
---|
| 2125 | + } while (!ret); |
---|
| 2126 | + |
---|
| 2127 | + rkvenc2_task_timeout_process(session, task); |
---|
| 2128 | + |
---|
| 2129 | + return ret; |
---|
| 2130 | +} |
---|
| 2131 | + |
---|
1202 | 2132 | static struct mpp_hw_ops rkvenc_hw_ops = { |
---|
1203 | 2133 | .init = rkvenc_init, |
---|
| 2134 | + .exit = rkvenc_exit, |
---|
1204 | 2135 | .clk_on = rkvenc_clk_on, |
---|
1205 | 2136 | .clk_off = rkvenc_clk_off, |
---|
1206 | 2137 | .set_freq = rkvenc_set_freq, |
---|
.. | .. |
---|
1208 | 2139 | }; |
---|
1209 | 2140 | |
---|
1210 | 2141 | static struct mpp_dev_ops rkvenc_dev_ops_v2 = { |
---|
| 2142 | + .wait_result = rkvenc2_wait_result, |
---|
1211 | 2143 | .alloc_task = rkvenc_alloc_task, |
---|
| 2144 | + .run = rkvenc_run, |
---|
| 2145 | + .irq = rkvenc_irq, |
---|
| 2146 | + .isr = rkvenc_isr, |
---|
| 2147 | + .finish = rkvenc_finish, |
---|
| 2148 | + .result = rkvenc_result, |
---|
| 2149 | + .free_task = rkvenc_free_task, |
---|
| 2150 | + .ioctl = rkvenc_control, |
---|
| 2151 | + .init_session = rkvenc_init_session, |
---|
| 2152 | + .free_session = rkvenc_free_session, |
---|
| 2153 | + .dump_session = rkvenc_dump_session, |
---|
| 2154 | +}; |
---|
| 2155 | + |
---|
| 2156 | +static struct mpp_dev_ops rkvenc_ccu_dev_ops = { |
---|
| 2157 | + .wait_result = rkvenc2_wait_result, |
---|
| 2158 | + .alloc_task = rkvenc_alloc_task, |
---|
| 2159 | + .prepare = rkvenc2_prepare, |
---|
1212 | 2160 | .run = rkvenc_run, |
---|
1213 | 2161 | .irq = rkvenc_irq, |
---|
1214 | 2162 | .isr = rkvenc_isr, |
---|
.. | .. |
---|
1230 | 2178 | .dev_ops = &rkvenc_dev_ops_v2, |
---|
1231 | 2179 | }; |
---|
1232 | 2180 | |
---|
| 2181 | +static const struct mpp_dev_var rkvenc_540c_data = { |
---|
| 2182 | + .device_type = MPP_DEVICE_RKVENC, |
---|
| 2183 | + .hw_info = &rkvenc_540c_hw_info.hw, |
---|
| 2184 | + .trans_info = trans_rkvenc_540c, |
---|
| 2185 | + .hw_ops = &rkvenc_hw_ops, |
---|
| 2186 | + .dev_ops = &rkvenc_dev_ops_v2, |
---|
| 2187 | +}; |
---|
| 2188 | + |
---|
| 2189 | +static const struct mpp_dev_var rkvenc_ccu_data = { |
---|
| 2190 | + .device_type = MPP_DEVICE_RKVENC, |
---|
| 2191 | + .hw_info = &rkvenc_v2_hw_info.hw, |
---|
| 2192 | + .trans_info = trans_rkvenc_v2, |
---|
| 2193 | + .hw_ops = &rkvenc_hw_ops, |
---|
| 2194 | + .dev_ops = &rkvenc_ccu_dev_ops, |
---|
| 2195 | +}; |
---|
| 2196 | + |
---|
1233 | 2197 | static const struct of_device_id mpp_rkvenc_dt_match[] = { |
---|
1234 | 2198 | { |
---|
1235 | 2199 | .compatible = "rockchip,rkv-encoder-v2", |
---|
1236 | 2200 | .data = &rkvenc_v2_data, |
---|
1237 | 2201 | }, |
---|
| 2202 | +#ifdef CONFIG_CPU_RK3528 |
---|
| 2203 | + { |
---|
| 2204 | + .compatible = "rockchip,rkv-encoder-rk3528", |
---|
| 2205 | + .data = &rkvenc_540c_data, |
---|
| 2206 | + }, |
---|
| 2207 | +#endif |
---|
| 2208 | +#ifdef CONFIG_CPU_RK3562 |
---|
| 2209 | + { |
---|
| 2210 | + .compatible = "rockchip,rkv-encoder-rk3562", |
---|
| 2211 | + .data = &rkvenc_540c_data, |
---|
| 2212 | + }, |
---|
| 2213 | +#endif |
---|
| 2214 | +#ifdef CONFIG_CPU_RK3588 |
---|
| 2215 | + { |
---|
| 2216 | + .compatible = "rockchip,rkv-encoder-v2-core", |
---|
| 2217 | + .data = &rkvenc_ccu_data, |
---|
| 2218 | + }, |
---|
| 2219 | + { |
---|
| 2220 | + .compatible = "rockchip,rkv-encoder-v2-ccu", |
---|
| 2221 | + }, |
---|
| 2222 | +#endif |
---|
1238 | 2223 | {}, |
---|
1239 | 2224 | }; |
---|
| 2225 | + |
---|
| 2226 | +static int rkvenc_ccu_probe(struct platform_device *pdev) |
---|
| 2227 | +{ |
---|
| 2228 | + struct rkvenc_ccu *ccu; |
---|
| 2229 | + struct device *dev = &pdev->dev; |
---|
| 2230 | + |
---|
| 2231 | + ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL); |
---|
| 2232 | + if (!ccu) |
---|
| 2233 | + return -ENOMEM; |
---|
| 2234 | + |
---|
| 2235 | + platform_set_drvdata(pdev, ccu); |
---|
| 2236 | + |
---|
| 2237 | + mutex_init(&ccu->lock); |
---|
| 2238 | + INIT_LIST_HEAD(&ccu->core_list); |
---|
| 2239 | + spin_lock_init(&ccu->lock_dchs); |
---|
| 2240 | + |
---|
| 2241 | + return 0; |
---|
| 2242 | +} |
---|
| 2243 | + |
---|
| 2244 | +static int rkvenc_attach_ccu(struct device *dev, struct rkvenc_dev *enc) |
---|
| 2245 | +{ |
---|
| 2246 | + struct device_node *np; |
---|
| 2247 | + struct platform_device *pdev; |
---|
| 2248 | + struct rkvenc_ccu *ccu; |
---|
| 2249 | + |
---|
| 2250 | + mpp_debug_enter(); |
---|
| 2251 | + |
---|
| 2252 | + np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0); |
---|
| 2253 | + if (!np || !of_device_is_available(np)) |
---|
| 2254 | + return -ENODEV; |
---|
| 2255 | + |
---|
| 2256 | + pdev = of_find_device_by_node(np); |
---|
| 2257 | + of_node_put(np); |
---|
| 2258 | + if (!pdev) |
---|
| 2259 | + return -ENODEV; |
---|
| 2260 | + |
---|
| 2261 | + ccu = platform_get_drvdata(pdev); |
---|
| 2262 | + if (!ccu) |
---|
| 2263 | + return -ENOMEM; |
---|
| 2264 | + |
---|
| 2265 | + INIT_LIST_HEAD(&enc->core_link); |
---|
| 2266 | + mutex_lock(&ccu->lock); |
---|
| 2267 | + ccu->core_num++; |
---|
| 2268 | + list_add_tail(&enc->core_link, &ccu->core_list); |
---|
| 2269 | + mutex_unlock(&ccu->lock); |
---|
| 2270 | + |
---|
| 2271 | + /* attach the ccu-domain to current core */ |
---|
| 2272 | + if (!ccu->main_core) { |
---|
| 2273 | + /** |
---|
| 2274 | + * set the first device for the main-core, |
---|
| 2275 | + * then the domain of the main-core named ccu-domain |
---|
| 2276 | + */ |
---|
| 2277 | + ccu->main_core = &enc->mpp; |
---|
| 2278 | + } else { |
---|
| 2279 | + struct mpp_iommu_info *ccu_info, *cur_info; |
---|
| 2280 | + |
---|
| 2281 | + /* set the ccu-domain for current device */ |
---|
| 2282 | + ccu_info = ccu->main_core->iommu_info; |
---|
| 2283 | + cur_info = enc->mpp.iommu_info; |
---|
| 2284 | + |
---|
| 2285 | + if (cur_info) { |
---|
| 2286 | + cur_info->domain = ccu_info->domain; |
---|
| 2287 | + cur_info->rw_sem = ccu_info->rw_sem; |
---|
| 2288 | + } |
---|
| 2289 | + mpp_iommu_attach(cur_info); |
---|
| 2290 | + |
---|
| 2291 | + /* increase main core message capacity */ |
---|
| 2292 | + ccu->main_core->msgs_cap++; |
---|
| 2293 | + enc->mpp.msgs_cap = 0; |
---|
| 2294 | + } |
---|
| 2295 | + enc->ccu = ccu; |
---|
| 2296 | + |
---|
| 2297 | + dev_info(dev, "attach ccu as core %d\n", enc->mpp.core_id); |
---|
| 2298 | + mpp_debug_leave(); |
---|
| 2299 | + |
---|
| 2300 | + return 0; |
---|
| 2301 | +} |
---|
1240 | 2302 | |
---|
1241 | 2303 | static int rkvenc2_alloc_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc) |
---|
1242 | 2304 | { |
---|
.. | .. |
---|
1252 | 2314 | |
---|
1253 | 2315 | /* get rcb iova start and size */ |
---|
1254 | 2316 | ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2); |
---|
1255 | | - if (ret) { |
---|
1256 | | - dev_err(dev, "could not find property rcb-iova\n"); |
---|
| 2317 | + if (ret) |
---|
1257 | 2318 | return ret; |
---|
1258 | | - } |
---|
| 2319 | + |
---|
1259 | 2320 | iova = PAGE_ALIGN(vals[0]); |
---|
1260 | 2321 | sram_used = PAGE_ALIGN(vals[1]); |
---|
1261 | 2322 | if (!sram_used) { |
---|
.. | .. |
---|
1337 | 2398 | return ret; |
---|
1338 | 2399 | } |
---|
1339 | 2400 | |
---|
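| | +/*
---|
| | + * iommu fault handler for the encoder cores: log the dual-core handshake
---|
| | + * (DCHS) register and dump the memory regions of the task that was
---|
| | + * running when the fault hit.
---|
| | + */
---|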
| 2401 | +static int rkvenc2_iommu_fault_handle(struct iommu_domain *iommu, |
---|
| 2402 | + struct device *iommu_dev, |
---|
| 2403 | + unsigned long iova, int status, void *arg) |
---|
| 2404 | +{ |
---|
| 2405 | + struct mpp_dev *mpp = (struct mpp_dev *)arg; |
---|
| 2406 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 2407 | + struct mpp_task *mpp_task = mpp->cur_task; |
---|
| 2408 | + |
---|
| 2409 | + dev_info(mpp->dev, "core %d iommu page fault at iova %#lx, dchs %08x\n",
---|
| 2410 | + mpp->core_id, iova, mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET));
---|
| 2411 | + |
---|
| 2412 | + if (mpp_task) |
---|
| 2413 | + mpp_task_dump_mem_region(mpp, mpp_task); |
---|
| 2414 | + |
---|
| 2415 | + return 0; |
---|
| 2416 | +} |
---|
| 2417 | + |
---|
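| | +/*
---|
| | + * Probe one encoder core: run the common mpp probe, attach the core to
---|
| | + * its ccu, then set up the rcb buffer and the per-core interrupt. The
---|
| | + * core index comes from the "rkvenc" alias in the device tree.
---|
| | + */
---|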
| 2418 | +static int rkvenc_core_probe(struct platform_device *pdev) |
---|
| 2419 | +{ |
---|
| 2420 | + int ret = 0; |
---|
| 2421 | + struct device *dev = &pdev->dev; |
---|
| 2422 | + struct rkvenc_dev *enc = NULL; |
---|
| 2423 | + struct mpp_dev *mpp = NULL; |
---|
| 2424 | + |
---|
| 2425 | + enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL); |
---|
| 2426 | + if (!enc) |
---|
| 2427 | + return -ENOMEM; |
---|
| 2428 | + |
---|
| 2429 | + mpp = &enc->mpp; |
---|
| 2430 | + platform_set_drvdata(pdev, mpp); |
---|
| 2431 | + |
---|
| 2432 | + if (pdev->dev.of_node) { |
---|
| 2433 | + struct device_node *np = pdev->dev.of_node; |
---|
| 2434 | + const struct of_device_id *match = NULL; |
---|
| 2435 | + |
---|
| 2436 | + match = of_match_node(mpp_rkvenc_dt_match, np); |
---|
| 2437 | + if (match) |
---|
| 2438 | + mpp->var = (struct mpp_dev_var *)match->data; |
---|
| 2439 | + |
---|
| 2440 | + mpp->core_id = of_alias_get_id(np, "rkvenc"); |
---|
| 2441 | + } |
---|
| 2442 | + |
---|
| 2443 | + ret = mpp_dev_probe(mpp, pdev); |
---|
| 2444 | + if (ret) |
---|
| 2445 | + return ret; |
---|
| 2446 | + |
---|
| 2447 | + /* attach core to ccu */ |
---|
| 2448 | + ret = rkvenc_attach_ccu(dev, enc); |
---|
| 2449 | + if (ret) { |
---|
| 2450 | + dev_err(dev, "attach ccu failed\n"); |
---|
| 2451 | + return ret; |
---|
| 2452 | + } |
---|
| 2453 | + rkvenc2_alloc_rcbbuf(pdev, enc); |
---|
| 2454 | + |
---|
| 2455 | + ret = devm_request_threaded_irq(dev, mpp->irq, |
---|
| 2456 | + mpp_dev_irq, |
---|
| 2457 | + mpp_dev_isr_sched, |
---|
| 2458 | + IRQF_SHARED, |
---|
| 2459 | + dev_name(dev), mpp); |
---|
| 2460 | + if (ret) { |
---|
| 2461 | + dev_err(dev, "failed to register interrupt handler\n");
---|
| 2462 | + return ret;
---|
| 2463 | + } |
---|
| 2464 | + mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS; |
---|
| 2465 | + enc->hw_info = to_rkvenc_info(mpp->var->hw_info); |
---|
| 2466 | + mpp->fault_handler = rkvenc2_iommu_fault_handle; |
---|
| 2467 | + rkvenc_procfs_init(mpp); |
---|
| 2468 | + rkvenc_procfs_ccu_init(mpp); |
---|
| 2469 | + |
---|
| 2470 | + /* only the main core registers itself with the mpp service */
---|
| 2471 | + if (mpp == enc->ccu->main_core) |
---|
| 2472 | + mpp_dev_register_srv(mpp, mpp->srv); |
---|
| 2473 | + |
---|
| 2474 | + return 0; |
---|
| 2475 | +} |
---|
| 2476 | + |
---|
1340 | 2477 | static int rkvenc_probe_default(struct platform_device *pdev) |
---|
1341 | 2478 | { |
---|
1342 | 2479 | int ret = 0; |
---|
.. | .. |
---|
1350 | 2487 | return -ENOMEM; |
---|
1351 | 2488 | |
---|
1352 | 2489 | mpp = &enc->mpp; |
---|
1353 | | - platform_set_drvdata(pdev, enc); |
---|
| 2490 | + platform_set_drvdata(pdev, mpp); |
---|
1354 | 2491 | |
---|
1355 | 2492 | if (pdev->dev.of_node) { |
---|
1356 | 2493 | match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node); |
---|
.. | .. |
---|
1375 | 2512 | } |
---|
1376 | 2513 | mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS; |
---|
1377 | 2514 | enc->hw_info = to_rkvenc_info(mpp->var->hw_info); |
---|
1378 | | - |
---|
1379 | 2515 | rkvenc_procfs_init(mpp); |
---|
1380 | 2516 | mpp_dev_register_srv(mpp, mpp->srv); |
---|
1381 | 2517 | |
---|
.. | .. |
---|
1391 | 2527 | { |
---|
1392 | 2528 | int ret = 0; |
---|
1393 | 2529 | struct device *dev = &pdev->dev; |
---|
| 2530 | + struct device_node *np = dev->of_node; |
---|
1394 | 2531 | |
---|
1395 | 2532 | dev_info(dev, "probing start\n"); |
---|
1396 | 2533 | |
---|
1397 | | - ret = rkvenc_probe_default(pdev); |
---|
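| | + /* dispatch on the device-tree node name: ccu, core or single-core device */
---|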
| 2534 | + if (strstr(np->name, "ccu")) |
---|
| 2535 | + ret = rkvenc_ccu_probe(pdev); |
---|
| 2536 | + else if (strstr(np->name, "core")) |
---|
| 2537 | + ret = rkvenc_core_probe(pdev); |
---|
| 2538 | + else |
---|
| 2539 | + ret = rkvenc_probe_default(pdev); |
---|
1398 | 2540 | |
---|
1399 | 2541 | dev_info(dev, "probing finish\n"); |
---|
1400 | 2542 | |
---|
.. | .. |
---|
1407 | 2549 | |
---|
1408 | 2550 | if (enc->rcb_page) { |
---|
1409 | 2551 | size_t page_size = PAGE_ALIGN(enc->sram_used - enc->sram_size); |
---|
| 2552 | + int order = min(get_order(page_size), MAX_ORDER); |
---|
1410 | 2553 | |
---|
1411 | | - __free_pages(enc->rcb_page, get_order(page_size)); |
---|
| 2554 | + __free_pages(enc->rcb_page, order); |
---|
1412 | 2555 | } |
---|
1413 | 2556 | if (enc->sram_iova) { |
---|
1414 | 2557 | domain = enc->mpp.iommu_info->domain; |
---|
.. | .. |
---|
1421 | 2564 | static int rkvenc_remove(struct platform_device *pdev) |
---|
1422 | 2565 | { |
---|
1423 | 2566 | struct device *dev = &pdev->dev; |
---|
| 2567 | + struct device_node *np = dev->of_node; |
---|
1424 | 2568 | |
---|
1425 | | - struct rkvenc_dev *enc = platform_get_drvdata(pdev); |
---|
| 2569 | + if (strstr(np->name, "ccu")) { |
---|
| 2570 | + dev_info(dev, "remove ccu\n"); |
---|
| 2571 | + } else if (strstr(np->name, "core")) { |
---|
| 2572 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
| 2573 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
1426 | 2574 | |
---|
1427 | | - dev_info(dev, "remove device\n"); |
---|
1428 | | - rkvenc2_free_rcbbuf(pdev, enc); |
---|
1429 | | - mpp_dev_remove(&enc->mpp); |
---|
1430 | | - rkvenc_procfs_remove(&enc->mpp); |
---|
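| | + /* take the core off the ccu core list before the common teardown */
---|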
| 2575 | + dev_info(dev, "remove core\n"); |
---|
| 2576 | + if (enc->ccu) { |
---|
| 2577 | + mutex_lock(&enc->ccu->lock); |
---|
| 2578 | + list_del_init(&enc->core_link); |
---|
| 2579 | + enc->ccu->core_num--; |
---|
| 2580 | + mutex_unlock(&enc->ccu->lock); |
---|
| 2581 | + } |
---|
| 2582 | + rkvenc2_free_rcbbuf(pdev, enc); |
---|
| 2583 | + mpp_dev_remove(&enc->mpp); |
---|
| 2584 | + rkvenc_procfs_remove(&enc->mpp); |
---|
| 2585 | + } else { |
---|
| 2586 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
| 2587 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 2588 | + |
---|
| 2589 | + dev_info(dev, "remove device\n"); |
---|
| 2590 | + rkvenc2_free_rcbbuf(pdev, enc); |
---|
| 2591 | + mpp_dev_remove(mpp); |
---|
| 2592 | + rkvenc_procfs_remove(mpp); |
---|
| 2593 | + } |
---|
1431 | 2594 | |
---|
1432 | 2595 | return 0; |
---|
1433 | 2596 | } |
---|
.. | .. |
---|
1435 | 2598 | static void rkvenc_shutdown(struct platform_device *pdev) |
---|
1436 | 2599 | { |
---|
1437 | 2600 | struct device *dev = &pdev->dev; |
---|
1438 | | - int ret; |
---|
1439 | | - int val; |
---|
1440 | | - struct rkvenc_dev *enc = platform_get_drvdata(pdev); |
---|
1441 | | - struct mpp_dev *mpp = &enc->mpp; |
---|
1442 | 2601 | |
---|
1443 | | - dev_info(dev, "shutdown device\n"); |
---|
1444 | | - |
---|
1445 | | - if (mpp->srv) |
---|
1446 | | - atomic_inc(&mpp->srv->shutdown_request); |
---|
1447 | | - |
---|
1448 | | - ret = readx_poll_timeout(atomic_read, |
---|
1449 | | - &mpp->task_count, |
---|
1450 | | - val, val == 0, 1000, 200000); |
---|
1451 | | - if (ret == -ETIMEDOUT) |
---|
1452 | | - dev_err(dev, "wait total running time out\n"); |
---|
1453 | | - |
---|
1454 | | - dev_info(dev, "shutdown success\n"); |
---|
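| | + /* the ccu node owns no hardware, so only real encoder devices need a shutdown */
---|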
| 2602 | + if (!strstr(dev_name(dev), "ccu")) |
---|
| 2603 | + mpp_dev_shutdown(pdev); |
---|
1455 | 2604 | } |
---|
1456 | | - |
---|
1457 | | -static int rkvenc_runtime_suspend(struct device *dev) |
---|
1458 | | -{ |
---|
1459 | | - struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
1460 | | - struct mpp_grf_info *info = mpp->grf_info; |
---|
1461 | | - |
---|
1462 | | - if (cpu_is_rk3528() && info && info->mem_offset) |
---|
1463 | | - regmap_write(info->grf, |
---|
1464 | | - info->mem_offset, |
---|
1465 | | - info->val_mem_off); |
---|
1466 | | - |
---|
1467 | | - return 0; |
---|
1468 | | -} |
---|
1469 | | - |
---|
1470 | | -static int rkvenc_runtime_resume(struct device *dev) |
---|
1471 | | -{ |
---|
1472 | | - struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
1473 | | - struct mpp_grf_info *info = mpp->grf_info; |
---|
1474 | | - |
---|
1475 | | - if (cpu_is_rk3528() && info && info->mem_offset) |
---|
1476 | | - regmap_write(info->grf, |
---|
1477 | | - info->mem_offset, |
---|
1478 | | - info->val_mem_on); |
---|
1479 | | - |
---|
1480 | | - return 0; |
---|
1481 | | -} |
---|
1482 | | - |
---|
1483 | | -static const struct dev_pm_ops rkvenc_pm_ops = { |
---|
1484 | | - .runtime_suspend = rkvenc_runtime_suspend, |
---|
1485 | | - .runtime_resume = rkvenc_runtime_resume, |
---|
1486 | | -}; |
---|
1487 | 2605 | |
---|
1488 | 2606 | struct platform_driver rockchip_rkvenc2_driver = { |
---|
1489 | 2607 | .probe = rkvenc_probe, |
---|
.. | .. |
---|
1491 | 2609 | .shutdown = rkvenc_shutdown, |
---|
1492 | 2610 | .driver = { |
---|
1493 | 2611 | .name = RKVENC_DRIVER_NAME, |
---|
1494 | | - .pm = &rkvenc_pm_ops, |
---|
1495 | 2612 | .of_match_table = of_match_ptr(mpp_rkvenc_dt_match), |
---|
1496 | 2613 | }, |
---|
1497 | 2614 | }; |
---|