.. | .. |
---|
1 | 1 | // SPDX-License-Identifier: (GPL-2.0+ OR MIT) |
---|
2 | 2 | /* |
---|
3 | | - * Copyright (c) 2022 Rockchip Electronics Co., Ltd |
---|
| 3 | + * Copyright (c) 2021 Rockchip Electronics Co., Ltd |
---|
4 | 4 | * |
---|
5 | 5 | * author: |
---|
6 | 6 | * Ding Wei, leo.ding@rock-chips.com |
---|
.. | .. |
---|
32 | 32 | #include <soc/rockchip/rockchip_ipa.h> |
---|
33 | 33 | #include <soc/rockchip/rockchip_opp_select.h> |
---|
34 | 34 | #include <soc/rockchip/rockchip_system_monitor.h> |
---|
| 35 | +#include <soc/rockchip/rockchip_iommu.h> |
---|
35 | 36 | |
---|
36 | 37 | #include "mpp_debug.h" |
---|
37 | 38 | #include "mpp_iommu.h" |
---|
.. | .. |
---|
41 | 42 | |
---|
42 | 43 | #define RKVENC_SESSION_MAX_BUFFERS 40 |
---|
43 | 44 | #define RKVENC_MAX_CORE_NUM 4 |
---|
| 45 | +#define RKVENC_MAX_DCHS_ID 4 |
---|
| 46 | +#define RKVENC_MAX_SLICE_FIFO_LEN 256 |
---|
| 47 | +#define RKVENC_SCLR_DONE_STA BIT(2) |
---|
| 48 | +#define RKVENC_WDG 0x38 |
---|
| 49 | +#define TIMEOUT_MS 100 |
---|
44 | 50 | |
---|
45 | 51 | #define to_rkvenc_info(info) \ |
---|
46 | 52 | container_of(info, struct rkvenc_hw_info, hw) |
---|
.. | .. |
---|
116 | 122 | u32 err_mask; |
---|
117 | 123 | }; |
---|
118 | 124 | |
---|
| 125 | +#define INT_STA_ENC_DONE_STA BIT(0) |
---|
| 126 | +#define INT_STA_SCLR_DONE_STA BIT(2) |
---|
| 127 | +#define INT_STA_SLC_DONE_STA BIT(3) |
---|
| 128 | +#define INT_STA_BSF_OFLW_STA BIT(4) |
---|
| 129 | +#define INT_STA_BRSP_OTSD_STA BIT(5) |
---|
| 130 | +#define INT_STA_WBUS_ERR_STA BIT(6) |
---|
| 131 | +#define INT_STA_RBUS_ERR_STA BIT(7) |
---|
| 132 | +#define INT_STA_WDG_STA BIT(8) |
---|
| 133 | + |
---|
| 134 | +#define INT_STA_ERROR (INT_STA_BRSP_OTSD_STA | \ |
---|
| 135 | + INT_STA_WBUS_ERR_STA | \ |
---|
| 136 | + INT_STA_RBUS_ERR_STA | \ |
---|
| 137 | + INT_STA_WDG_STA) |
---|
| 138 | + |
---|
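As a quick reading of how these status bits combine (the value below is invented for illustration, not captured from hardware):

	u32 irq_status = 0x00000009;	/* example value only */

	/* bit 0 and bit 3 set: the frame finished on its last slice */
	bool enc_done  = irq_status & INT_STA_ENC_DONE_STA;
	bool slc_done  = irq_status & INT_STA_SLC_DONE_STA;
	bool has_error = irq_status & INT_STA_ERROR;	/* false: no bus/watchdog error */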
| 139 | +#define DCHS_REG_OFFSET (0x304) |
---|
| 140 | +#define DCHS_CLASS_OFFSET (33) |
---|
| 141 | +#define DCHS_TXE (0x10) |
---|
| 142 | +#define DCHS_RXE (0x20) |
---|
| 143 | + |
---|
| 144 | +/* dual core hand-shake info */ |
---|
| 145 | +union rkvenc2_dual_core_handshake_id { |
---|
| 146 | + u64 val; |
---|
| 147 | + struct { |
---|
| 148 | + u32 txid : 2; |
---|
| 149 | + u32 rxid : 2; |
---|
| 150 | + u32 txe : 1; |
---|
| 151 | + u32 rxe : 1; |
---|
| 152 | + u32 working : 1; |
---|
| 153 | + u32 reserve0 : 1; |
---|
| 154 | + u32 txid_orig : 2; |
---|
| 155 | + u32 rxid_orig : 2; |
---|
| 156 | + u32 txid_map : 2; |
---|
| 157 | + u32 rxid_map : 2; |
---|
| 158 | + u32 offset : 11; |
---|
| 159 | + u32 reserve1 : 1; |
---|
| 160 | + u32 txe_orig : 1; |
---|
| 161 | + u32 rxe_orig : 1; |
---|
| 162 | + u32 txe_map : 1; |
---|
| 163 | + u32 rxe_map : 1; |
---|
| 164 | + u32 session_id; |
---|
| 165 | + }; |
---|
| 166 | +}; |
---|
| 167 | + |
---|
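The low 32 bits of this union mirror the handshake word the hardware sees at DCHS_REG_OFFSET, while the upper word only carries the owning session id. A minimal sketch of the packing, matching what rkvenc2_setup_task_id() does later in this patch (the helper name is made up for illustration):

	/* illustrative helper, not part of the driver */
	static inline u64 rkvenc2_pack_dchs_id(u32 session_id, u32 dchs_word)
	{
		return ((u64)session_id << 32) | dchs_word;
	}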
| 168 | +#define RKVENC2_REG_INT_EN (8) |
---|
| 169 | +#define RKVENC2_BIT_SLICE_DONE_EN BIT(3) |
---|
| 170 | + |
---|
| 171 | +#define RKVENC2_REG_INT_MASK (9) |
---|
| 172 | +#define RKVENC2_BIT_SLICE_DONE_MASK BIT(3) |
---|
| 173 | + |
---|
| 174 | +#define RKVENC2_REG_EXT_LINE_BUF_BASE (22) |
---|
| 175 | + |
---|
| 176 | +#define RKVENC2_REG_ENC_PIC (32) |
---|
| 177 | +#define RKVENC2_BIT_ENC_STND BIT(0) |
---|
| 178 | +#define RKVENC2_BIT_VAL_H264 0 |
---|
| 179 | +#define RKVENC2_BIT_VAL_H265 1 |
---|
| 180 | +#define RKVENC2_BIT_SLEN_FIFO BIT(30) |
---|
| 181 | + |
---|
| 182 | +#define RKVENC2_REG_SLI_SPLIT (56) |
---|
| 183 | +#define RKVENC2_BIT_SLI_SPLIT BIT(0) |
---|
| 184 | +#define RKVENC2_BIT_SLI_FLUSH BIT(15) |
---|
| 185 | + |
---|
| 186 | +#define RKVENC2_REG_SLICE_NUM_BASE (0x4034) |
---|
| 187 | +#define RKVENC2_REG_SLICE_LEN_BASE (0x4038) |
---|
| 188 | + |
---|
| 189 | +#define RKVENC2_REG_ST_BSB (0x402c) |
---|
| 190 | +#define RKVENC2_REG_ADR_BSBT (0x2b0) |
---|
| 191 | +#define RKVENC2_REG_ADR_BSBB (0x2b4) |
---|
| 192 | +#define RKVENC2_REG_ADR_BSBR (0x2b8) |
---|
| 193 | +#define RKVENC2_REG_ADR_BSBS (0x2bc) |
---|
| 194 | + |
---|
| 195 | +union rkvenc2_slice_len_info { |
---|
| 196 | + u32 val; |
---|
| 197 | + |
---|
| 198 | + struct { |
---|
| 199 | + u32 slice_len : 31; |
---|
| 200 | + u32 last : 1; |
---|
| 201 | + }; |
---|
| 202 | +}; |
---|
| 203 | + |
---|
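For example (an invented value, not a hardware capture), a slice word of 0x80000040 decodes to a 64-byte slice with the last flag set, i.e. the final slice of a frame:

	union rkvenc2_slice_len_info info = { .val = 0x80000040 };
	/* info.slice_len == 64, info.last == 1 */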
| 204 | +struct rkvenc_poll_slice_cfg { |
---|
| 205 | + s32 poll_type; |
---|
| 206 | + s32 poll_ret; |
---|
| 207 | + s32 count_max; |
---|
| 208 | + s32 count_ret; |
---|
| 209 | + union rkvenc2_slice_len_info slice_info[]; |
---|
| 210 | +}; |
---|
| 211 | + |
---|
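A hypothetical userspace-side sketch of how this poll payload could be laid out; the struct mirrors the definition above, while the allocation helper and the slice count are illustrative assumptions rather than part of any published UAPI header:

	#include <stdint.h>
	#include <stdlib.h>

	/* fixed header followed by up to count_max slice words filled by the driver */
	struct poll_slice_cfg {
		int32_t poll_type;
		int32_t poll_ret;
		int32_t count_max;
		int32_t count_ret;	/* updated by the driver */
		uint32_t slice_info[];	/* written by the driver after the header */
	};

	static struct poll_slice_cfg *alloc_poll_cfg(int32_t max_slices)
	{
		struct poll_slice_cfg *cfg;

		cfg = calloc(1, sizeof(*cfg) + max_slices * sizeof(uint32_t));
		if (cfg)
			cfg->count_max = max_slices;
		return cfg;
	}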
119 | 212 | struct rkvenc_task { |
---|
120 | 213 | struct mpp_task mpp_task; |
---|
121 | 214 | int fmt; |
---|
.. | .. |
---|
138 | 231 | u32 r_req_cnt; |
---|
139 | 232 | struct mpp_request r_reqs[MPP_MAX_MSG_NUM]; |
---|
140 | 233 | struct mpp_dma_buffer *table; |
---|
141 | | - u32 task_no; |
---|
| 234 | + |
---|
| 235 | + union rkvenc2_dual_core_handshake_id dchs_id; |
---|
| 236 | + |
---|
| 237 | + /* split output / slice mode info */ |
---|
| 238 | + u32 task_split; |
---|
| 239 | + u32 task_split_done; |
---|
| 240 | + u32 last_slice_found; |
---|
| 241 | + u32 slice_wr_cnt; |
---|
| 242 | + u32 slice_rd_cnt; |
---|
| 243 | + DECLARE_KFIFO(slice_info, union rkvenc2_slice_len_info, RKVENC_MAX_SLICE_FIFO_LEN); |
---|
| 244 | + |
---|
| 245 | + /* jpege bitstream */ |
---|
| 246 | + struct mpp_dma_buffer *bs_buf; |
---|
| 247 | + u32 offset_bs; |
---|
142 | 248 | }; |
---|
143 | 249 | |
---|
144 | 250 | #define RKVENC_MAX_RCB_NUM (4) |
---|
.. | .. |
---|
180 | 286 | struct reset_control *rst_a; |
---|
181 | 287 | struct reset_control *rst_h; |
---|
182 | 288 | struct reset_control *rst_core; |
---|
| 289 | + /* for ccu */ |
---|
| 290 | + struct rkvenc_ccu *ccu; |
---|
| 291 | + struct list_head core_link; |
---|
183 | 292 | |
---|
184 | 293 | /* internal rcb-memory */ |
---|
185 | 294 | u32 sram_size; |
---|
.. | .. |
---|
187 | 296 | dma_addr_t sram_iova; |
---|
188 | 297 | u32 sram_enabled; |
---|
189 | 298 | struct page *rcb_page; |
---|
| 299 | + |
---|
| 300 | + u32 bs_overflow; |
---|
| 301 | + |
---|
| 302 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 303 | + struct rockchip_opp_info opp_info; |
---|
| 304 | + struct monitor_dev_info *mdev_info; |
---|
| 305 | + struct opp_table *opp_table; |
---|
| 306 | +#endif |
---|
190 | 307 | }; |
---|
191 | 308 | |
---|
| 309 | +struct rkvenc_ccu { |
---|
| 310 | + u32 core_num; |
---|
| 311 | + /* lock for core attach */ |
---|
| 312 | + struct mutex lock; |
---|
| 313 | + struct list_head core_list; |
---|
| 314 | + struct mpp_dev *main_core; |
---|
| 315 | + |
---|
| 316 | + spinlock_t lock_dchs; |
---|
| 317 | + union rkvenc2_dual_core_handshake_id dchs[RKVENC_MAX_CORE_NUM]; |
---|
| 318 | +}; |
---|
192 | 319 | |
---|
193 | 320 | static struct rkvenc_hw_info rkvenc_v2_hw_info = { |
---|
| 321 | + .hw = { |
---|
| 322 | + .reg_num = 254, |
---|
| 323 | + .reg_id = 0, |
---|
| 324 | + .reg_en = 4, |
---|
| 325 | + .reg_start = 160, |
---|
| 326 | + .reg_end = 253, |
---|
| 327 | + }, |
---|
| 328 | + .reg_class = RKVENC_CLASS_BUTT, |
---|
| 329 | + .reg_msg[RKVENC_CLASS_BASE] = { |
---|
| 330 | + .base_s = 0x0000, |
---|
| 331 | + .base_e = 0x0058, |
---|
| 332 | + }, |
---|
| 333 | + .reg_msg[RKVENC_CLASS_PIC] = { |
---|
| 334 | + .base_s = 0x0280, |
---|
| 335 | + .base_e = 0x03f4, |
---|
| 336 | + }, |
---|
| 337 | + .reg_msg[RKVENC_CLASS_RC] = { |
---|
| 338 | + .base_s = 0x1000, |
---|
| 339 | + .base_e = 0x10e0, |
---|
| 340 | + }, |
---|
| 341 | + .reg_msg[RKVENC_CLASS_PAR] = { |
---|
| 342 | + .base_s = 0x1700, |
---|
| 343 | + .base_e = 0x1cd4, |
---|
| 344 | + }, |
---|
| 345 | + .reg_msg[RKVENC_CLASS_SQI] = { |
---|
| 346 | + .base_s = 0x2000, |
---|
| 347 | + .base_e = 0x21e4, |
---|
| 348 | + }, |
---|
| 349 | + .reg_msg[RKVENC_CLASS_SCL] = { |
---|
| 350 | + .base_s = 0x2200, |
---|
| 351 | + .base_e = 0x2c98, |
---|
| 352 | + }, |
---|
| 353 | + .reg_msg[RKVENC_CLASS_OSD] = { |
---|
| 354 | + .base_s = 0x3000, |
---|
| 355 | + .base_e = 0x347c, |
---|
| 356 | + }, |
---|
| 357 | + .reg_msg[RKVENC_CLASS_ST] = { |
---|
| 358 | + .base_s = 0x4000, |
---|
| 359 | + .base_e = 0x42cc, |
---|
| 360 | + }, |
---|
| 361 | + .reg_msg[RKVENC_CLASS_DEBUG] = { |
---|
| 362 | + .base_s = 0x5000, |
---|
| 363 | + .base_e = 0x5354, |
---|
| 364 | + }, |
---|
| 365 | + .fd_class = RKVENC_CLASS_FD_BUTT, |
---|
| 366 | + .fd_reg[RKVENC_CLASS_FD_BASE] = { |
---|
| 367 | + .class = RKVENC_CLASS_PIC, |
---|
| 368 | + .base_fmt = RKVENC_FMT_BASE, |
---|
| 369 | + }, |
---|
| 370 | + .fd_reg[RKVENC_CLASS_FD_OSD] = { |
---|
| 371 | + .class = RKVENC_CLASS_OSD, |
---|
| 372 | + .base_fmt = RKVENC_FMT_OSD_BASE, |
---|
| 373 | + }, |
---|
| 374 | + .fmt_reg = { |
---|
| 375 | + .class = RKVENC_CLASS_PIC, |
---|
| 376 | + .base = 0x0300, |
---|
| 377 | + .bitpos = 0, |
---|
| 378 | + .bitlen = 1, |
---|
| 379 | + }, |
---|
| 380 | + .enc_start_base = 0x0010, |
---|
| 381 | + .enc_clr_base = 0x0014, |
---|
| 382 | + .int_en_base = 0x0020, |
---|
| 383 | + .int_mask_base = 0x0024, |
---|
| 384 | + .int_clr_base = 0x0028, |
---|
| 385 | + .int_sta_base = 0x002c, |
---|
| 386 | + .enc_wdg_base = 0x0038, |
---|
| 387 | + .err_mask = 0x03f0, |
---|
| 388 | +}; |
---|
| 389 | + |
---|
| 390 | +static struct rkvenc_hw_info rkvenc_540c_hw_info = { |
---|
194 | 391 | .hw = { |
---|
195 | 392 | .reg_num = 254, |
---|
196 | 393 | .reg_id = 0, |
---|
.. | .. |
---|
259 | 456 | .enc_wdg_base = 0x0038, |
---|
260 | 457 | .err_mask = 0x27d0, |
---|
261 | 458 | }; |
---|
262 | | - |
---|
263 | 459 | /* |
---|
264 | 460 | * file handle translate information for v2 |
---|
265 | 461 | */ |
---|
266 | 462 | static const u16 trans_tbl_h264e_v2[] = { |
---|
| 463 | + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, |
---|
| 464 | + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, |
---|
| 465 | + 20, 21, 22, 23, |
---|
| 466 | +}; |
---|
| 467 | + |
---|
| 468 | +static const u16 trans_tbl_h264e_v2_osd[] = { |
---|
| 469 | + 20, 21, 22, 23, 24, 25, 26, 27, |
---|
| 470 | +}; |
---|
| 471 | + |
---|
| 472 | +static const u16 trans_tbl_h265e_v2[] = { |
---|
| 473 | + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, |
---|
| 474 | + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, |
---|
| 475 | + 20, 21, 22, 23, |
---|
| 476 | +}; |
---|
| 477 | + |
---|
| 478 | +static const u16 trans_tbl_h265e_v2_osd[] = { |
---|
| 479 | + 20, 21, 22, 23, 24, 25, 26, 27, |
---|
| 480 | +}; |
---|
| 481 | + |
---|
| 482 | +/* |
---|
| 483 | + * file handle translate information for 540c |
---|
| 484 | + */ |
---|
| 485 | +static const u16 trans_tbl_h264e_540c[] = { |
---|
267 | 486 | 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, |
---|
268 | 487 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, |
---|
269 | 488 | // /* renc and ref wrap */ |
---|
270 | 489 | // 24, 25, 26, 27, |
---|
271 | 490 | }; |
---|
272 | 491 | |
---|
273 | | -static const u16 trans_tbl_h264e_v2_osd[] = { |
---|
| 492 | +static const u16 trans_tbl_h264e_540c_osd[] = { |
---|
274 | 493 | 3, 4, 12, 13, 21, 22, 30, 31, |
---|
275 | 494 | 39, 40, 48, 49, 57, 58, 66, 67, |
---|
276 | 495 | }; |
---|
277 | 496 | |
---|
278 | | -static const u16 trans_tbl_h265e_v2[] = { |
---|
| 497 | +static const u16 trans_tbl_h265e_540c[] = { |
---|
279 | 498 | 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, |
---|
280 | 499 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 |
---|
281 | 500 | }; |
---|
282 | 501 | |
---|
283 | | -static const u16 trans_tbl_h265e_v2_osd[] = { |
---|
| 502 | +static const u16 trans_tbl_h265e_540c_osd[] = { |
---|
284 | 503 | 3, 4, 12, 13, 21, 22, 30, 31, |
---|
285 | 504 | 39, 40, 48, 49, 57, 58, 66, 67, |
---|
286 | 505 | }; |
---|
.. | .. |
---|
311 | 530 | [RKVENC_FMT_H265E_OSD] = { |
---|
312 | 531 | .count = ARRAY_SIZE(trans_tbl_h265e_v2_osd), |
---|
313 | 532 | .table = trans_tbl_h265e_v2_osd, |
---|
| 533 | + }, |
---|
| 534 | +}; |
---|
| 535 | + |
---|
| 536 | +static struct mpp_trans_info trans_rkvenc_540c[] = { |
---|
| 537 | + [RKVENC_FMT_H264E] = { |
---|
| 538 | + .count = ARRAY_SIZE(trans_tbl_h264e_540c), |
---|
| 539 | + .table = trans_tbl_h264e_540c, |
---|
| 540 | + }, |
---|
| 541 | + [RKVENC_FMT_H264E_OSD] = { |
---|
| 542 | + .count = ARRAY_SIZE(trans_tbl_h264e_540c_osd), |
---|
| 543 | + .table = trans_tbl_h264e_540c_osd, |
---|
| 544 | + }, |
---|
| 545 | + [RKVENC_FMT_H265E] = { |
---|
| 546 | + .count = ARRAY_SIZE(trans_tbl_h265e_540c), |
---|
| 547 | + .table = trans_tbl_h265e_540c, |
---|
| 548 | + }, |
---|
| 549 | + [RKVENC_FMT_H265E_OSD] = { |
---|
| 550 | + .count = ARRAY_SIZE(trans_tbl_h265e_540c_osd), |
---|
| 551 | + .table = trans_tbl_h265e_540c_osd, |
---|
314 | 552 | }, |
---|
315 | 553 | [RKVENC_FMT_JPEGE] = { |
---|
316 | 554 | .count = ARRAY_SIZE(trans_tbl_jpege), |
---|
.. | .. |
---|
345 | 583 | |
---|
346 | 584 | for (i = 0; i < reg_class; i++) { |
---|
347 | 585 | kfree(task->reg[i].data); |
---|
| 586 | + task->reg[i].data = NULL; |
---|
348 | 587 | task->reg[i].size = 0; |
---|
349 | 588 | } |
---|
350 | 589 | |
---|
.. | .. |
---|
570 | 809 | return 0; |
---|
571 | 810 | } |
---|
572 | 811 | |
---|
573 | | - |
---|
574 | 812 | static int rkvenc2_set_rcbbuf(struct mpp_dev *mpp, struct mpp_session *session, |
---|
575 | 813 | struct rkvenc_task *task) |
---|
576 | 814 | { |
---|
.. | .. |
---|
616 | 854 | return 0; |
---|
617 | 855 | } |
---|
618 | 856 | |
---|
| 857 | +static void rkvenc2_setup_task_id(u32 session_id, struct rkvenc_task *task) |
---|
| 858 | +{ |
---|
| 859 | + u32 val = task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET]; |
---|
| 860 | + |
---|
| 861 | + /* always enable tx */ |
---|
| 862 | + val |= DCHS_TXE; |
---|
| 863 | + |
---|
| 864 | + task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = val; |
---|
| 865 | + task->dchs_id.val = (((u64)session_id << 32) | val); |
---|
| 866 | + |
---|
| 867 | + task->dchs_id.txid_orig = task->dchs_id.txid; |
---|
| 868 | + task->dchs_id.rxid_orig = task->dchs_id.rxid; |
---|
| 869 | + task->dchs_id.txid_map = task->dchs_id.txid; |
---|
| 870 | + task->dchs_id.rxid_map = task->dchs_id.rxid; |
---|
| 871 | + |
---|
| 872 | + task->dchs_id.txe_orig = task->dchs_id.txe; |
---|
| 873 | + task->dchs_id.rxe_orig = task->dchs_id.rxe; |
---|
| 874 | + task->dchs_id.txe_map = task->dchs_id.txe; |
---|
| 875 | + task->dchs_id.rxe_map = task->dchs_id.rxe; |
---|
| 876 | +} |
---|
| 877 | + |
---|
| 878 | +static void rkvenc2_check_split_task(struct rkvenc_task *task) |
---|
| 879 | +{ |
---|
| 880 | + u32 slen_fifo_en = 0; |
---|
| 881 | + u32 sli_split_en = 0; |
---|
| 882 | + |
---|
| 883 | + if (task->reg[RKVENC_CLASS_PIC].valid) { |
---|
| 884 | + u32 *reg = task->reg[RKVENC_CLASS_PIC].data; |
---|
| 885 | + u32 enc_stnd = reg[RKVENC2_REG_ENC_PIC] & RKVENC2_BIT_ENC_STND; |
---|
| 886 | + |
---|
| 887 | + slen_fifo_en = (reg[RKVENC2_REG_ENC_PIC] & RKVENC2_BIT_SLEN_FIFO) ? 1 : 0; |
---|
| 888 | + sli_split_en = (reg[RKVENC2_REG_SLI_SPLIT] & RKVENC2_BIT_SLI_SPLIT) ? 1 : 0; |
---|
| 889 | + |
---|
| 890 | + /* |
---|
| 891 | + * FIXUP: rkvenc2 hardware bug: |
---|
| 892 | + * H.264 encoding has a bug when the external line buffer and slice |
---|
| 893 | + * flush are both enabled. |
---|
| 894 | + */ |
---|
| 895 | + if (sli_split_en && slen_fifo_en && |
---|
| 896 | + enc_stnd == RKVENC2_BIT_VAL_H264 && |
---|
| 897 | + reg[RKVENC2_REG_EXT_LINE_BUF_BASE]) |
---|
| 898 | + reg[RKVENC2_REG_SLI_SPLIT] &= ~RKVENC2_BIT_SLI_FLUSH; |
---|
| 899 | + } |
---|
| 900 | + |
---|
| 901 | + task->task_split = sli_split_en && slen_fifo_en; |
---|
| 902 | + |
---|
| 903 | + if (task->task_split) |
---|
| 904 | + INIT_KFIFO(task->slice_info); |
---|
| 905 | +} |
---|
| 906 | + |
---|
619 | 907 | static void *rkvenc_alloc_task(struct mpp_session *session, |
---|
620 | 908 | struct mpp_task_msgs *msgs) |
---|
621 | 909 | { |
---|
.. | .. |
---|
650 | 938 | u32 off; |
---|
651 | 939 | const u16 *tbl; |
---|
652 | 940 | struct rkvenc_hw_info *hw = task->hw_info; |
---|
| 941 | + int fd_bs = -1; |
---|
653 | 942 | |
---|
654 | 943 | for (i = 0; i < hw->fd_class; i++) { |
---|
655 | 944 | u32 class = hw->fd_reg[i].class; |
---|
.. | .. |
---|
659 | 948 | |
---|
660 | 949 | if (!reg) |
---|
661 | 950 | continue; |
---|
| 951 | + |
---|
| 952 | + if (fmt == RKVENC_FMT_JPEGE && class == RKVENC_CLASS_PIC && fd_bs == -1) { |
---|
| 953 | + int bs_index; |
---|
| 954 | + |
---|
| 955 | + bs_index = mpp->var->trans_info[fmt].table[2]; |
---|
| 956 | + fd_bs = reg[bs_index]; |
---|
| 957 | + task->offset_bs = mpp_query_reg_offset_info(&task->off_inf, |
---|
| 958 | + bs_index + ss); |
---|
| 959 | + } |
---|
662 | 960 | |
---|
663 | 961 | ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL); |
---|
664 | 962 | if (ret) |
---|
.. | .. |
---|
672 | 970 | reg[tbl[j]] += off; |
---|
673 | 971 | } |
---|
674 | 972 | } |
---|
| 973 | + |
---|
| 974 | + if (fd_bs >= 0) { |
---|
| 975 | + struct mpp_dma_buffer *bs_buf = |
---|
| 976 | + mpp_dma_find_buffer_fd(session->dma, fd_bs); |
---|
| 977 | + |
---|
| 978 | + if (bs_buf && task->offset_bs > 0) |
---|
| 979 | + mpp_dma_buf_sync(bs_buf, 0, task->offset_bs, DMA_TO_DEVICE, false); |
---|
| 980 | + task->bs_buf = bs_buf; |
---|
| 981 | + } |
---|
675 | 982 | } |
---|
676 | | - rkvenc2_set_rcbbuf(mpp, session, task); |
---|
| 983 | + rkvenc2_setup_task_id(session->index, task); |
---|
677 | 984 | task->clk_mode = CLK_MODE_NORMAL; |
---|
| 985 | + rkvenc2_check_split_task(task); |
---|
678 | 986 | |
---|
679 | 987 | mpp_debug_leave(); |
---|
680 | 988 | |
---|
.. | .. |
---|
692 | 1000 | return NULL; |
---|
693 | 1001 | } |
---|
694 | 1002 | |
---|
| 1003 | +static void *rkvenc2_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task) |
---|
| 1004 | +{ |
---|
| 1005 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 1006 | + unsigned long core_idle; |
---|
| 1007 | + unsigned long flags; |
---|
| 1008 | + u32 core_id_max; |
---|
| 1009 | + s32 core_id; |
---|
| 1010 | + u32 i; |
---|
| 1011 | + |
---|
| 1012 | + spin_lock_irqsave(&queue->running_lock, flags); |
---|
| 1013 | + |
---|
| 1014 | + core_idle = queue->core_idle; |
---|
| 1015 | + core_id_max = queue->core_id_max; |
---|
| 1016 | + |
---|
| 1017 | + for (i = 0; i <= core_id_max; i++) { |
---|
| 1018 | + struct mpp_dev *mpp = queue->cores[i]; |
---|
| 1019 | + |
---|
| 1020 | + if (mpp && mpp->disable) |
---|
| 1021 | + clear_bit(i, &core_idle); |
---|
| 1022 | + } |
---|
| 1023 | + |
---|
| 1024 | + core_id = find_first_bit(&core_idle, core_id_max + 1); |
---|
| 1025 | + |
---|
| 1026 | + if (core_id >= core_id_max + 1 || !queue->cores[core_id]) { |
---|
| 1027 | + mpp_task = NULL; |
---|
| 1028 | + mpp_dbg_core("core %d all busy %lx\n", core_id, core_idle); |
---|
| 1029 | + } else { |
---|
| 1030 | + struct rkvenc_task *task = to_rkvenc_task(mpp_task); |
---|
| 1031 | + |
---|
| 1032 | + clear_bit(core_id, &queue->core_idle); |
---|
| 1033 | + mpp_task->mpp = queue->cores[core_id]; |
---|
| 1034 | + mpp_task->core_id = core_id; |
---|
| 1035 | + rkvenc2_set_rcbbuf(mpp_task->mpp, mpp_task->session, task); |
---|
| 1036 | + mpp_dbg_core("core %d set idle %lx -> %lx\n", core_id, |
---|
| 1037 | + core_idle, queue->core_idle); |
---|
| 1038 | + } |
---|
| 1039 | + |
---|
| 1040 | + spin_unlock_irqrestore(&queue->running_lock, flags); |
---|
| 1041 | + |
---|
| 1042 | + return mpp_task; |
---|
| 1043 | +} |
---|
| 1044 | + |
---|
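A concrete reading of the core-selection bitmap above (the numbers are illustrative):

	/*
	 * core_id_max = 1, core 0 busy       -> core_idle = 0b10
	 * find_first_bit(0b10, 2)            -> 1, task is bound to core 1
	 * both cores busy: core_idle = 0b00  -> find_first_bit() returns 2,
	 *                                       prepare returns NULL and the
	 *                                       task waits for an idle core
	 */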
| 1045 | +static void rkvenc2_patch_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task) |
---|
| 1046 | +{ |
---|
| 1047 | + struct rkvenc_ccu *ccu; |
---|
| 1048 | + union rkvenc2_dual_core_handshake_id *dchs; |
---|
| 1049 | + union rkvenc2_dual_core_handshake_id *task_dchs = &task->dchs_id; |
---|
| 1050 | + int core_num; |
---|
| 1051 | + int core_id = enc->mpp.core_id; |
---|
| 1052 | + unsigned long flags; |
---|
| 1053 | + int i; |
---|
| 1054 | + |
---|
| 1055 | + if (!enc->ccu) |
---|
| 1056 | + return; |
---|
| 1057 | + |
---|
| 1058 | + if (core_id >= RKVENC_MAX_CORE_NUM) { |
---|
| 1059 | + dev_err(enc->mpp.dev, "invalid core id %d max %d\n", |
---|
| 1060 | + core_id, RKVENC_MAX_CORE_NUM); |
---|
| 1061 | + return; |
---|
| 1062 | + } |
---|
| 1063 | + |
---|
| 1064 | + ccu = enc->ccu; |
---|
| 1065 | + dchs = ccu->dchs; |
---|
| 1066 | + core_num = ccu->core_num; |
---|
| 1067 | + |
---|
| 1068 | + spin_lock_irqsave(&ccu->lock_dchs, flags); |
---|
| 1069 | + |
---|
| 1070 | + if (dchs[core_id].working) { |
---|
| 1071 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1072 | + |
---|
| 1073 | + mpp_err("can not config when core %d is still working\n", core_id); |
---|
| 1074 | + return; |
---|
| 1075 | + } |
---|
| 1076 | + |
---|
| 1077 | + if (mpp_debug_unlikely(DEBUG_CORE)) |
---|
| 1078 | + pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n", |
---|
| 1079 | + dchs[0].working ? "work" : "idle", |
---|
| 1080 | + dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe, |
---|
| 1081 | + dchs[1].working ? "work" : "idle", |
---|
| 1082 | + dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe, |
---|
| 1083 | + core_id, task_dchs->txid, task_dchs->txe, |
---|
| 1084 | + task_dchs->rxid, task_dchs->rxe); |
---|
| 1085 | + |
---|
| 1086 | + /* always use a new id mapping for the handshake */ |
---|
| 1087 | + { |
---|
| 1088 | + struct mpp_task *mpp_task = &task->mpp_task; |
---|
| 1089 | + unsigned long id_valid = (unsigned long)-1; |
---|
| 1090 | + int txid_map = -1; |
---|
| 1091 | + int rxid_map = -1; |
---|
| 1092 | + |
---|
| 1093 | + /* scan all used id */ |
---|
| 1094 | + for (i = 0; i < core_num; i++) { |
---|
| 1095 | + if (!dchs[i].working) |
---|
| 1096 | + continue; |
---|
| 1097 | + |
---|
| 1098 | + clear_bit(dchs[i].txid_map, &id_valid); |
---|
| 1099 | + clear_bit(dchs[i].rxid_map, &id_valid); |
---|
| 1100 | + } |
---|
| 1101 | + |
---|
| 1102 | + if (task_dchs->rxe) { |
---|
| 1103 | + for (i = 0; i < core_num; i++) { |
---|
| 1104 | + if (i == core_id) |
---|
| 1105 | + continue; |
---|
| 1106 | + |
---|
| 1107 | + if (!dchs[i].working) |
---|
| 1108 | + continue; |
---|
| 1109 | + |
---|
| 1110 | + if (task_dchs->session_id != dchs[i].session_id) |
---|
| 1111 | + continue; |
---|
| 1112 | + |
---|
| 1113 | + if (task_dchs->rxid_orig != dchs[i].txid_orig) |
---|
| 1114 | + continue; |
---|
| 1115 | + |
---|
| 1116 | + rxid_map = dchs[i].txid_map; |
---|
| 1117 | + break; |
---|
| 1118 | + } |
---|
| 1119 | + } |
---|
| 1120 | + |
---|
| 1121 | + txid_map = find_first_bit(&id_valid, RKVENC_MAX_DCHS_ID); |
---|
| 1122 | + if (txid_map == RKVENC_MAX_DCHS_ID) { |
---|
| 1123 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1124 | + |
---|
| 1125 | + mpp_err("task %d:%d on core %d failed to find a txid\n", |
---|
| 1126 | + mpp_task->session->pid, mpp_task->task_id, |
---|
| 1127 | + mpp_task->core_id); |
---|
| 1128 | + return; |
---|
| 1129 | + } |
---|
| 1130 | + |
---|
| 1131 | + clear_bit(txid_map, &id_valid); |
---|
| 1132 | + task_dchs->txid_map = txid_map; |
---|
| 1133 | + |
---|
| 1134 | + if (rxid_map < 0) { |
---|
| 1135 | + rxid_map = find_first_bit(&id_valid, RKVENC_MAX_DCHS_ID); |
---|
| 1136 | + if (rxid_map == RKVENC_MAX_DCHS_ID) { |
---|
| 1137 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1138 | + |
---|
| 1139 | + mpp_err("task %d:%d on core %d failed to find a rxid\n", |
---|
| 1140 | + mpp_task->session->pid, mpp_task->task_id, |
---|
| 1141 | + mpp_task->core_id); |
---|
| 1142 | + return; |
---|
| 1143 | + } |
---|
| 1144 | + |
---|
| 1145 | + task_dchs->rxe_map = 0; |
---|
| 1146 | + } |
---|
| 1147 | + |
---|
| 1148 | + task_dchs->rxid_map = rxid_map; |
---|
| 1149 | + } |
---|
| 1150 | + |
---|
| 1151 | + task_dchs->txid = task_dchs->txid_map; |
---|
| 1152 | + task_dchs->rxid = task_dchs->rxid_map; |
---|
| 1153 | + task_dchs->rxe = task_dchs->rxe_map; |
---|
| 1154 | + |
---|
| 1155 | + dchs[core_id].val = task_dchs->val; |
---|
| 1156 | + task->reg[RKVENC_CLASS_PIC].data[DCHS_CLASS_OFFSET] = task_dchs->val; |
---|
| 1157 | + |
---|
| 1158 | + dchs[core_id].working = 1; |
---|
| 1159 | + |
---|
| 1160 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1161 | +} |
---|
| 1162 | + |
---|
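A worked two-core example of the id allocation above (illustrative values):

	/*
	 * core 0 is working with txid_map = 0 and rxid_map = 1,
	 * so the id_valid scan clears bits 0 and 1.
	 * A new task on core 1 has rxe set, shares core 0's session_id and its
	 * rxid_orig matches core 0's txid_orig:
	 *   rxid_map = 0   (reuse core 0's tx id for the handshake)
	 *   txid_map = 2   (first bit still set in id_valid)
	 */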
| 1163 | +static void rkvenc2_update_dchs(struct rkvenc_dev *enc, struct rkvenc_task *task) |
---|
| 1164 | +{ |
---|
| 1165 | + struct rkvenc_ccu *ccu = enc->ccu; |
---|
| 1166 | + int core_id = enc->mpp.core_id; |
---|
| 1167 | + unsigned long flags; |
---|
| 1168 | + |
---|
| 1169 | + if (!ccu) |
---|
| 1170 | + return; |
---|
| 1171 | + |
---|
| 1172 | + if (core_id >= RKVENC_MAX_CORE_NUM) { |
---|
| 1173 | + dev_err(enc->mpp.dev, "invalid core id %d max %d\n", |
---|
| 1174 | + core_id, RKVENC_MAX_CORE_NUM); |
---|
| 1175 | + return; |
---|
| 1176 | + } |
---|
| 1177 | + |
---|
| 1178 | + spin_lock_irqsave(&ccu->lock_dchs, flags); |
---|
| 1179 | + ccu->dchs[core_id].val = 0; |
---|
| 1180 | + |
---|
| 1181 | + if (mpp_debug_unlikely(DEBUG_CORE)) { |
---|
| 1182 | + union rkvenc2_dual_core_handshake_id *dchs = ccu->dchs; |
---|
| 1183 | + union rkvenc2_dual_core_handshake_id *task_dchs = &task->dchs_id; |
---|
| 1184 | + |
---|
| 1185 | + pr_info("core %d task done\n", core_id); |
---|
| 1186 | + pr_info("core tx:rx 0 %s %d:%d %d:%d -- 1 %s %d:%d %d:%d -- task %d %d:%d %d:%d\n", |
---|
| 1187 | + dchs[0].working ? "work" : "idle", |
---|
| 1188 | + dchs[0].txid, dchs[0].txe, dchs[0].rxid, dchs[0].rxe, |
---|
| 1189 | + dchs[1].working ? "work" : "idle", |
---|
| 1190 | + dchs[1].txid, dchs[1].txe, dchs[1].rxid, dchs[1].rxe, |
---|
| 1191 | + core_id, task_dchs->txid, task_dchs->txe, |
---|
| 1192 | + task_dchs->rxid, task_dchs->rxe); |
---|
| 1193 | + } |
---|
| 1194 | + |
---|
| 1195 | + spin_unlock_irqrestore(&ccu->lock_dchs, flags); |
---|
| 1196 | +} |
---|
| 1197 | + |
---|
695 | 1198 | static int rkvenc_run(struct mpp_dev *mpp, struct mpp_task *mpp_task) |
---|
696 | 1199 | { |
---|
697 | 1200 | u32 i, j; |
---|
.. | .. |
---|
699 | 1202 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
700 | 1203 | struct rkvenc_task *task = to_rkvenc_task(mpp_task); |
---|
701 | 1204 | struct rkvenc_hw_info *hw = enc->hw_info; |
---|
| 1205 | + u32 timing_en = mpp->srv->timing_en; |
---|
| 1206 | + u32 timeout_thd; |
---|
702 | 1207 | |
---|
703 | 1208 | mpp_debug_enter(); |
---|
704 | 1209 | |
---|
.. | .. |
---|
709 | 1214 | |
---|
710 | 1215 | /* clear hardware counter */ |
---|
711 | 1216 | mpp_write_relaxed(mpp, 0x5300, 0x2); |
---|
| 1217 | + |
---|
| 1218 | + rkvenc2_patch_dchs(enc, task); |
---|
712 | 1219 | |
---|
713 | 1220 | for (i = 0; i < task->w_req_cnt; i++) { |
---|
714 | 1221 | int ret; |
---|
.. | .. |
---|
735 | 1242 | } |
---|
736 | 1243 | } |
---|
737 | 1244 | |
---|
| 1245 | + if (mpp_debug_unlikely(DEBUG_CORE)) |
---|
| 1246 | + dev_info(mpp->dev, "core %d dchs %08x\n", mpp->core_id, |
---|
| 1247 | + mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET)); |
---|
| 1248 | + |
---|
| 1249 | + /* flush tlb before starting hardware */ |
---|
| 1250 | + mpp_iommu_flush_tlb(mpp->iommu_info); |
---|
| 1251 | + |
---|
738 | 1252 | /* init current task */ |
---|
739 | 1253 | mpp->cur_task = mpp_task; |
---|
| 1254 | + |
---|
| 1255 | + /* |
---|
| 1256 | + * Reconfigure the timeout threshold. |
---|
| 1257 | + * bits[23:0]: threshold in units of 1024 core clock cycles |
---|
| 1258 | + */ |
---|
| 1259 | + timeout_thd = mpp_read(mpp, RKVENC_WDG) & 0xff000000; |
---|
| 1260 | + timeout_thd |= TIMEOUT_MS * clk_get_rate(enc->core_clk_info.clk) / 1024000; |
---|
| 1261 | + mpp_write(mpp, RKVENC_WDG, timeout_thd); |
---|
| 1262 | + |
---|
| 1263 | + mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY); |
---|
| 1264 | + |
---|
740 | 1265 | /* Flush the register before the start the device */ |
---|
741 | 1266 | wmb(); |
---|
742 | 1267 | mpp_write(mpp, enc->hw_info->enc_start_base, start_val); |
---|
| 1268 | + |
---|
| 1269 | + mpp_task_run_end(mpp_task, timing_en); |
---|
743 | 1270 | |
---|
744 | 1271 | mpp_debug_leave(); |
---|
745 | 1272 | |
---|
746 | 1273 | return 0; |
---|
747 | 1274 | } |
---|
748 | 1275 | |
---|
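As a sanity check on the watchdog threshold written in rkvenc_run() above, a worked example assuming a 600 MHz core clock (the clock rate is an assumption for illustration):

	/*
	 * TIMEOUT_MS * clk_get_rate(core) / 1024000
	 *   = 100 * 600000000 / 1024000
	 *   = 58593 units of 1024 core clock cycles
	 *   ~ 58593 * 1024 / 600 MHz ~ 100 ms before the watchdog fires
	 */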
| 1276 | +static void rkvenc2_read_slice_len(struct mpp_dev *mpp, struct rkvenc_task *task, |
---|
| 1277 | + u32 last) |
---|
| 1278 | +{ |
---|
| 1279 | + u32 sli_num = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_NUM_BASE); |
---|
| 1280 | + union rkvenc2_slice_len_info slice_info; |
---|
| 1281 | + u32 task_id = task->mpp_task.task_id; |
---|
| 1282 | + u32 i; |
---|
| 1283 | + |
---|
| 1284 | + mpp_dbg_slice("task %d wr %3d len start %s\n", task_id, |
---|
| 1285 | + sli_num, last ? "last" : ""); |
---|
| 1286 | + |
---|
| 1287 | + for (i = 0; i < sli_num; i++) { |
---|
| 1288 | + slice_info.val = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_LEN_BASE); |
---|
| 1289 | + |
---|
| 1290 | + if (last && i == sli_num - 1) { |
---|
| 1291 | + task->last_slice_found = 1; |
---|
| 1292 | + slice_info.last = 1; |
---|
| 1293 | + } |
---|
| 1294 | + |
---|
| 1295 | + mpp_dbg_slice("task %d wr %3d len %d %s\n", task_id, |
---|
| 1296 | + task->slice_wr_cnt, slice_info.slice_len, |
---|
| 1297 | + slice_info.last ? "last" : ""); |
---|
| 1298 | + |
---|
| 1299 | + kfifo_in(&task->slice_info, &slice_info, 1); |
---|
| 1300 | + task->slice_wr_cnt++; |
---|
| 1301 | + } |
---|
| 1302 | + |
---|
| 1303 | + /* Fixup for the race between the last flag and the slice number register */ |
---|
| 1304 | + if (last && !task->last_slice_found) { |
---|
| 1305 | + mpp_dbg_slice("task %d mark last slice\n", task_id); |
---|
| 1306 | + slice_info.last = 1; |
---|
| 1307 | + slice_info.slice_len = 0; |
---|
| 1308 | + kfifo_in(&task->slice_info, &slice_info, 1); |
---|
| 1309 | + } |
---|
| 1310 | +} |
---|
| 1311 | + |
---|
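One hedged robustness note on the fifo writes above: kfifo_in() returns the number of elements actually stored, so a slice length is silently dropped once RKVENC_MAX_SLICE_FIFO_LEN entries are outstanding. A defensive variant (not in the driver) could check the return value:

	if (!kfifo_in(&task->slice_info, &slice_info, 1))
		dev_warn(mpp->dev, "slice info fifo full, slice length dropped\n");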
749 | 1312 | static int rkvenc_irq(struct mpp_dev *mpp) |
---|
750 | 1313 | { |
---|
751 | 1314 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
752 | 1315 | struct rkvenc_hw_info *hw = enc->hw_info; |
---|
| 1316 | + struct mpp_task *mpp_task = NULL; |
---|
| 1317 | + struct rkvenc_task *task = NULL; |
---|
| 1318 | + u32 irq_status; |
---|
| 1319 | + int ret = IRQ_NONE; |
---|
753 | 1320 | |
---|
754 | 1321 | mpp_debug_enter(); |
---|
755 | 1322 | |
---|
756 | | - mpp->irq_status = mpp_read(mpp, hw->int_sta_base); |
---|
757 | | - if (!mpp->irq_status) |
---|
758 | | - return IRQ_NONE; |
---|
759 | | - mpp_write(mpp, hw->int_mask_base, 0x100); |
---|
760 | | - mpp_write(mpp, hw->int_clr_base, 0xffffffff); |
---|
761 | | - udelay(5); |
---|
762 | | - mpp_write(mpp, hw->int_sta_base, 0); |
---|
| 1323 | + irq_status = mpp_read(mpp, hw->int_sta_base); |
---|
| 1324 | + |
---|
| 1325 | + mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n", |
---|
| 1326 | + dev_name(mpp->dev), irq_status); |
---|
| 1327 | + |
---|
| 1328 | + if (!irq_status) |
---|
| 1329 | + return ret; |
---|
| 1330 | + |
---|
| 1331 | + /* clear int first */ |
---|
| 1332 | + mpp_write(mpp, hw->int_clr_base, irq_status); |
---|
| 1333 | + |
---|
| 1334 | + /* |
---|
| 1335 | + * Prevent a watchdog irq storm. |
---|
| 1336 | + * The encoder does not stop working when the watchdog interrupt triggers; |
---|
| 1337 | + * it keeps checking the timeout and re-raising the watchdog irq. |
---|
| 1338 | + */ |
---|
| 1339 | + if (irq_status & INT_STA_WDG_STA) |
---|
| 1340 | + mpp_write(mpp, hw->int_mask_base, INT_STA_WDG_STA); |
---|
| 1341 | + |
---|
| 1342 | + if (mpp->cur_task) { |
---|
| 1343 | + mpp_task = mpp->cur_task; |
---|
| 1344 | + task = to_rkvenc_task(mpp_task); |
---|
| 1345 | + } |
---|
| 1346 | + |
---|
| 1347 | + /* 1. read slice number and slice length */ |
---|
| 1348 | + if (task && task->task_split && |
---|
| 1349 | + (irq_status & (INT_STA_SLC_DONE_STA | INT_STA_ENC_DONE_STA))) { |
---|
| 1350 | + mpp_time_part_diff(mpp_task); |
---|
| 1351 | + rkvenc2_read_slice_len(mpp, task, irq_status & INT_STA_ENC_DONE_STA); |
---|
| 1352 | + wake_up(&mpp_task->wait); |
---|
| 1353 | + } |
---|
| 1354 | + |
---|
| 1355 | + /* 2. process slice irq */ |
---|
| 1356 | + if (irq_status & INT_STA_SLC_DONE_STA) |
---|
| 1357 | + ret = IRQ_HANDLED; |
---|
| 1358 | + |
---|
| 1359 | + /* 3. process bitstream overflow */ |
---|
| 1360 | + if (irq_status & INT_STA_BSF_OFLW_STA) { |
---|
| 1361 | + u32 bs_rd = mpp_read(mpp, RKVENC2_REG_ADR_BSBR); |
---|
| 1362 | + u32 bs_wr = mpp_read(mpp, RKVENC2_REG_ST_BSB); |
---|
| 1363 | + u32 bs_top = mpp_read(mpp, RKVENC2_REG_ADR_BSBT); |
---|
| 1364 | + u32 bs_bot = mpp_read(mpp, RKVENC2_REG_ADR_BSBB); |
---|
| 1365 | + |
---|
| 1366 | + if (mpp_task) |
---|
| 1367 | + dev_err(mpp->dev, "task %d found bitstream overflow [%#08x %#08x %#08x %#08x]\n", |
---|
| 1368 | + mpp_task->task_index, bs_top, bs_bot, bs_wr, bs_rd); |
---|
| 1369 | + bs_wr += 128; |
---|
| 1370 | + if (bs_wr >= bs_top) |
---|
| 1371 | + bs_wr = bs_bot; |
---|
| 1372 | + |
---|
| 1373 | + /* update the write address so the encoder can continue */ |
---|
| 1374 | + mpp_write(mpp, RKVENC2_REG_ADR_BSBS, bs_wr); |
---|
| 1375 | + enc->bs_overflow = 1; |
---|
| 1376 | + |
---|
| 1377 | + ret = IRQ_HANDLED; |
---|
| 1378 | + } |
---|
| 1379 | + |
---|
| 1380 | + /* 4. process frame irq */ |
---|
| 1381 | + if (irq_status & INT_STA_ENC_DONE_STA) { |
---|
| 1382 | + mpp->irq_status = irq_status; |
---|
| 1383 | + |
---|
| 1384 | + if (enc->bs_overflow) { |
---|
| 1385 | + mpp->irq_status |= INT_STA_BSF_OFLW_STA; |
---|
| 1386 | + enc->bs_overflow = 0; |
---|
| 1387 | + } |
---|
| 1388 | + |
---|
| 1389 | + ret = IRQ_WAKE_THREAD; |
---|
| 1390 | + } |
---|
| 1391 | + |
---|
| 1392 | + /* 5. process error irq */ |
---|
| 1393 | + if (irq_status & INT_STA_ERROR) { |
---|
| 1394 | + mpp->irq_status = irq_status; |
---|
| 1395 | + |
---|
| 1396 | + dev_err(mpp->dev, "found error status %08x\n", irq_status); |
---|
| 1397 | + |
---|
| 1398 | + ret = IRQ_WAKE_THREAD; |
---|
| 1399 | + } |
---|
763 | 1400 | |
---|
764 | 1401 | mpp_debug_leave(); |
---|
765 | 1402 | |
---|
766 | | - return IRQ_WAKE_THREAD; |
---|
| 1403 | + return ret; |
---|
| 1404 | +} |
---|
| 1405 | + |
---|
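The bitstream-overflow branch above boils down to a small pointer-advance rule; an illustrative refactoring (not how the driver is actually structured) makes it explicit:

	/* nudge the write pointer past the overflow point, wrapping at the top */
	static inline u32 rkvenc2_bs_overflow_advance(u32 bs_wr, u32 bs_top, u32 bs_bot)
	{
		bs_wr += 128;
		return (bs_wr >= bs_top) ? bs_bot : bs_wr;
	}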
| 1406 | +static int vepu540c_irq(struct mpp_dev *mpp) |
---|
| 1407 | +{ |
---|
| 1408 | + return rkvenc_irq(mpp); |
---|
767 | 1409 | } |
---|
768 | 1410 | |
---|
769 | 1411 | static int rkvenc_isr(struct mpp_dev *mpp) |
---|
.. | .. |
---|
771 | 1413 | struct rkvenc_task *task; |
---|
772 | 1414 | struct mpp_task *mpp_task; |
---|
773 | 1415 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1416 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 1417 | + unsigned long core_idle; |
---|
774 | 1418 | |
---|
775 | 1419 | mpp_debug_enter(); |
---|
776 | 1420 | |
---|
.. | .. |
---|
783 | 1427 | mpp_task = mpp->cur_task; |
---|
784 | 1428 | mpp_time_diff(mpp_task); |
---|
785 | 1429 | mpp->cur_task = NULL; |
---|
| 1430 | + |
---|
| 1431 | + if (mpp_task->mpp && mpp_task->mpp != mpp) |
---|
| 1432 | + dev_err(mpp->dev, "mismatch core dev %p:%p\n", mpp_task->mpp, mpp); |
---|
| 1433 | + |
---|
786 | 1434 | task = to_rkvenc_task(mpp_task); |
---|
787 | 1435 | task->irq_status = mpp->irq_status; |
---|
788 | | - mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status); |
---|
| 1436 | + |
---|
| 1437 | + rkvenc2_update_dchs(enc, task); |
---|
789 | 1438 | |
---|
790 | 1439 | if (task->irq_status & enc->hw_info->err_mask) { |
---|
791 | 1440 | atomic_inc(&mpp->reset_request); |
---|
| 1441 | + |
---|
792 | 1442 | /* dump register */ |
---|
793 | 1443 | if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) |
---|
794 | | - mpp_task_dump_hw_reg(mpp, mpp_task); |
---|
| 1444 | + mpp_task_dump_hw_reg(mpp); |
---|
795 | 1445 | } |
---|
| 1446 | + |
---|
796 | 1447 | mpp_task_finish(mpp_task->session, mpp_task); |
---|
| 1448 | + |
---|
| 1449 | + core_idle = queue->core_idle; |
---|
| 1450 | + set_bit(mpp->core_id, &queue->core_idle); |
---|
| 1451 | + |
---|
| 1452 | + mpp_dbg_core("core %d isr idle %lx -> %lx\n", mpp->core_id, core_idle, |
---|
| 1453 | + queue->core_idle); |
---|
797 | 1454 | |
---|
798 | 1455 | mpp_debug_leave(); |
---|
799 | 1456 | |
---|
.. | .. |
---|
824 | 1481 | reg[j] = mpp_read_relaxed(mpp, msg.offset + j * sizeof(u32)); |
---|
825 | 1482 | |
---|
826 | 1483 | } |
---|
| 1484 | + |
---|
| 1485 | + if (task->bs_buf) { |
---|
| 1486 | + u32 bs_size = mpp_read(mpp, 0x4064); |
---|
| 1487 | + |
---|
| 1488 | + mpp_dma_buf_sync(task->bs_buf, 0, bs_size + task->offset_bs, |
---|
| 1489 | + DMA_FROM_DEVICE, true); |
---|
| 1490 | + } |
---|
| 1491 | + |
---|
827 | 1492 | /* revert hack for irq status */ |
---|
828 | 1493 | reg = rkvenc_get_class_reg(task, task->hw_info->int_sta_base); |
---|
829 | 1494 | if (reg) |
---|
.. | .. |
---|
975 | 1640 | } |
---|
976 | 1641 | seq_puts(seq, "\n"); |
---|
977 | 1642 | /* item data*/ |
---|
978 | | - seq_printf(seq, "|%8p|", session); |
---|
| 1643 | + seq_printf(seq, "|%8d|", session->index); |
---|
979 | 1644 | seq_printf(seq, "%8s|", mpp_device_name[session->device_type]); |
---|
980 | 1645 | for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) { |
---|
981 | 1646 | u32 flag = priv->codec_info[i].flag; |
---|
.. | .. |
---|
1008 | 1673 | mutex_lock(&mpp->srv->session_lock); |
---|
1009 | 1674 | list_for_each_entry_safe(session, n, |
---|
1010 | 1675 | &mpp->srv->session_list, |
---|
1011 | | - session_link) { |
---|
| 1676 | + service_link) { |
---|
1012 | 1677 | if (session->device_type != MPP_DEVICE_RKVENC) |
---|
1013 | 1678 | continue; |
---|
1014 | 1679 | if (!session->priv) |
---|
.. | .. |
---|
1024 | 1689 | static int rkvenc_procfs_init(struct mpp_dev *mpp) |
---|
1025 | 1690 | { |
---|
1026 | 1691 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1692 | + char name[32]; |
---|
1027 | 1693 | |
---|
1028 | | - enc->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs); |
---|
| 1694 | + if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name || |
---|
| 1695 | + !mpp->srv || !mpp->srv->procfs) |
---|
| 1696 | + return -EINVAL; |
---|
| 1697 | + |
---|
| 1698 | + snprintf(name, sizeof(name) - 1, "%s%d", |
---|
| 1699 | + mpp->dev->of_node->name, mpp->core_id); |
---|
| 1700 | + |
---|
| 1701 | + enc->procfs = proc_mkdir(name, mpp->srv->procfs); |
---|
1029 | 1702 | if (IS_ERR_OR_NULL(enc->procfs)) { |
---|
1030 | 1703 | mpp_err("failed on open procfs\n"); |
---|
1031 | 1704 | enc->procfs = NULL; |
---|
1032 | 1705 | return -EIO; |
---|
1033 | 1706 | } |
---|
| 1707 | + |
---|
| 1708 | + /* for common mpp_dev options */ |
---|
| 1709 | + mpp_procfs_create_common(enc->procfs, mpp); |
---|
| 1710 | + |
---|
1034 | 1711 | /* for debug */ |
---|
1035 | 1712 | mpp_procfs_create_u32("aclk", 0644, |
---|
1036 | 1713 | enc->procfs, &enc->aclk_info.debug_rate_hz); |
---|
.. | .. |
---|
1045 | 1722 | return 0; |
---|
1046 | 1723 | } |
---|
1047 | 1724 | |
---|
| 1725 | +static int rkvenc_procfs_ccu_init(struct mpp_dev *mpp) |
---|
| 1726 | +{ |
---|
| 1727 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1728 | + |
---|
| 1729 | + if (!enc->procfs) |
---|
| 1730 | + goto done; |
---|
| 1731 | + |
---|
| 1732 | +done: |
---|
| 1733 | + return 0; |
---|
| 1734 | +} |
---|
1048 | 1735 | #else |
---|
1049 | 1736 | static inline int rkvenc_procfs_remove(struct mpp_dev *mpp) |
---|
1050 | 1737 | { |
---|
.. | .. |
---|
1056 | 1743 | return 0; |
---|
1057 | 1744 | } |
---|
1058 | 1745 | |
---|
| 1746 | +static inline int rkvenc_procfs_ccu_init(struct mpp_dev *mpp) |
---|
| 1747 | +{ |
---|
| 1748 | + return 0; |
---|
| 1749 | +} |
---|
| 1750 | +#endif |
---|
| 1751 | + |
---|
| 1752 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1753 | +static int rk3588_venc_set_read_margin(struct device *dev, |
---|
| 1754 | + struct rockchip_opp_info *opp_info, |
---|
| 1755 | + u32 rm) |
---|
| 1756 | +{ |
---|
| 1757 | + if (!opp_info->grf || !opp_info->volt_rm_tbl) |
---|
| 1758 | + return 0; |
---|
| 1759 | + |
---|
| 1760 | + if (rm == opp_info->current_rm || rm == UINT_MAX) |
---|
| 1761 | + return 0; |
---|
| 1762 | + |
---|
| 1763 | + dev_dbg(dev, "set rm to %d\n", rm); |
---|
| 1764 | + |
---|
| 1765 | + regmap_write(opp_info->grf, 0x214, 0x001c0000 | (rm << 2)); |
---|
| 1766 | + regmap_write(opp_info->grf, 0x218, 0x001c0000 | (rm << 2)); |
---|
| 1767 | + regmap_write(opp_info->grf, 0x220, 0x003c0000 | (rm << 2)); |
---|
| 1768 | + regmap_write(opp_info->grf, 0x224, 0x003c0000 | (rm << 2)); |
---|
| 1769 | + |
---|
| 1770 | + opp_info->current_rm = rm; |
---|
| 1771 | + |
---|
| 1772 | + return 0; |
---|
| 1773 | +} |
---|
| 1774 | + |
---|
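The magic constants above follow the usual Rockchip GRF convention: the upper 16 bits of the written word are a write-enable mask for the matching lower bits, so 0x001c0000 | (rm << 2) updates only bits [4:2]. A common helper for that pattern, shown here as an assumption since the driver writes the raw values directly:

	/* hiword-mask update: write 'val' into the bits selected by 'mask' at 'shift' */
	#define HIWORD_UPDATE(val, mask, shift) \
		(((val) << (shift)) | ((mask) << (16 + (shift))))

	/* HIWORD_UPDATE(rm, 0x7, 2) == (0x001c0000 | (rm << 2)) */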
| 1775 | +static const struct rockchip_opp_data rk3588_venc_opp_data = { |
---|
| 1776 | + .set_read_margin = rk3588_venc_set_read_margin, |
---|
| 1777 | +}; |
---|
| 1778 | + |
---|
| 1779 | +static const struct of_device_id rockchip_rkvenc_of_match[] = { |
---|
| 1780 | + { |
---|
| 1781 | + .compatible = "rockchip,rk3588", |
---|
| 1782 | + .data = (void *)&rk3588_venc_opp_data, |
---|
| 1783 | + }, |
---|
| 1784 | + {}, |
---|
| 1785 | +}; |
---|
| 1786 | + |
---|
| 1787 | +static struct monitor_dev_profile venc_mdevp = { |
---|
| 1788 | + .type = MONITOR_TYPE_DEV, |
---|
| 1789 | + .update_volt = rockchip_monitor_check_rate_volt, |
---|
| 1790 | +}; |
---|
| 1791 | + |
---|
| 1792 | +static int rkvenc_devfreq_init(struct mpp_dev *mpp) |
---|
| 1793 | +{ |
---|
| 1794 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1795 | + struct clk *clk_core = enc->core_clk_info.clk; |
---|
| 1796 | + struct device *dev = mpp->dev; |
---|
| 1797 | + struct opp_table *reg_table = NULL; |
---|
| 1798 | + struct opp_table *clk_table = NULL; |
---|
| 1799 | + const char *const reg_names[] = { "venc", "mem" }; |
---|
| 1800 | + int ret = 0; |
---|
| 1801 | + |
---|
| 1802 | + if (!clk_core) |
---|
| 1803 | + return 0; |
---|
| 1804 | + |
---|
| 1805 | + if (of_find_property(dev->of_node, "venc-supply", NULL) && |
---|
| 1806 | + of_find_property(dev->of_node, "mem-supply", NULL)) { |
---|
| 1807 | + reg_table = dev_pm_opp_set_regulators(dev, reg_names, 2); |
---|
| 1808 | + if (IS_ERR(reg_table)) |
---|
| 1809 | + return PTR_ERR(reg_table); |
---|
| 1810 | + } else { |
---|
| 1811 | + reg_table = dev_pm_opp_set_regulators(dev, reg_names, 1); |
---|
| 1812 | + if (IS_ERR(reg_table)) |
---|
| 1813 | + return PTR_ERR(reg_table); |
---|
| 1814 | + } |
---|
| 1815 | + enc->opp_table = reg_table; |
---|
| 1816 | + |
---|
| 1817 | + clk_table = dev_pm_opp_set_clkname(dev, "clk_core"); |
---|
| 1818 | + if (IS_ERR(clk_table)) { |
---|
| 1819 | + ret = PTR_ERR(clk_table); |
---|
| 1820 | + goto put_opp_reg; |
---|
| 1821 | + } |
---|
| 1822 | + |
---|
| 1823 | + rockchip_get_opp_data(rockchip_rkvenc_of_match, &enc->opp_info); |
---|
| 1824 | + ret = rockchip_init_opp_table(dev, &enc->opp_info, "leakage", "venc"); |
---|
| 1825 | + if (ret) { |
---|
| 1826 | + dev_err(dev, "failed to init_opp_table\n"); |
---|
| 1827 | + goto put_opp_clk; |
---|
| 1828 | + } |
---|
| 1829 | + |
---|
| 1830 | + enc->mdev_info = rockchip_system_monitor_register(dev, &venc_mdevp); |
---|
| 1831 | + if (IS_ERR(enc->mdev_info)) { |
---|
| 1832 | + dev_dbg(dev, "without system monitor\n"); |
---|
| 1833 | + enc->mdev_info = NULL; |
---|
| 1834 | + } |
---|
| 1835 | + |
---|
| 1836 | + return 0; |
---|
| 1837 | + |
---|
| 1838 | +put_opp_clk: |
---|
| 1839 | + dev_pm_opp_put_clkname(enc->opp_table); |
---|
| 1840 | +put_opp_reg: |
---|
| 1841 | + dev_pm_opp_put_regulators(enc->opp_table); |
---|
| 1842 | + enc->opp_table = NULL; |
---|
| 1843 | + |
---|
| 1844 | + return ret; |
---|
| 1845 | +} |
---|
| 1846 | + |
---|
| 1847 | +static int rkvenc_devfreq_remove(struct mpp_dev *mpp) |
---|
| 1848 | +{ |
---|
| 1849 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1850 | + |
---|
| 1851 | + if (enc->mdev_info) { |
---|
| 1852 | + rockchip_system_monitor_unregister(enc->mdev_info); |
---|
| 1853 | + enc->mdev_info = NULL; |
---|
| 1854 | + } |
---|
| 1855 | + if (enc->opp_table) { |
---|
| 1856 | + rockchip_uninit_opp_table(mpp->dev, &enc->opp_info); |
---|
| 1857 | + dev_pm_opp_put_clkname(enc->opp_table); |
---|
| 1858 | + dev_pm_opp_put_regulators(enc->opp_table); |
---|
| 1859 | + enc->opp_table = NULL; |
---|
| 1860 | + } |
---|
| 1861 | + |
---|
| 1862 | + return 0; |
---|
| 1863 | +} |
---|
1059 | 1864 | #endif |
---|
1060 | 1865 | |
---|
1061 | 1866 | static int rkvenc_init(struct mpp_dev *mpp) |
---|
.. | .. |
---|
1094 | 1899 | if (!enc->rst_core) |
---|
1095 | 1900 | mpp_err("No core reset resource define\n"); |
---|
1096 | 1901 | |
---|
| 1902 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1903 | + ret = rkvenc_devfreq_init(mpp); |
---|
| 1904 | + if (ret) |
---|
| 1905 | + mpp_err("failed to add venc devfreq\n"); |
---|
| 1906 | +#endif |
---|
| 1907 | + |
---|
1097 | 1908 | return 0; |
---|
| 1909 | +} |
---|
| 1910 | + |
---|
| 1911 | +static int rkvenc_exit(struct mpp_dev *mpp) |
---|
| 1912 | +{ |
---|
| 1913 | +#ifdef CONFIG_PM_DEVFREQ |
---|
| 1914 | + rkvenc_devfreq_remove(mpp); |
---|
| 1915 | +#endif |
---|
| 1916 | + |
---|
| 1917 | + return 0; |
---|
| 1918 | +} |
---|
| 1919 | + |
---|
| 1920 | +static int rkvenc_soft_reset(struct mpp_dev *mpp) |
---|
| 1921 | +{ |
---|
| 1922 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 1923 | + struct rkvenc_hw_info *hw = enc->hw_info; |
---|
| 1924 | + u32 rst_status = 0; |
---|
| 1925 | + int ret = 0; |
---|
| 1926 | + |
---|
| 1927 | + /* safe reset */ |
---|
| 1928 | + mpp_write(mpp, hw->int_mask_base, 0x3FF); |
---|
| 1929 | + mpp_write(mpp, hw->enc_clr_base, 0x3); |
---|
| 1930 | + ret = readl_relaxed_poll_timeout(mpp->reg_base + hw->int_sta_base, |
---|
| 1931 | + rst_status, |
---|
| 1932 | + rst_status & RKVENC_SCLR_DONE_STA, |
---|
| 1933 | + 0, 5); |
---|
| 1934 | + mpp_write(mpp, hw->int_clr_base, 0xffffffff); |
---|
| 1935 | + mpp_write(mpp, hw->int_sta_base, 0); |
---|
| 1936 | + |
---|
| 1937 | + return ret; |
---|
| 1938 | + |
---|
1098 | 1939 | } |
---|
1099 | 1940 | |
---|
1100 | 1941 | static int rkvenc_reset(struct mpp_dev *mpp) |
---|
1101 | 1942 | { |
---|
1102 | 1943 | struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
1103 | | - struct rkvenc_hw_info *hw = enc->hw_info; |
---|
| 1944 | + int ret = 0; |
---|
| 1945 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
1104 | 1946 | |
---|
1105 | 1947 | mpp_debug_enter(); |
---|
1106 | 1948 | |
---|
1107 | | - /* safe reset */ |
---|
1108 | | - mpp_write(mpp, hw->int_mask_base, 0x3FF); |
---|
1109 | | - mpp_write(mpp, hw->enc_clr_base, 0x1); |
---|
1110 | | - udelay(5); |
---|
1111 | | - mpp_write(mpp, hw->int_clr_base, 0xffffffff); |
---|
1112 | | - mpp_write(mpp, hw->int_sta_base, 0); |
---|
| 1949 | + /* safe reset first */ |
---|
| 1950 | + ret = rkvenc_soft_reset(mpp); |
---|
1113 | 1951 | |
---|
1114 | 1952 | /* cru reset */ |
---|
1115 | | - if (enc->rst_a && enc->rst_h && enc->rst_core) { |
---|
1116 | | - rockchip_pmu_idle_request(mpp->dev, true); |
---|
| 1953 | + if (ret && enc->rst_a && enc->rst_h && enc->rst_core) { |
---|
| 1954 | + mpp_err("soft reset timeout, use cru reset\n"); |
---|
| 1955 | + mpp_pmu_idle_request(mpp, true); |
---|
1117 | 1956 | mpp_safe_reset(enc->rst_a); |
---|
1118 | 1957 | mpp_safe_reset(enc->rst_h); |
---|
1119 | 1958 | mpp_safe_reset(enc->rst_core); |
---|
.. | .. |
---|
1121 | 1960 | mpp_safe_unreset(enc->rst_a); |
---|
1122 | 1961 | mpp_safe_unreset(enc->rst_h); |
---|
1123 | 1962 | mpp_safe_unreset(enc->rst_core); |
---|
1124 | | - rockchip_pmu_idle_request(mpp->dev, false); |
---|
| 1963 | + mpp_pmu_idle_request(mpp, false); |
---|
1125 | 1964 | } |
---|
| 1965 | + |
---|
| 1966 | + set_bit(mpp->core_id, &queue->core_idle); |
---|
| 1967 | + if (enc->ccu) |
---|
| 1968 | + enc->ccu->dchs[mpp->core_id].val = 0; |
---|
| 1969 | + |
---|
| 1970 | + mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, queue->core_idle); |
---|
1126 | 1971 | |
---|
1127 | 1972 | mpp_debug_leave(); |
---|
1128 | 1973 | |
---|
.. | .. |
---|
1162 | 2007 | return 0; |
---|
1163 | 2008 | } |
---|
1164 | 2009 | |
---|
| 2010 | +#define RKVENC2_WORK_TIMEOUT_DELAY (200) |
---|
| 2011 | +#define RKVENC2_WAIT_TIMEOUT_DELAY (2000) |
---|
| 2012 | + |
---|
| 2013 | +static void rkvenc2_task_pop_pending(struct mpp_task *task) |
---|
| 2014 | +{ |
---|
| 2015 | + struct mpp_session *session = task->session; |
---|
| 2016 | + |
---|
| 2017 | + mutex_lock(&session->pending_lock); |
---|
| 2018 | + list_del_init(&task->pending_link); |
---|
| 2019 | + mutex_unlock(&session->pending_lock); |
---|
| 2020 | + |
---|
| 2021 | + kref_put(&task->ref, mpp_free_task); |
---|
| 2022 | +} |
---|
| 2023 | + |
---|
| 2024 | +static int rkvenc2_task_default_process(struct mpp_dev *mpp, |
---|
| 2025 | + struct mpp_task *task) |
---|
| 2026 | +{ |
---|
| 2027 | + int ret = 0; |
---|
| 2028 | + |
---|
| 2029 | + if (mpp->dev_ops && mpp->dev_ops->result) |
---|
| 2030 | + ret = mpp->dev_ops->result(mpp, task, NULL); |
---|
| 2031 | + |
---|
| 2032 | + mpp_debug_func(DEBUG_TASK_INFO, "kref_read %d, ret %d\n", |
---|
| 2033 | + kref_read(&task->ref), ret); |
---|
| 2034 | + |
---|
| 2035 | + rkvenc2_task_pop_pending(task); |
---|
| 2036 | + |
---|
| 2037 | + return ret; |
---|
| 2038 | +} |
---|
| 2039 | + |
---|
| 2040 | +#define RKVENC2_TIMEOUT_DUMP_REG_START (0x5100) |
---|
| 2041 | +#define RKVENC2_TIMEOUT_DUMP_REG_END (0x5160) |
---|
| 2042 | + |
---|
| 2043 | +static void rkvenc2_task_timeout_process(struct mpp_session *session, |
---|
| 2044 | + struct mpp_task *task) |
---|
| 2045 | +{ |
---|
| 2046 | + atomic_inc(&task->abort_request); |
---|
| 2047 | + set_bit(TASK_STATE_ABORT, &task->state); |
---|
| 2048 | + |
---|
| 2049 | + mpp_err("session %d:%d count %d task %d ref %d timeout\n", |
---|
| 2050 | + session->pid, session->index, atomic_read(&session->task_count), |
---|
| 2051 | + task->task_id, kref_read(&task->ref)); |
---|
| 2052 | + |
---|
| 2053 | + if (task->mpp) { |
---|
| 2054 | + struct mpp_dev *mpp = task->mpp; |
---|
| 2055 | + u32 start = RKVENC2_TIMEOUT_DUMP_REG_START; |
---|
| 2056 | + u32 end = RKVENC2_TIMEOUT_DUMP_REG_END; |
---|
| 2057 | + u32 offset; |
---|
| 2058 | + |
---|
| 2059 | + dev_err(mpp->dev, "core %d dump timeout status:\n", mpp->core_id); |
---|
| 2060 | + |
---|
| 2061 | + for (offset = start; offset < end; offset += sizeof(u32)) |
---|
| 2062 | + mpp_reg_show(mpp, offset); |
---|
| 2063 | + } |
---|
| 2064 | + |
---|
| 2065 | + rkvenc2_task_pop_pending(task); |
---|
| 2066 | +} |
---|
| 2067 | + |
---|
| 2068 | +static int rkvenc2_wait_result(struct mpp_session *session, |
---|
| 2069 | + struct mpp_task_msgs *msgs) |
---|
| 2070 | +{ |
---|
| 2071 | + struct rkvenc_poll_slice_cfg cfg; |
---|
| 2072 | + struct rkvenc_task *enc_task; |
---|
| 2073 | + struct mpp_request *req; |
---|
| 2074 | + struct mpp_task *task; |
---|
| 2075 | + struct mpp_dev *mpp; |
---|
| 2076 | + union rkvenc2_slice_len_info slice_info; |
---|
| 2077 | + u32 task_id; |
---|
| 2078 | + int ret = 0; |
---|
| 2079 | + |
---|
| 2080 | + mutex_lock(&session->pending_lock); |
---|
| 2081 | + task = list_first_entry_or_null(&session->pending_list, |
---|
| 2082 | + struct mpp_task, |
---|
| 2083 | + pending_link); |
---|
| 2084 | + mutex_unlock(&session->pending_lock); |
---|
| 2085 | + if (!task) { |
---|
| 2086 | + mpp_err("session %p pending list is empty!\n", session); |
---|
| 2087 | + return -EIO; |
---|
| 2088 | + } |
---|
| 2089 | + |
---|
| 2090 | + mpp = mpp_get_task_used_device(task, session); |
---|
| 2091 | + enc_task = to_rkvenc_task(task); |
---|
| 2092 | + task_id = task->task_id; |
---|
| 2093 | + |
---|
| 2094 | + req = cmpxchg(&msgs->poll_req, msgs->poll_req, NULL); |
---|
| 2095 | + |
---|
| 2096 | + if (!enc_task->task_split || enc_task->task_split_done) { |
---|
| 2097 | +task_done_ret: |
---|
| 2098 | + ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state)); |
---|
| 2099 | + if (ret == -ERESTARTSYS) |
---|
| 2100 | + mpp_err("wait task break by signal in normal mode\n"); |
---|
| 2101 | + |
---|
| 2102 | + return rkvenc2_task_default_process(mpp, task); |
---|
| 2103 | + |
---|
| 2104 | + } |
---|
| 2105 | + |
---|
| 2106 | + /* no slice poll request: just wait for and drain all slice lengths */ |
---|
| 2107 | + if (!req) { |
---|
| 2108 | + do { |
---|
| 2109 | + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info, |
---|
| 2110 | + &slice_info, 1)); |
---|
| 2111 | + if (ret == -ERESTARTSYS) { |
---|
| 2112 | + mpp_err("wait task break by signal in slice all mode\n"); |
---|
| 2113 | + return 0; |
---|
| 2114 | + } |
---|
| 2115 | + mpp_dbg_slice("task %d rd %3d len %d %s\n", |
---|
| 2116 | + task_id, enc_task->slice_rd_cnt, slice_info.slice_len, |
---|
| 2117 | + slice_info.last ? "last" : ""); |
---|
| 2118 | + |
---|
| 2119 | + enc_task->slice_rd_cnt++; |
---|
| 2120 | + |
---|
| 2121 | + if (slice_info.last) |
---|
| 2122 | + goto task_done_ret; |
---|
| 2123 | + } while (1); |
---|
| 2124 | + } |
---|
| 2125 | + |
---|
| 2126 | + if (copy_from_user(&cfg, req->data, sizeof(cfg))) { |
---|
| 2127 | + mpp_err("copy_from_user failed\n"); |
---|
| 2128 | + return -EINVAL; |
---|
| 2129 | + } |
---|
| 2130 | + |
---|
| 2131 | + mpp_dbg_slice("task %d poll irq %d:%d\n", task->task_id, |
---|
| 2132 | + cfg.count_max, cfg.count_ret); |
---|
| 2133 | + cfg.count_ret = 0; |
---|
| 2134 | + |
---|
| 2135 | + /* handle slice mode poll return */ |
---|
| 2136 | + do { |
---|
| 2137 | + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info, |
---|
| 2138 | + &slice_info, 1)); |
---|
| 2139 | + if (ret == -ERESTARTSYS) { |
---|
| 2140 | + mpp_err("wait task break by signal in slice one mode\n"); |
---|
| 2141 | + return 0; |
---|
| 2142 | + } |
---|
| 2143 | + mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", task_id, |
---|
| 2144 | + mpp->core_id, enc_task->slice_rd_cnt, slice_info.slice_len, |
---|
| 2145 | + slice_info.last ? "last" : ""); |
---|
| 2146 | + enc_task->slice_rd_cnt++; |
---|
| 2147 | + if (cfg.count_ret < cfg.count_max) { |
---|
| 2148 | + struct rkvenc_poll_slice_cfg __user *ucfg = |
---|
| 2149 | + (struct rkvenc_poll_slice_cfg __user *)(req->data); |
---|
| 2150 | + u32 __user *dst = (u32 __user *)(ucfg + 1); |
---|
| 2151 | + |
---|
| 2152 | + /* Do NOT return here on a put_user error. Just continue */ |
---|
| 2153 | + if (put_user(slice_info.val, dst + cfg.count_ret)) |
---|
| 2154 | + ret = -EFAULT; |
---|
| 2155 | + |
---|
| 2156 | + cfg.count_ret++; |
---|
| 2157 | + if (put_user(cfg.count_ret, &ucfg->count_ret)) |
---|
| 2158 | + ret = -EFAULT; |
---|
| 2159 | + } |
---|
| 2160 | + |
---|
| 2161 | + if (slice_info.last) { |
---|
| 2162 | + enc_task->task_split_done = 1; |
---|
| 2163 | + goto task_done_ret; |
---|
| 2164 | + } |
---|
| 2165 | + |
---|
| 2166 | + if (cfg.count_ret >= cfg.count_max) |
---|
| 2167 | + return 0; |
---|
| 2168 | + |
---|
| 2169 | + if (ret < 0) |
---|
| 2170 | + return ret; |
---|
| 2171 | + } while (!ret); |
---|
| 2172 | + |
---|
| 2173 | + rkvenc2_task_timeout_process(session, task); |
---|
| 2174 | + |
---|
| 2175 | + return ret; |
---|
| 2176 | +} |
---|
| 2177 | + |
---|
1165 | 2178 | static struct mpp_hw_ops rkvenc_hw_ops = { |
---|
1166 | 2179 | .init = rkvenc_init, |
---|
| 2180 | + .exit = rkvenc_exit, |
---|
1167 | 2181 | .clk_on = rkvenc_clk_on, |
---|
1168 | 2182 | .clk_off = rkvenc_clk_off, |
---|
1169 | 2183 | .set_freq = rkvenc_set_freq, |
---|
.. | .. |
---|
1171 | 2185 | }; |
---|
1172 | 2186 | |
---|
1173 | 2187 | static struct mpp_dev_ops rkvenc_dev_ops_v2 = { |
---|
| 2188 | + .wait_result = rkvenc2_wait_result, |
---|
1174 | 2189 | .alloc_task = rkvenc_alloc_task, |
---|
1175 | 2190 | .run = rkvenc_run, |
---|
1176 | 2191 | .irq = rkvenc_irq, |
---|
.. | .. |
---|
1184 | 2199 | .dump_session = rkvenc_dump_session, |
---|
1185 | 2200 | }; |
---|
1186 | 2201 | |
---|
| 2202 | +static struct mpp_dev_ops rkvenc_ccu_dev_ops = { |
---|
| 2203 | + .wait_result = rkvenc2_wait_result, |
---|
| 2204 | + .alloc_task = rkvenc_alloc_task, |
---|
| 2205 | + .prepare = rkvenc2_prepare, |
---|
| 2206 | + .run = rkvenc_run, |
---|
| 2207 | + .irq = rkvenc_irq, |
---|
| 2208 | + .isr = rkvenc_isr, |
---|
| 2209 | + .finish = rkvenc_finish, |
---|
| 2210 | + .result = rkvenc_result, |
---|
| 2211 | + .free_task = rkvenc_free_task, |
---|
| 2212 | + .ioctl = rkvenc_control, |
---|
| 2213 | + .init_session = rkvenc_init_session, |
---|
| 2214 | + .free_session = rkvenc_free_session, |
---|
| 2215 | + .dump_session = rkvenc_dump_session, |
---|
| 2216 | +}; |
---|
| 2217 | + |
---|
| 2218 | +static struct mpp_dev_ops vepu540c_dev_ops_v2 = { |
---|
| 2219 | + .wait_result = rkvenc2_wait_result, |
---|
| 2220 | + .alloc_task = rkvenc_alloc_task, |
---|
| 2221 | + .run = rkvenc_run, |
---|
| 2222 | + .irq = vepu540c_irq, |
---|
| 2223 | + .isr = rkvenc_isr, |
---|
| 2224 | + .finish = rkvenc_finish, |
---|
| 2225 | + .result = rkvenc_result, |
---|
| 2226 | + .free_task = rkvenc_free_task, |
---|
| 2227 | + .ioctl = rkvenc_control, |
---|
| 2228 | + .init_session = rkvenc_init_session, |
---|
| 2229 | + .free_session = rkvenc_free_session, |
---|
| 2230 | + .dump_session = rkvenc_dump_session, |
---|
| 2231 | +}; |
---|
1187 | 2232 | |
---|
1188 | 2233 | static const struct mpp_dev_var rkvenc_v2_data = { |
---|
1189 | 2234 | .device_type = MPP_DEVICE_RKVENC, |
---|
.. | .. |
---|
1193 | 2238 | .dev_ops = &rkvenc_dev_ops_v2, |
---|
1194 | 2239 | }; |
---|
1195 | 2240 | |
---|
| 2241 | +static const struct mpp_dev_var rkvenc_540c_data = { |
---|
| 2242 | + .device_type = MPP_DEVICE_RKVENC, |
---|
| 2243 | + .hw_info = &rkvenc_540c_hw_info.hw, |
---|
| 2244 | + .trans_info = trans_rkvenc_540c, |
---|
| 2245 | + .hw_ops = &rkvenc_hw_ops, |
---|
| 2246 | + .dev_ops = &vepu540c_dev_ops_v2, |
---|
| 2247 | +}; |
---|
| 2248 | + |
---|
| 2249 | +static const struct mpp_dev_var rkvenc_ccu_data = { |
---|
| 2250 | + .device_type = MPP_DEVICE_RKVENC, |
---|
| 2251 | + .hw_info = &rkvenc_v2_hw_info.hw, |
---|
| 2252 | + .trans_info = trans_rkvenc_v2, |
---|
| 2253 | + .hw_ops = &rkvenc_hw_ops, |
---|
| 2254 | + .dev_ops = &rkvenc_ccu_dev_ops, |
---|
| 2255 | +}; |
---|
| 2256 | + |
---|
1196 | 2257 | static const struct of_device_id mpp_rkvenc_dt_match[] = { |
---|
1197 | 2258 | { |
---|
1198 | 2259 | .compatible = "rockchip,rkv-encoder-v2", |
---|
1199 | 2260 | .data = &rkvenc_v2_data, |
---|
1200 | 2261 | }, |
---|
| 2262 | +#ifdef CONFIG_CPU_RK3528 |
---|
| 2263 | + { |
---|
| 2264 | + .compatible = "rockchip,rkv-encoder-rk3528", |
---|
| 2265 | + .data = &rkvenc_540c_data, |
---|
| 2266 | + }, |
---|
| 2267 | +#endif |
---|
| 2268 | +#ifdef CONFIG_CPU_RK3562 |
---|
| 2269 | + { |
---|
| 2270 | + .compatible = "rockchip,rkv-encoder-rk3562", |
---|
| 2271 | + .data = &rkvenc_540c_data, |
---|
| 2272 | + }, |
---|
| 2273 | +#endif |
---|
| 2274 | +#ifdef CONFIG_CPU_RK3588 |
---|
| 2275 | + { |
---|
| 2276 | + .compatible = "rockchip,rkv-encoder-v2-core", |
---|
| 2277 | + .data = &rkvenc_ccu_data, |
---|
| 2278 | + }, |
---|
| 2279 | + { |
---|
| 2280 | + .compatible = "rockchip,rkv-encoder-v2-ccu", |
---|
| 2281 | + }, |
---|
| 2282 | +#endif |
---|
1201 | 2283 | {}, |
---|
1202 | 2284 | }; |
---|
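
The ccu/core split registered above is driven entirely by the device tree: per-core nodes match "rockchip,rkv-encoder-v2-core", reference the ccu node through the rockchip,ccu phandle parsed in rkvenc_attach_ccu() below, and take their core_id from an "rkvenc" alias (see rkvenc_core_probe()), while the probe entry dispatches on whether the node name contains "ccu" or "core". A hedged devicetree sketch of that shape only; node names, unit addresses, and every property not read by this driver are placeholders:

aliases {
	rkvenc0 = &rkvenc_core0;
	rkvenc1 = &rkvenc_core1;
};

rkvenc_ccu: rkvenc-ccu {
	compatible = "rockchip,rkv-encoder-v2-ccu";
};

rkvenc_core0: rkvenc-core@fdbd0000 {
	compatible = "rockchip,rkv-encoder-v2-core";
	rockchip,ccu = <&rkvenc_ccu>;
	/* reg, interrupts, clocks, iommus, sram, etc. omitted */
};

rkvenc_core1: rkvenc-core@fdbe0000 {
	compatible = "rockchip,rkv-encoder-v2-core";
	rockchip,ccu = <&rkvenc_ccu>;
};
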
| 2285 | + |
---|
| 2286 | +static int rkvenc_ccu_probe(struct platform_device *pdev) |
---|
| 2287 | +{ |
---|
| 2288 | + struct rkvenc_ccu *ccu; |
---|
| 2289 | + struct device *dev = &pdev->dev; |
---|
| 2290 | + |
---|
| 2291 | + ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL); |
---|
| 2292 | + if (!ccu) |
---|
| 2293 | + return -ENOMEM; |
---|
| 2294 | + |
---|
| 2295 | + platform_set_drvdata(pdev, ccu); |
---|
| 2296 | + |
---|
| 2297 | + mutex_init(&ccu->lock); |
---|
| 2298 | + INIT_LIST_HEAD(&ccu->core_list); |
---|
| 2299 | + spin_lock_init(&ccu->lock_dchs); |
---|
| 2300 | + |
---|
| 2301 | + return 0; |
---|
| 2302 | +} |
---|
| 2303 | + |
---|
| 2304 | +static int rkvenc_attach_ccu(struct device *dev, struct rkvenc_dev *enc) |
---|
| 2305 | +{ |
---|
| 2306 | + struct device_node *np; |
---|
| 2307 | + struct platform_device *pdev; |
---|
| 2308 | + struct rkvenc_ccu *ccu; |
---|
| 2309 | + |
---|
| 2310 | + mpp_debug_enter(); |
---|
| 2311 | + |
---|
| 2312 | + np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0); |
---|
| 2313 | + if (!np || !of_device_is_available(np)) |
---|
| 2314 | + return -ENODEV; |
---|
| 2315 | + |
---|
| 2316 | + pdev = of_find_device_by_node(np); |
---|
| 2317 | + of_node_put(np); |
---|
| 2318 | + if (!pdev) |
---|
| 2319 | + return -ENODEV; |
---|
| 2320 | + |
---|
| 2321 | + ccu = platform_get_drvdata(pdev); |
---|
| 2322 | + if (!ccu) |
---|
| 2323 | + return -ENOMEM; |
---|
| 2324 | + |
---|
| 2325 | + INIT_LIST_HEAD(&enc->core_link); |
---|
| 2326 | + mutex_lock(&ccu->lock); |
---|
| 2327 | + ccu->core_num++; |
---|
| 2328 | + list_add_tail(&enc->core_link, &ccu->core_list); |
---|
| 2329 | + mutex_unlock(&ccu->lock); |
---|
| 2330 | + |
---|
| 2331 | + /* attach the ccu-domain to current core */ |
---|
| 2332 | + if (!ccu->main_core) { |
---|
| 2333 | + /* |
---|
| 2334 | + * Use the first probed device as the main core; its iommu |
---|
| 2335 | + * domain then serves as the shared ccu-domain. |
---|
| 2336 | + */ |
---|
| 2337 | + ccu->main_core = &enc->mpp; |
---|
| 2338 | + } else { |
---|
| 2339 | + struct mpp_iommu_info *ccu_info, *cur_info; |
---|
| 2340 | + |
---|
| 2341 | + /* set the ccu-domain for current device */ |
---|
| 2342 | + ccu_info = ccu->main_core->iommu_info; |
---|
| 2343 | + cur_info = enc->mpp.iommu_info; |
---|
| 2344 | + |
---|
| 2345 | + if (cur_info) { |
---|
| 2346 | + cur_info->domain = ccu_info->domain; |
---|
| 2347 | + cur_info->rw_sem = ccu_info->rw_sem; |
---|
| 2348 | + } |
---|
| 2349 | + mpp_iommu_attach(cur_info); |
---|
| 2350 | + |
---|
| 2351 | + /* increase main core message capacity */ |
---|
| 2352 | + ccu->main_core->msgs_cap++; |
---|
| 2353 | + enc->mpp.msgs_cap = 0; |
---|
| 2354 | + } |
---|
| 2355 | + enc->ccu = ccu; |
---|
| 2356 | + |
---|
| 2357 | + dev_info(dev, "attach ccu as core %d\n", enc->mpp.core_id); |
---|
| 2358 | + mpp_debug_leave(); |
---|
| 2359 | + |
---|
| 2360 | + return 0; |
---|
| 2361 | +} |
---|
1203 | 2362 | |
---|
1204 | 2363 | static int rkvenc2_alloc_rcbbuf(struct platform_device *pdev, struct rkvenc_dev *enc) |
---|
1205 | 2364 | { |
---|
.. | .. |
---|
1215 | 2374 | |
---|
1216 | 2375 | /* get rcb iova start and size */ |
---|
1217 | 2376 | ret = device_property_read_u32_array(dev, "rockchip,rcb-iova", vals, 2); |
---|
1218 | | - if (ret) { |
---|
1219 | | - dev_err(dev, "could not find property rcb-iova\n"); |
---|
| 2377 | + if (ret) |
---|
1220 | 2378 | return ret; |
---|
1221 | | - } |
---|
| 2379 | + |
---|
1222 | 2380 | iova = PAGE_ALIGN(vals[0]); |
---|
1223 | 2381 | sram_used = PAGE_ALIGN(vals[1]); |
---|
1224 | 2382 | if (!sram_used) { |
---|
.. | .. |
---|
1300 | 2458 | return ret; |
---|
1301 | 2459 | } |
---|
1302 | 2460 | |
---|
| 2461 | +static int rkvenc2_iommu_fault_handle(struct iommu_domain *iommu, |
---|
| 2462 | + struct device *iommu_dev, |
---|
| 2463 | + unsigned long iova, int status, void *arg) |
---|
| 2464 | +{ |
---|
| 2465 | + struct mpp_dev *mpp = (struct mpp_dev *)arg; |
---|
| 2466 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 2467 | + struct mpp_task *mpp_task; |
---|
| 2468 | + struct rkvenc_ccu *ccu = enc->ccu; |
---|
| 2469 | + |
---|
| 2470 | + if (ccu) { |
---|
| 2471 | + struct rkvenc_dev *core = NULL, *n; |
---|
| 2472 | + |
---|
| 2473 | + list_for_each_entry_safe(core, n, &ccu->core_list, core_link) { |
---|
| 2474 | + if (core->mpp.iommu_info && |
---|
| 2475 | + (&core->mpp.iommu_info->pdev->dev == iommu_dev)) { |
---|
| 2476 | + mpp = &core->mpp; |
---|
| 2477 | + break; |
---|
| 2478 | + } |
---|
| 2479 | + } |
---|
| 2480 | + } |
---|
| 2481 | + mpp_task = mpp->cur_task; |
---|
| 2482 | + dev_info(mpp->dev, "core %d page fault found dchs %08x\n", |
---|
| 2483 | + mpp->core_id, mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET)); |
---|
| 2484 | + |
---|
| 2485 | + if (mpp_task) |
---|
| 2486 | + mpp_task_dump_mem_region(mpp, mpp_task); |
---|
| 2487 | + |
---|
| 2488 | + /* |
---|
| 2489 | + * Mask the iommu irq so the iommu does not keep re-triggering the |
---|
| 2490 | + * page fault until the faulting task is cleaned up by the hw timeout. |
---|
| 2491 | + */ |
---|
| 2492 | + rockchip_iommu_mask_irq(mpp->dev); |
---|
| 2493 | + |
---|
| 2494 | + return 0; |
---|
| 2495 | +} |
---|
| 2496 | + |
---|
| 2497 | +static int rkvenc_core_probe(struct platform_device *pdev) |
---|
| 2498 | +{ |
---|
| 2499 | + int ret = 0; |
---|
| 2500 | + struct device *dev = &pdev->dev; |
---|
| 2501 | + struct rkvenc_dev *enc = NULL; |
---|
| 2502 | + struct mpp_dev *mpp = NULL; |
---|
| 2503 | + |
---|
| 2504 | + enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL); |
---|
| 2505 | + if (!enc) |
---|
| 2506 | + return -ENOMEM; |
---|
| 2507 | + |
---|
| 2508 | + mpp = &enc->mpp; |
---|
| 2509 | + platform_set_drvdata(pdev, mpp); |
---|
| 2510 | + |
---|
| 2511 | + if (pdev->dev.of_node) { |
---|
| 2512 | + struct device_node *np = pdev->dev.of_node; |
---|
| 2513 | + const struct of_device_id *match = NULL; |
---|
| 2514 | + |
---|
| 2515 | + match = of_match_node(mpp_rkvenc_dt_match, np); |
---|
| 2516 | + if (match) |
---|
| 2517 | + mpp->var = (struct mpp_dev_var *)match->data; |
---|
| 2518 | + |
---|
| 2519 | + mpp->core_id = of_alias_get_id(np, "rkvenc"); |
---|
| 2520 | + } |
---|
| 2521 | + |
---|
| 2522 | + ret = mpp_dev_probe(mpp, pdev); |
---|
| 2523 | + if (ret) |
---|
| 2524 | + return ret; |
---|
| 2525 | + |
---|
| 2526 | + /* attach core to ccu */ |
---|
| 2527 | + ret = rkvenc_attach_ccu(dev, enc); |
---|
| 2528 | + if (ret) { |
---|
| 2529 | + dev_err(dev, "attach ccu failed\n"); |
---|
| 2530 | + return ret; |
---|
| 2531 | + } |
---|
| 2532 | + rkvenc2_alloc_rcbbuf(pdev, enc); |
---|
| 2533 | + |
---|
| 2534 | + ret = devm_request_threaded_irq(dev, mpp->irq, |
---|
| 2535 | + mpp_dev_irq, |
---|
| 2536 | + mpp_dev_isr_sched, |
---|
| 2537 | + IRQF_ONESHOT, |
---|
| 2538 | + dev_name(dev), mpp); |
---|
| 2539 | + if (ret) { |
---|
| 2540 | + dev_err(dev, "register interrupt handler failed\n"); |
---|
| 2541 | + return -EINVAL; |
---|
| 2542 | + } |
---|
| 2543 | + mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS; |
---|
| 2544 | + enc->hw_info = to_rkvenc_info(mpp->var->hw_info); |
---|
| 2545 | + mpp->fault_handler = rkvenc2_iommu_fault_handle; |
---|
| 2546 | + rkvenc_procfs_init(mpp); |
---|
| 2547 | + rkvenc_procfs_ccu_init(mpp); |
---|
| 2548 | + |
---|
| 2549 | + /* if this is the main core, register the device with the mpp service */ |
---|
| 2550 | + if (mpp == enc->ccu->main_core) |
---|
| 2551 | + mpp_dev_register_srv(mpp, mpp->srv); |
---|
| 2552 | + |
---|
| 2553 | + return 0; |
---|
| 2554 | +} |
---|
| 2555 | + |
---|
1303 | 2556 | static int rkvenc_probe_default(struct platform_device *pdev) |
---|
1304 | 2557 | { |
---|
1305 | 2558 | int ret = 0; |
---|
.. | .. |
---|
1313 | 2566 | return -ENOMEM; |
---|
1314 | 2567 | |
---|
1315 | 2568 | mpp = &enc->mpp; |
---|
1316 | | - platform_set_drvdata(pdev, enc); |
---|
| 2569 | + platform_set_drvdata(pdev, mpp); |
---|
1317 | 2570 | |
---|
1318 | 2571 | if (pdev->dev.of_node) { |
---|
1319 | 2572 | match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node); |
---|
.. | .. |
---|
1353 | 2606 | { |
---|
1354 | 2607 | int ret = 0; |
---|
1355 | 2608 | struct device *dev = &pdev->dev; |
---|
| 2609 | + struct device_node *np = dev->of_node; |
---|
1356 | 2610 | |
---|
1357 | 2611 | dev_info(dev, "probing start\n"); |
---|
1358 | 2612 | |
---|
1359 | | - ret = rkvenc_probe_default(pdev); |
---|
| 2613 | + if (strstr(np->name, "ccu")) |
---|
| 2614 | + ret = rkvenc_ccu_probe(pdev); |
---|
| 2615 | + else if (strstr(np->name, "core")) |
---|
| 2616 | + ret = rkvenc_core_probe(pdev); |
---|
| 2617 | + else |
---|
| 2618 | + ret = rkvenc_probe_default(pdev); |
---|
1360 | 2619 | |
---|
1361 | 2620 | dev_info(dev, "probing finish\n"); |
---|
1362 | 2621 | |
---|
.. | .. |
---|
1369 | 2628 | |
---|
1370 | 2629 | if (enc->rcb_page) { |
---|
1371 | 2630 | size_t page_size = PAGE_ALIGN(enc->sram_used - enc->sram_size); |
---|
| 2631 | + int order = min(get_order(page_size), MAX_ORDER); |
---|
1372 | 2632 | |
---|
1373 | | - __free_pages(enc->rcb_page, get_order(page_size)); |
---|
| 2633 | + __free_pages(enc->rcb_page, order); |
---|
1374 | 2634 | } |
---|
1375 | 2635 | if (enc->sram_iova) { |
---|
1376 | 2636 | domain = enc->mpp.iommu_info->domain; |
---|
.. | .. |
---|
1383 | 2643 | static int rkvenc_remove(struct platform_device *pdev) |
---|
1384 | 2644 | { |
---|
1385 | 2645 | struct device *dev = &pdev->dev; |
---|
| 2646 | + struct device_node *np = dev->of_node; |
---|
1386 | 2647 | |
---|
1387 | | - struct rkvenc_dev *enc = platform_get_drvdata(pdev); |
---|
| 2648 | + if (strstr(np->name, "ccu")) { |
---|
| 2649 | + dev_info(dev, "remove ccu\n"); |
---|
| 2650 | + } else if (strstr(np->name, "core")) { |
---|
| 2651 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
| 2652 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
1388 | 2653 | |
---|
1389 | | - dev_info(dev, "remove device\n"); |
---|
1390 | | - rkvenc2_free_rcbbuf(pdev, enc); |
---|
1391 | | - mpp_dev_remove(&enc->mpp); |
---|
1392 | | - rkvenc_procfs_remove(&enc->mpp); |
---|
| 2654 | + dev_info(dev, "remove core\n"); |
---|
| 2655 | + if (enc->ccu) { |
---|
| 2656 | + mutex_lock(&enc->ccu->lock); |
---|
| 2657 | + list_del_init(&enc->core_link); |
---|
| 2658 | + enc->ccu->core_num--; |
---|
| 2659 | + mutex_unlock(&enc->ccu->lock); |
---|
| 2660 | + } |
---|
| 2661 | + rkvenc2_free_rcbbuf(pdev, enc); |
---|
| 2662 | + mpp_dev_remove(&enc->mpp); |
---|
| 2663 | + rkvenc_procfs_remove(&enc->mpp); |
---|
| 2664 | + } else { |
---|
| 2665 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
| 2666 | + struct rkvenc_dev *enc = to_rkvenc_dev(mpp); |
---|
| 2667 | + |
---|
| 2668 | + dev_info(dev, "remove device\n"); |
---|
| 2669 | + rkvenc2_free_rcbbuf(pdev, enc); |
---|
| 2670 | + mpp_dev_remove(mpp); |
---|
| 2671 | + rkvenc_procfs_remove(mpp); |
---|
| 2672 | + } |
---|
1393 | 2673 | |
---|
1394 | 2674 | return 0; |
---|
1395 | 2675 | } |
---|
.. | .. |
---|
1397 | 2677 | static void rkvenc_shutdown(struct platform_device *pdev) |
---|
1398 | 2678 | { |
---|
1399 | 2679 | struct device *dev = &pdev->dev; |
---|
1400 | | - int ret; |
---|
1401 | | - int val; |
---|
1402 | | - struct rkvenc_dev *enc = platform_get_drvdata(pdev); |
---|
1403 | | - struct mpp_dev *mpp = &enc->mpp; |
---|
1404 | 2680 | |
---|
1405 | | - dev_info(dev, "shutdown device\n"); |
---|
1406 | | - |
---|
1407 | | - if (mpp->srv) |
---|
1408 | | - atomic_inc(&mpp->srv->shutdown_request); |
---|
1409 | | - |
---|
1410 | | - ret = readx_poll_timeout(atomic_read, |
---|
1411 | | - &mpp->task_count, |
---|
1412 | | - val, val == 0, 1000, 200000); |
---|
1413 | | - if (ret == -ETIMEDOUT) |
---|
1414 | | - dev_err(dev, "wait total running time out\n"); |
---|
1415 | | - |
---|
1416 | | - dev_info(dev, "shutdown success\n"); |
---|
| 2681 | + if (!strstr(dev_name(dev), "ccu")) |
---|
| 2682 | + mpp_dev_shutdown(pdev); |
---|
1417 | 2683 | } |
---|
1418 | 2684 | |
---|
1419 | 2685 | struct platform_driver rockchip_rkvenc2_driver = { |
---|