@@ -178,6 +178,12 @@
 #define RKVENC2_REG_SLICE_NUM_BASE	(0x4034)
 #define RKVENC2_REG_SLICE_LEN_BASE	(0x4038)
 
+#define RKVENC2_REG_ST_BSB		(0x402c)
+#define RKVENC2_REG_ADR_BSBT		(0x2b0)
+#define RKVENC2_REG_ADR_BSBB		(0x2b4)
+#define RKVENC2_REG_ADR_BSBR		(0x2b8)
+#define RKVENC2_REG_ADR_BSBS		(0x2bc)
+
 union rkvenc2_slice_len_info {
 	u32 val;
 
@@ -282,6 +288,8 @@
 	dma_addr_t sram_iova;
 	u32 sram_enabled;
 	struct page *rcb_page;
+
+	u32 bs_overflow;
 
 #ifdef CONFIG_PM_DEVFREQ
 	struct rockchip_opp_info opp_info;
@@ -1290,6 +1298,8 @@
 	struct rkvenc_hw_info *hw = enc->hw_info;
 	struct mpp_task *mpp_task = NULL;
 	struct rkvenc_task *task = NULL;
+	u32 int_clear = 1;
+	u32 irq_mask = 0;
 	int ret = IRQ_NONE;
 
 	mpp_debug_enter();
@@ -1311,12 +1321,12 @@
 			wake_up(&mpp_task->wait);
 		}
 
-		mpp_write(mpp, hw->int_mask_base, 0x100);
-		mpp_write(mpp, hw->int_clr_base, 0xffffffff);
-		udelay(5);
-		mpp_write(mpp, hw->int_sta_base, 0);
-
+		irq_mask = INT_STA_ENC_DONE_STA;
 		ret = IRQ_WAKE_THREAD;
+		if (enc->bs_overflow) {
+			mpp->irq_status |= INT_STA_BSF_OFLW_STA;
+			enc->bs_overflow = 0;
+		}
 	} else if (mpp->irq_status & INT_STA_SLC_DONE_STA) {
 		if (task && task->task_split) {
 			mpp_time_part_diff(mpp_task);
@@ -1325,7 +1335,42 @@
 			wake_up(&mpp_task->wait);
 		}
 
-		mpp_write(mpp, hw->int_clr_base, INT_STA_SLC_DONE_STA);
+		irq_mask = INT_STA_ENC_DONE_STA;
+		int_clear = 0;
+	} else if (mpp->irq_status & INT_STA_BSF_OFLW_STA) {
+		u32 bs_rd = mpp_read(mpp, RKVENC2_REG_ADR_BSBR);
+		u32 bs_wr = mpp_read(mpp, RKVENC2_REG_ST_BSB);
+		u32 bs_top = mpp_read(mpp, RKVENC2_REG_ADR_BSBT);
+		u32 bs_bot = mpp_read(mpp, RKVENC2_REG_ADR_BSBB);
+
+		if (mpp_task)
+			dev_err(mpp->dev, "task %d found bitstream overflow [%#08x %#08x %#08x %#08x]\n",
+				mpp_task->task_index, bs_top, bs_bot, bs_wr, bs_rd);
+		bs_wr += 128;
+		if (bs_wr >= bs_top)
+			bs_wr = bs_bot;
+		/* clear int first */
+		mpp_write(mpp, hw->int_clr_base, mpp->irq_status);
+		/* update write addr for enc continue */
+		mpp_write(mpp, RKVENC2_REG_ADR_BSBS, bs_wr);
+		enc->bs_overflow = 1;
+		irq_mask = 0;
+		int_clear = 0;
+		ret = IRQ_HANDLED;
+	} else {
+		dev_err(mpp->dev, "found error status %08x\n", mpp->irq_status);
+
+		irq_mask = mpp->irq_status;
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	if (irq_mask)
+		mpp_write(mpp, hw->int_mask_base, irq_mask);
+
+	if (int_clear) {
+		mpp_write(mpp, hw->int_clr_base, mpp->irq_status);
+		udelay(5);
+		mpp_write(mpp, hw->int_sta_base, 0);
 	}
 
 	mpp_debug_leave();
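For readers following the interrupt rework above: the new BSF_OFLW branch reads the bitstream buffer registers added earlier (ST_BSB is read as bs_wr, ADR_BSBT/ADR_BSBB as bs_top/bs_bot, ADR_BSBR as bs_rd), logs the overflow, nudges the write pointer forward by 128 bytes and wraps it to the buffer bottom when it would pass the top, then writes it back through ADR_BSBS so encoding can continue. It also latches enc->bs_overflow so the overflow status is merged into the next ENC_DONE interrupt for the threaded handler. A minimal userspace sketch of just the wrap arithmetic (the helper name and example addresses are illustrative, not part of the driver):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of the recovery step in the BSF_OFLW branch:
 * advance the bitstream write pointer by 128 bytes and wrap it back
 * to the buffer bottom once it reaches or passes the buffer top.
 * The helper name is hypothetical; the driver does this inline.
 */
static uint32_t bs_advance_write_ptr(uint32_t bs_wr, uint32_t bs_bot, uint32_t bs_top)
{
	bs_wr += 128;
	if (bs_wr >= bs_top)
		bs_wr = bs_bot;
	return bs_wr;
}

int main(void)
{
	/* Example addresses only; the driver reads them from the BSB registers. */
	uint32_t bot = 0x10000000, top = 0x10100000;
	uint32_t wr = top - 64;		/* write pointer just below the top */

	printf("wr %#" PRIx32 " -> %#" PRIx32 "\n", wr, bs_advance_write_ptr(wr, bot, top));
	return 0;
}
```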
@@ -2004,38 +2049,31 @@
 
 	if (!enc_task->task_split || enc_task->task_split_done) {
 task_done_ret:
-		ret = wait_event_timeout(task->wait,
-					 test_bit(TASK_STATE_DONE, &task->state),
-					 msecs_to_jiffies(RKVENC2_WAIT_TIMEOUT_DELAY));
+		ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state));
+		if (ret == -ERESTARTSYS)
+			mpp_err("wait task break by signal in normal mode\n");
 
-		if (ret > 0)
-			return rkvenc2_task_default_process(mpp, task);
+		return rkvenc2_task_default_process(mpp, task);
 
-		rkvenc2_task_timeout_process(session, task);
-		return ret;
 	}
 
 	/* not slice return just wait all slice length */
 	if (!req) {
 		do {
-			ret = wait_event_timeout(task->wait,
-						 kfifo_out(&enc_task->slice_info, &slice_info, 1),
-						 msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
-			if (ret > 0) {
-				mpp_dbg_slice("task %d rd %3d len %d %s\n",
-					      task_id, enc_task->slice_rd_cnt, slice_info.slice_len,
-					      slice_info.last ? "last" : "");
-
-				enc_task->slice_rd_cnt++;
-
-				if (slice_info.last)
-					goto task_done_ret;
-
-				continue;
+			ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info,
+									     &slice_info, 1));
+			if (ret == -ERESTARTSYS) {
+				mpp_err("wait task break by signal in slice all mode\n");
+				return 0;
 			}
+			mpp_dbg_slice("task %d rd %3d len %d %s\n",
+				      task_id, enc_task->slice_rd_cnt, slice_info.slice_len,
+				      slice_info.last ? "last" : "");
 
-			rkvenc2_task_timeout_process(session, task);
-			return ret;
+			enc_task->slice_rd_cnt++;
+
+			if (slice_info.last)
+				goto task_done_ret;
 		} while (1);
 	}
 
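One point worth calling out in the hunk above: wait_event_timeout() returns the remaining jiffies (or 0 on timeout), while wait_event_interruptible() returns 0 once the condition holds and -ERESTARTSYS when a signal interrupts the sleep. That is why the rkvenc2_task_timeout_process() paths disappear from these branches and only the signal case gets logged. A minimal kernel-style sketch of the new wait pattern, assuming a caller-provided wait queue and done bit (the function and parameter names are illustrative, not from the driver):

```c
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/wait.h>

/*
 * Illustrative only: wait until @done_bit is set in @state, tolerating
 * signals the way the reworked rkvenc2 waits now do.
 */
static int example_wait_done(wait_queue_head_t *wq, unsigned long *state,
			     int done_bit)
{
	int ret;

	/* Returns 0 once the condition is true, -ERESTARTSYS on a signal. */
	ret = wait_event_interruptible(*wq, test_bit(done_bit, state));
	if (ret == -ERESTARTSYS) {
		pr_err("wait broken by signal\n");
		return ret;
	}

	return 0;
}
```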
@@ -2050,40 +2088,41 @@
 
 	/* handle slice mode poll return */
 	do {
-		ret = wait_event_timeout(task->wait,
-					 kfifo_out(&enc_task->slice_info, &slice_info, 1),
-					 msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
-		if (ret > 0) {
-			mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", task_id,
-				      mpp->core_id, enc_task->slice_rd_cnt, slice_info.slice_len,
-				      slice_info.last ? "last" : "");
-			enc_task->slice_rd_cnt++;
-			if (cfg.count_ret < cfg.count_max) {
-				struct rkvenc_poll_slice_cfg __user *ucfg =
-					(struct rkvenc_poll_slice_cfg __user *)(req->data);
-				u32 __user *dst = (u32 __user *)(ucfg + 1);
-
-				/* Do NOT return here when put_user error. Just continue */
-				if (put_user(slice_info.val, dst + cfg.count_ret))
-					ret = -EFAULT;
-
-				cfg.count_ret++;
-				if (put_user(cfg.count_ret, &ucfg->count_ret))
-					ret = -EFAULT;
-			}
-
-			if (slice_info.last) {
-				enc_task->task_split_done = 1;
-				goto task_done_ret;
-			}
-
-			if (cfg.count_ret >= cfg.count_max)
-				return 0;
-
-			if (ret < 0)
-				return ret;
+		ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info,
+								     &slice_info, 1));
+		if (ret == -ERESTARTSYS) {
+			mpp_err("wait task break by signal in slice one mode\n");
+			return 0;
 		}
-	} while (ret > 0);
+		mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", task_id,
+			      mpp->core_id, enc_task->slice_rd_cnt, slice_info.slice_len,
+			      slice_info.last ? "last" : "");
+		enc_task->slice_rd_cnt++;
+		if (cfg.count_ret < cfg.count_max) {
+			struct rkvenc_poll_slice_cfg __user *ucfg =
+				(struct rkvenc_poll_slice_cfg __user *)(req->data);
+			u32 __user *dst = (u32 __user *)(ucfg + 1);
+
+			/* Do NOT return here when put_user error. Just continue */
+			if (put_user(slice_info.val, dst + cfg.count_ret))
+				ret = -EFAULT;
+
+			cfg.count_ret++;
+			if (put_user(cfg.count_ret, &ucfg->count_ret))
+				ret = -EFAULT;
+		}
+
+		if (slice_info.last) {
+			enc_task->task_split_done = 1;
+			goto task_done_ret;
+		}
+
+		if (cfg.count_ret >= cfg.count_max)
+			return 0;
+
+		if (ret < 0)
+			return ret;
+	} while (!ret);
 
 	rkvenc2_task_timeout_process(session, task);
 
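The slice-poll branch keeps its copy-out strategy: each 32-bit slice record is written with put_user() into the array that follows the struct rkvenc_poll_slice_cfg header in the user buffer, count_ret is bumped and written back, and a put_user() fault is recorded rather than returned immediately. A hedged sketch of that shape, using a simplified stand-in for the real UAPI struct (the struct and field names here are illustrative):

```c
#include <linux/types.h>
#include <linux/uaccess.h>

/* Simplified stand-in for the poll config header in the user buffer. */
struct example_poll_cfg {
	u32 count_max;
	u32 count_ret;
	/* u32 slice records follow this header in user memory */
};

/* Illustrative only: append one 32-bit slice record after the header. */
static int example_push_slice(struct example_poll_cfg __user *ucfg,
			      u32 count_ret, u32 slice_val)
{
	u32 __user *dst = (u32 __user *)(ucfg + 1);
	int ret = 0;

	/* Mirror the driver's choice: remember -EFAULT but keep going. */
	if (put_user(slice_val, dst + count_ret))
		ret = -EFAULT;

	count_ret++;
	if (put_user(count_ret, &ucfg->count_ret))
		ret = -EFAULT;

	return ret;
}
```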
@@ -2243,8 +2282,10 @@
 	ccu_info = ccu->main_core->iommu_info;
 	cur_info = enc->mpp.iommu_info;
 
-	cur_info->domain = ccu_info->domain;
-	cur_info->rw_sem = ccu_info->rw_sem;
+	if (cur_info) {
+		cur_info->domain = ccu_info->domain;
+		cur_info->rw_sem = ccu_info->rw_sem;
+	}
 	mpp_iommu_attach(cur_info);
 
 	/* increase main core message capacity */
@@ -2422,7 +2463,7 @@
 	}
 	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
 	enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
-	mpp->iommu_info->hdl = rkvenc2_iommu_fault_handle;
+	mpp->fault_handler = rkvenc2_iommu_fault_handle;
 	rkvenc_procfs_init(mpp);
 	rkvenc_procfs_ccu_init(mpp);
 