.. | .. |
---|
36 | 36 | #include "mpp_common.h" |
---|
37 | 37 | #include "mpp_iommu.h" |
---|
38 | 38 | |
---|
39 | | -#define MPP_WORK_TIMEOUT_DELAY (200) |
---|
40 | | -#define MPP_WAIT_TIMEOUT_DELAY (2000) |
---|
41 | | - |
---|
42 | | -/* Use 'v' as magic number */ |
---|
43 | | -#define MPP_IOC_MAGIC 'v' |
---|
44 | | - |
---|
45 | | -#define MPP_IOC_CFG_V1 _IOW(MPP_IOC_MAGIC, 1, unsigned int) |
---|
46 | | -#define MPP_IOC_CFG_V2 _IOW(MPP_IOC_MAGIC, 2, unsigned int) |
---|
47 | | - |
---|
48 | 39 | /* input parameter structure for version 1 */ |
---|
49 | 40 | struct mpp_msg_v1 { |
---|
50 | 41 | __u32 cmd; |
---|
.. | .. |
---|
60 | 51 | [MPP_DEVICE_VDPU2] = "VDPU2", |
---|
61 | 52 | [MPP_DEVICE_VDPU1_PP] = "VDPU1_PP", |
---|
62 | 53 | [MPP_DEVICE_VDPU2_PP] = "VDPU2_PP", |
---|
| 54 | + [MPP_DEVICE_AV1DEC] = "AV1DEC", |
---|
63 | 55 | [MPP_DEVICE_HEVC_DEC] = "HEVC_DEC", |
---|
64 | 56 | [MPP_DEVICE_RKVDEC] = "RKVDEC", |
---|
65 | 57 | [MPP_DEVICE_AVSPLUS_DEC] = "AVSPLUS_DEC", |
---|
| 58 | + [MPP_DEVICE_RKJPEGD] = "RKJPEGD", |
---|
66 | 59 | [MPP_DEVICE_RKVENC] = "RKVENC", |
---|
67 | 60 | [MPP_DEVICE_VEPU1] = "VEPU1", |
---|
68 | 61 | [MPP_DEVICE_VEPU2] = "VEPU2", |
---|
| 62 | + [MPP_DEVICE_VEPU2_JPEG] = "VEPU2", |
---|
69 | 63 | [MPP_DEVICE_VEPU22] = "VEPU22", |
---|
70 | 64 | [MPP_DEVICE_IEP2] = "IEP2", |
---|
71 | 65 | [MPP_DEVICE_VDPP] = "VDPP", |
---|
.. | .. |
---|
87 | 81 | |
---|
88 | 82 | #endif |
---|
89 | 83 | |
---|
90 | | -static void mpp_free_task(struct kref *ref); |
---|
91 | 84 | static void mpp_attach_workqueue(struct mpp_dev *mpp, |
---|
92 | 85 | struct mpp_taskqueue *queue); |
---|
93 | | - |
---|
94 | | -/* task queue schedule */ |
---|
95 | | -static int |
---|
96 | | -mpp_taskqueue_push_pending(struct mpp_taskqueue *queue, |
---|
97 | | - struct mpp_task *task) |
---|
98 | | -{ |
---|
99 | | - if (!task->session || !task->session->mpp) |
---|
100 | | - return -EINVAL; |
---|
101 | | - |
---|
102 | | - kref_get(&task->ref); |
---|
103 | | - mutex_lock(&queue->pending_lock); |
---|
104 | | - list_add_tail(&task->queue_link, &queue->pending_list); |
---|
105 | | - mutex_unlock(&queue->pending_lock); |
---|
106 | | - |
---|
107 | | - return 0; |
---|
108 | | -} |
---|
109 | 86 | |
---|
110 | 87 | static int |
---|
111 | 88 | mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue, |
---|
.. | .. |
---|
149 | 126 | return flag; |
---|
150 | 127 | } |
---|
151 | 128 | |
---|
152 | | -static int |
---|
153 | | -mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, |
---|
154 | | - struct mpp_task *task) |
---|
| 129 | +int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task) |
---|
155 | 130 | { |
---|
156 | 131 | unsigned long flags; |
---|
157 | 132 | |
---|
.. | .. |
---|
231 | 206 | return 0; |
---|
232 | 207 | } |
---|
233 | 208 | |
---|
234 | | -static int mpp_session_clear(struct mpp_dev *mpp, |
---|
235 | | - struct mpp_session *session) |
---|
| 209 | +static void task_msgs_reset(struct mpp_task_msgs *msgs) |
---|
| 210 | +{ |
---|
| 211 | + list_del_init(&msgs->list); |
---|
| 212 | + |
---|
| 213 | + msgs->flags = 0; |
---|
| 214 | + msgs->req_cnt = 0; |
---|
| 215 | + msgs->set_cnt = 0; |
---|
| 216 | + msgs->poll_cnt = 0; |
---|
| 217 | +} |
---|
| 218 | + |
---|
| 219 | +static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session) |
---|
| 220 | +{ |
---|
| 221 | + INIT_LIST_HEAD(&msgs->list); |
---|
| 222 | + |
---|
| 223 | + msgs->session = session; |
---|
| 224 | + msgs->queue = NULL; |
---|
| 225 | + msgs->task = NULL; |
---|
| 226 | + msgs->mpp = NULL; |
---|
| 227 | + |
---|
| 228 | + msgs->ext_fd = -1; |
---|
| 229 | + |
---|
| 230 | + task_msgs_reset(msgs); |
---|
| 231 | +} |
---|
| 232 | + |
---|
| 233 | +static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session) |
---|
| 234 | +{ |
---|
| 235 | + unsigned long flags; |
---|
| 236 | + struct mpp_task_msgs *msgs; |
---|
| 237 | + |
---|
| 238 | + spin_lock_irqsave(&session->lock_msgs, flags); |
---|
| 239 | + msgs = list_first_entry_or_null(&session->list_msgs_idle, |
---|
| 240 | + struct mpp_task_msgs, list_session); |
---|
| 241 | + if (msgs) { |
---|
| 242 | + list_move_tail(&msgs->list_session, &session->list_msgs); |
---|
| 243 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
---|
| 244 | + |
---|
| 245 | + return msgs; |
---|
| 246 | + } |
---|
| 247 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
---|
| 248 | + |
---|
| 249 | + msgs = kzalloc(sizeof(*msgs), GFP_KERNEL); |
---|
| 250 | + task_msgs_init(msgs, session); |
---|
| 251 | + INIT_LIST_HEAD(&msgs->list_session); |
---|
| 252 | + |
---|
| 253 | + spin_lock_irqsave(&session->lock_msgs, flags); |
---|
| 254 | + list_move_tail(&msgs->list_session, &session->list_msgs); |
---|
| 255 | + session->msgs_cnt++; |
---|
| 256 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
---|
| 257 | + |
---|
| 258 | + mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n", |
---|
| 259 | + session->pid, session->index, session->msgs_cnt); |
---|
| 260 | + |
---|
| 261 | + return msgs; |
---|
| 262 | +} |
---|
| 263 | + |
---|
| 264 | +static void put_task_msgs(struct mpp_task_msgs *msgs) |
---|
| 265 | +{ |
---|
| 266 | + struct mpp_session *session = msgs->session; |
---|
| 267 | + unsigned long flags; |
---|
| 268 | + |
---|
| 269 | + if (!session) { |
---|
| 270 | + pr_err("invalid msgs without session\n"); |
---|
| 271 | + return; |
---|
| 272 | + } |
---|
| 273 | + |
---|
| 274 | + if (msgs->ext_fd >= 0) { |
---|
| 275 | + fdput(msgs->f); |
---|
| 276 | + msgs->ext_fd = -1; |
---|
| 277 | + } |
---|
| 278 | + |
---|
| 279 | + task_msgs_reset(msgs); |
---|
| 280 | + |
---|
| 281 | + spin_lock_irqsave(&session->lock_msgs, flags); |
---|
| 282 | + list_move_tail(&msgs->list_session, &session->list_msgs_idle); |
---|
| 283 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
---|
| 284 | +} |
---|
| 285 | + |
---|
| 286 | +static void clear_task_msgs(struct mpp_session *session) |
---|
| 287 | +{ |
---|
| 288 | + struct mpp_task_msgs *msgs, *n; |
---|
| 289 | + LIST_HEAD(list_to_free); |
---|
| 290 | + unsigned long flags; |
---|
| 291 | + |
---|
| 292 | + spin_lock_irqsave(&session->lock_msgs, flags); |
---|
| 293 | + |
---|
| 294 | + list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session) |
---|
| 295 | + list_move_tail(&msgs->list_session, &list_to_free); |
---|
| 296 | + |
---|
| 297 | + list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session) |
---|
| 298 | + list_move_tail(&msgs->list_session, &list_to_free); |
---|
| 299 | + |
---|
| 300 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
---|
| 301 | + |
---|
| 302 | + list_for_each_entry_safe(msgs, n, &list_to_free, list_session) |
---|
| 303 | + kfree(msgs); |
---|
| 304 | +} |
---|
| 305 | + |
---|
| 306 | +static void mpp_session_clear_pending(struct mpp_session *session) |
---|
236 | 307 | { |
---|
237 | 308 | struct mpp_task *task = NULL, *n; |
---|
238 | | - |
---|
239 | | - /* clear session done list */ |
---|
240 | | - mutex_lock(&session->done_lock); |
---|
241 | | - list_for_each_entry_safe(task, n, |
---|
242 | | - &session->done_list, |
---|
243 | | - done_link) { |
---|
244 | | - list_del_init(&task->done_link); |
---|
245 | | - kref_put(&task->ref, mpp_free_task); |
---|
246 | | - } |
---|
247 | | - mutex_unlock(&session->done_lock); |
---|
248 | 309 | |
---|
249 | 310 | /* clear session pending list */ |
---|
250 | 311 | mutex_lock(&session->pending_lock); |
---|
.. | .. |
---|
257 | 318 | kref_put(&task->ref, mpp_free_task); |
---|
258 | 319 | } |
---|
259 | 320 | mutex_unlock(&session->pending_lock); |
---|
| 321 | +} |
---|
260 | 322 | |
---|
261 | | - return 0; |
---|
/*
 * mpp_session_cleanup_detach - reap sessions parked on the queue's detach list.
 * @queue: taskqueue whose session_detach list is scanned
 * @work:  kthread work item to requeue when busy sessions remain
 *
 * Sessions with no outstanding tasks are unlinked and fully deinitialized;
 * busy sessions only get their pending task list cleared and stay parked
 * for a later pass, which is triggered by requeueing @work.
 */
void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
{
	struct mpp_session *session, *n;

	/* Fast exit without taking the lock when nothing is parked. */
	if (!atomic_read(&queue->detach_count))
		return;

	mutex_lock(&queue->session_lock);
	list_for_each_entry_safe(session, n, &queue->session_detach, session_link) {
		s32 task_count = atomic_read(&session->task_count);

		if (!task_count) {
			/* Idle session: unlink it and drop the detach count. */
			list_del_init(&session->session_link);
			atomic_dec(&queue->detach_count);
		}

		/*
		 * Drop the lock so the potentially heavy per-session work
		 * below runs unlocked.
		 * NOTE(review): the iteration cursor 'n' was sampled before
		 * this unlock; it can go stale if another thread mutates the
		 * detach list meanwhile -- confirm all mutation happens on
		 * this worker context.
		 */
		mutex_unlock(&queue->session_lock);

		if (task_count) {
			mpp_dbg_session("session %d:%d not finished %d task cnt %d\n",
					session->device_type, session->index,
					atomic_read(&queue->detach_count), task_count);

			/* Still busy: only flush its pending tasks for now. */
			mpp_session_clear_pending(session);
		} else {
			mpp_dbg_session("queue detach %d\n",
					atomic_read(&queue->detach_count));

			/* Fully idle: tear the session down and free it. */
			mpp_session_deinit(session);
		}

		mutex_lock(&queue->session_lock);
	}
	mutex_unlock(&queue->session_lock);

	/* Busy sessions remain: schedule another cleanup pass. */
	if (atomic_read(&queue->detach_count)) {
		mpp_dbg_session("queue detach %d again\n",
				atomic_read(&queue->detach_count));

		kthread_queue_work(&queue->worker, work);
	}
}
---|
263 | 365 | |
---|
264 | 366 | static struct mpp_session *mpp_session_init(void) |
---|
.. | .. |
---|
271 | 373 | session->pid = current->pid; |
---|
272 | 374 | |
---|
273 | 375 | mutex_init(&session->pending_lock); |
---|
274 | | - mutex_init(&session->done_lock); |
---|
275 | 376 | INIT_LIST_HEAD(&session->pending_list); |
---|
276 | | - INIT_LIST_HEAD(&session->done_list); |
---|
277 | 377 | INIT_LIST_HEAD(&session->service_link); |
---|
278 | 378 | INIT_LIST_HEAD(&session->session_link); |
---|
279 | 379 | |
---|
280 | | - init_waitqueue_head(&session->wait); |
---|
281 | 380 | atomic_set(&session->task_count, 0); |
---|
282 | 381 | atomic_set(&session->release_request, 0); |
---|
| 382 | + |
---|
| 383 | + INIT_LIST_HEAD(&session->list_msgs); |
---|
| 384 | + INIT_LIST_HEAD(&session->list_msgs_idle); |
---|
| 385 | + spin_lock_init(&session->lock_msgs); |
---|
283 | 386 | |
---|
284 | 387 | mpp_dbg_session("session %p init\n", session); |
---|
285 | 388 | return session; |
---|
.. | .. |
---|
293 | 396 | if (mpp->dev_ops->free_session) |
---|
294 | 397 | mpp->dev_ops->free_session(session); |
---|
295 | 398 | |
---|
296 | | - mpp_session_clear(mpp, session); |
---|
| 399 | + mpp_session_clear_pending(session); |
---|
297 | 400 | |
---|
298 | 401 | if (session->dma) { |
---|
299 | 402 | mpp_iommu_down_read(mpp->iommu_info); |
---|
.. | .. |
---|
314 | 417 | list_del_init(&session->session_link); |
---|
315 | 418 | } |
---|
316 | 419 | |
---|
/*
 * mpp_session_deinit - destroy a session and free its cached resources.
 * @session: session to destroy; freed on return, must not be used afterwards.
 */
void mpp_session_deinit(struct mpp_session *session)
{
	mpp_dbg_session("session %d:%d task %d deinit\n", session->pid,
			session->index, atomic_read(&session->task_count));

	/* Device-specific teardown hook installed at session creation. */
	if (likely(session->deinit))
		session->deinit(session);
	else
		pr_err("invalid NULL session deinit function\n");

	/* Free every mpp_task_msgs cached on the session's msgs lists. */
	clear_task_msgs(session);

	kfree(session);
}
---|
336 | 434 | |
---|
337 | 435 | static void mpp_session_attach_workqueue(struct mpp_session *session, |
---|
338 | 436 | struct mpp_taskqueue *queue) |
---|
339 | 437 | { |
---|
340 | | - mpp_dbg_session("session %p:%d attach\n", session, session->index); |
---|
| 438 | + mpp_dbg_session("session %d:%d attach\n", session->pid, session->index); |
---|
341 | 439 | mutex_lock(&queue->session_lock); |
---|
342 | 440 | list_add_tail(&session->session_link, &queue->session_attach); |
---|
343 | 441 | mutex_unlock(&queue->session_lock); |
---|
.. | .. |
---|
351 | 449 | if (!session->mpp || !session->mpp->queue) |
---|
352 | 450 | return; |
---|
353 | 451 | |
---|
354 | | - mpp_dbg_session("session %p:%d detach\n", session, session->index); |
---|
| 452 | + mpp_dbg_session("session %d:%d detach\n", session->pid, session->index); |
---|
355 | 453 | mpp = session->mpp; |
---|
356 | 454 | queue = mpp->queue; |
---|
357 | 455 | |
---|
358 | 456 | mutex_lock(&queue->session_lock); |
---|
359 | 457 | list_del_init(&session->session_link); |
---|
360 | 458 | list_add_tail(&session->session_link, &queue->session_detach); |
---|
361 | | - queue->detach_count++; |
---|
| 459 | + atomic_inc(&queue->detach_count); |
---|
362 | 460 | mutex_unlock(&queue->session_lock); |
---|
363 | 461 | |
---|
364 | 462 | mpp_taskqueue_trigger_work(mpp); |
---|
.. | .. |
---|
370 | 468 | { |
---|
371 | 469 | kref_get(&task->ref); |
---|
372 | 470 | mutex_lock(&session->pending_lock); |
---|
| 471 | + if (session->srv->timing_en) { |
---|
| 472 | + task->on_pending = ktime_get(); |
---|
| 473 | + set_bit(TASK_TIMING_PENDING, &task->state); |
---|
| 474 | + } |
---|
373 | 475 | list_add_tail(&task->pending_link, &session->pending_list); |
---|
374 | 476 | mutex_unlock(&session->pending_lock); |
---|
375 | 477 | |
---|
.. | .. |
---|
402 | 504 | return task; |
---|
403 | 505 | } |
---|
404 | 506 | |
---|
405 | | -static int mpp_session_push_done(struct mpp_session *session, |
---|
406 | | - struct mpp_task *task) |
---|
407 | | -{ |
---|
408 | | - kref_get(&task->ref); |
---|
409 | | - mutex_lock(&session->done_lock); |
---|
410 | | - list_add_tail(&task->done_link, &session->done_list); |
---|
411 | | - mutex_unlock(&session->done_lock); |
---|
412 | | - |
---|
413 | | - return 0; |
---|
414 | | -} |
---|
415 | | - |
---|
416 | | -static int mpp_session_pop_done(struct mpp_session *session, |
---|
417 | | - struct mpp_task *task) |
---|
418 | | -{ |
---|
419 | | - mutex_lock(&session->done_lock); |
---|
420 | | - list_del_init(&task->done_link); |
---|
421 | | - mutex_unlock(&session->done_lock); |
---|
422 | | - set_bit(TASK_STATE_DONE, &task->state); |
---|
423 | | - kref_put(&task->ref, mpp_free_task); |
---|
424 | | - |
---|
425 | | - return 0; |
---|
426 | | -} |
---|
427 | | - |
---|
428 | | -static void mpp_free_task(struct kref *ref) |
---|
| 507 | +void mpp_free_task(struct kref *ref) |
---|
429 | 508 | { |
---|
430 | 509 | struct mpp_dev *mpp; |
---|
431 | 510 | struct mpp_session *session; |
---|
.. | .. |
---|
437 | 516 | } |
---|
438 | 517 | session = task->session; |
---|
439 | 518 | |
---|
440 | | - mpp_debug_func(DEBUG_TASK_INFO, |
---|
441 | | - "session=%p, task=%p, state=0x%lx, abort_request=%d\n", |
---|
442 | | - session, task, task->state, |
---|
| 519 | + mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n", |
---|
| 520 | + session->index, task->task_id, task->state, |
---|
443 | 521 | atomic_read(&task->abort_request)); |
---|
444 | | - if (!session->mpp) { |
---|
445 | | - mpp_err("session %p, session->mpp is null.\n", session); |
---|
446 | | - return; |
---|
447 | | - } |
---|
448 | | - mpp = session->mpp; |
---|
449 | 522 | |
---|
| 523 | + mpp = mpp_get_task_used_device(task, session); |
---|
450 | 524 | if (mpp->dev_ops->free_task) |
---|
451 | 525 | mpp->dev_ops->free_task(session, task); |
---|
| 526 | + |
---|
452 | 527 | /* Decrease reference count */ |
---|
453 | 528 | atomic_dec(&session->task_count); |
---|
454 | 529 | atomic_dec(&mpp->task_count); |
---|
.. | .. |
---|
462 | 537 | struct mpp_task, |
---|
463 | 538 | timeout_work); |
---|
464 | 539 | |
---|
465 | | - if (!test_bit(TASK_STATE_START, &task->state)) { |
---|
466 | | - mpp_err("task has not start\n"); |
---|
467 | | - schedule_delayed_work(&task->timeout_work, |
---|
468 | | - msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY)); |
---|
469 | | - return; |
---|
470 | | - } |
---|
471 | | - |
---|
472 | | - mpp_err("task %p processing time out!\n", task); |
---|
473 | | - if (!task->session) { |
---|
474 | | - mpp_err("task %p, task->session is null.\n", task); |
---|
475 | | - return; |
---|
476 | | - } |
---|
477 | | - session = task->session; |
---|
478 | | - |
---|
479 | | - if (!session->mpp) { |
---|
480 | | - mpp_err("session %p, session->mpp is null.\n", session); |
---|
481 | | - return; |
---|
482 | | - } |
---|
483 | | - mpp = session->mpp; |
---|
484 | | - |
---|
485 | | - synchronize_hardirq(mpp->irq); |
---|
486 | | - |
---|
487 | 540 | if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) { |
---|
488 | 541 | mpp_err("task has been handled\n"); |
---|
489 | 542 | return; |
---|
490 | 543 | } |
---|
| 544 | + |
---|
| 545 | + if (!task->session) { |
---|
| 546 | + mpp_err("task %p, task->session is null.\n", task); |
---|
| 547 | + return; |
---|
| 548 | + } |
---|
| 549 | + |
---|
| 550 | + session = task->session; |
---|
| 551 | + mpp_err("task %d:%d:%d processing time out!\n", session->pid, |
---|
| 552 | + session->index, task->task_id); |
---|
| 553 | + |
---|
| 554 | + if (!session->mpp) { |
---|
| 555 | + mpp_err("session %d:%d, session mpp is null.\n", session->pid, |
---|
| 556 | + session->index); |
---|
| 557 | + return; |
---|
| 558 | + } |
---|
| 559 | + |
---|
| 560 | + mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create)); |
---|
| 561 | + |
---|
| 562 | + mpp = mpp_get_task_used_device(task, session); |
---|
| 563 | + |
---|
| 564 | + /* disable core irq */ |
---|
| 565 | + disable_irq(mpp->irq); |
---|
| 566 | + /* disable mmu irq */ |
---|
| 567 | + if (mpp->iommu_info && mpp->iommu_info->got_irq) |
---|
| 568 | + disable_irq(mpp->iommu_info->irq); |
---|
491 | 569 | |
---|
492 | 570 | /* hardware maybe dead, reset it */ |
---|
493 | 571 | mpp_reset_up_read(mpp->reset_group); |
---|
494 | 572 | mpp_dev_reset(mpp); |
---|
495 | 573 | mpp_power_off(mpp); |
---|
496 | 574 | |
---|
497 | | - mpp_session_push_done(session, task); |
---|
| 575 | + mpp_iommu_dev_deactivate(mpp->iommu_info, mpp); |
---|
| 576 | + set_bit(TASK_STATE_TIMEOUT, &task->state); |
---|
| 577 | + set_bit(TASK_STATE_DONE, &task->state); |
---|
498 | 578 | /* Wake up the GET thread */ |
---|
499 | | - wake_up(&session->wait); |
---|
| 579 | + wake_up(&task->wait); |
---|
500 | 580 | |
---|
501 | 581 | /* remove task from taskqueue running list */ |
---|
502 | | - set_bit(TASK_STATE_TIMEOUT, &task->state); |
---|
503 | 582 | mpp_taskqueue_pop_running(mpp->queue, task); |
---|
| 583 | + |
---|
| 584 | + /* enable core irq */ |
---|
| 585 | + enable_irq(mpp->irq); |
---|
| 586 | + /* enable mmu irq */ |
---|
| 587 | + if (mpp->iommu_info && mpp->iommu_info->got_irq) |
---|
| 588 | + enable_irq(mpp->iommu_info->irq); |
---|
| 589 | + |
---|
| 590 | + mpp_taskqueue_trigger_work(mpp); |
---|
504 | 591 | } |
---|
505 | 592 | |
---|
506 | 593 | static int mpp_process_task_default(struct mpp_session *session, |
---|
507 | | - struct mpp_task_msgs *msgs) |
---|
| 594 | + struct mpp_task_msgs *msgs) |
---|
508 | 595 | { |
---|
509 | 596 | struct mpp_task *task = NULL; |
---|
510 | 597 | struct mpp_dev *mpp = session->mpp; |
---|
| 598 | + u32 timing_en; |
---|
| 599 | + ktime_t on_create; |
---|
511 | 600 | |
---|
512 | 601 | if (unlikely(!mpp)) { |
---|
513 | | - mpp_err("pid %d clinet %d found invalid process function\n", |
---|
| 602 | + mpp_err("pid %d client %d found invalid process function\n", |
---|
514 | 603 | session->pid, session->device_type); |
---|
515 | 604 | return -EINVAL; |
---|
516 | 605 | } |
---|
| 606 | + |
---|
| 607 | + timing_en = session->srv->timing_en; |
---|
| 608 | + if (timing_en) |
---|
| 609 | + on_create = ktime_get(); |
---|
517 | 610 | |
---|
518 | 611 | if (mpp->dev_ops->alloc_task) |
---|
519 | 612 | task = mpp->dev_ops->alloc_task(session, msgs); |
---|
.. | .. |
---|
521 | 614 | mpp_err("alloc_task failed.\n"); |
---|
522 | 615 | return -ENOMEM; |
---|
523 | 616 | } |
---|
| 617 | + |
---|
| 618 | + if (timing_en) { |
---|
| 619 | + task->on_create_end = ktime_get(); |
---|
| 620 | + task->on_create = on_create; |
---|
| 621 | + set_bit(TASK_TIMING_CREATE_END, &task->state); |
---|
| 622 | + set_bit(TASK_TIMING_CREATE, &task->state); |
---|
| 623 | + } |
---|
| 624 | + |
---|
| 625 | + /* ensure current device */ |
---|
| 626 | + mpp = mpp_get_task_used_device(task, session); |
---|
| 627 | + |
---|
524 | 628 | kref_init(&task->ref); |
---|
| 629 | + init_waitqueue_head(&task->wait); |
---|
525 | 630 | atomic_set(&task->abort_request, 0); |
---|
526 | 631 | task->task_index = atomic_fetch_inc(&mpp->task_index); |
---|
| 632 | + task->task_id = atomic_fetch_inc(&mpp->queue->task_id); |
---|
527 | 633 | INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work); |
---|
528 | 634 | |
---|
529 | 635 | if (mpp->auto_freq_en && mpp->hw_ops->get_freq) |
---|
530 | 636 | mpp->hw_ops->get_freq(mpp, task); |
---|
| 637 | + |
---|
| 638 | + msgs->queue = mpp->queue; |
---|
| 639 | + msgs->task = task; |
---|
| 640 | + msgs->mpp = mpp; |
---|
531 | 641 | |
---|
532 | 642 | /* |
---|
533 | 643 | * Push task to session should be in front of push task to queue. |
---|
.. | .. |
---|
537 | 647 | */ |
---|
538 | 648 | atomic_inc(&session->task_count); |
---|
539 | 649 | mpp_session_push_pending(session, task); |
---|
540 | | - /* push current task to queue */ |
---|
541 | | - atomic_inc(&mpp->task_count); |
---|
542 | | - mpp_taskqueue_push_pending(mpp->queue, task); |
---|
543 | | - set_bit(TASK_STATE_PENDING, &task->state); |
---|
544 | | - /* trigger current queue to run task */ |
---|
545 | | - mpp_taskqueue_trigger_work(mpp); |
---|
546 | | - kref_put(&task->ref, mpp_free_task); |
---|
547 | 650 | |
---|
548 | 651 | return 0; |
---|
549 | 652 | } |
---|
.. | .. |
---|
599 | 702 | group->resets[type] = rst; |
---|
600 | 703 | group->queue = mpp->queue; |
---|
601 | 704 | } |
---|
602 | | - /* if reset not in the same queue, it means different device |
---|
603 | | - * may reset in the same time, then rw_sem_on should set true. |
---|
604 | | - */ |
---|
605 | | - group->rw_sem_on |= (group->queue != mpp->queue) ? true : false; |
---|
606 | 705 | dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on); |
---|
607 | 706 | up_write(&group->rw_sem); |
---|
608 | 707 | |
---|
.. | .. |
---|
628 | 727 | mpp_iommu_down_write(mpp->iommu_info); |
---|
629 | 728 | mpp_reset_down_write(mpp->reset_group); |
---|
630 | 729 | atomic_set(&mpp->reset_request, 0); |
---|
631 | | - rockchip_save_qos(mpp->dev); |
---|
| 730 | + |
---|
632 | 731 | if (mpp->hw_ops->reset) |
---|
633 | 732 | mpp->hw_ops->reset(mpp); |
---|
634 | | - rockchip_restore_qos(mpp->dev); |
---|
635 | 733 | |
---|
636 | 734 | /* Note: if the domain does not change, iommu attach will be return |
---|
637 | 735 | * as an empty operation. Therefore, force to close and then open, |
---|
.. | .. |
---|
647 | 745 | return 0; |
---|
648 | 746 | } |
---|
649 | 747 | |
---|
/*
 * mpp_task_run_begin - mark a task as started right before hardware kick-off.
 * @task:      task about to be programmed on the device
 * @timing_en: non-zero to record timing timestamps on the task
 * @timeout:   watchdog delay in milliseconds for the timeout worker
 *
 * Disables preemption; mpp_task_run_end() re-enables it, so the two calls
 * must always be paired around the register programming sequence.
 */
void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout)
{
	preempt_disable();

	set_bit(TASK_STATE_START, &task->state);

	/* Record the start time and arm the per-task timeout watchdog. */
	mpp_time_record(task);
	schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout));

	if (timing_en) {
		task->on_sched_timeout = ktime_get();
		set_bit(TASK_TIMING_TO_SCHED, &task->state);
	}
}
---|
| 762 | + |
---|
/*
 * mpp_task_run_end - counterpart of mpp_task_run_begin(); re-enables
 * preemption after the hardware has been kicked.
 * @task:      task that was just programmed
 * @timing_en: non-zero to stamp the run-end time on the task
 */
void mpp_task_run_end(struct mpp_task *task, u32 timing_en)
{
	if (timing_en) {
		task->on_run_end = ktime_get();
		set_bit(TASK_TIMING_RUN_END, &task->state);
	}

#ifdef MODULE
	/*
	 * Plain preempt_enable() for module builds -- presumably because
	 * preempt_enable_no_resched() is unavailable to modules; confirm.
	 */
	preempt_enable();
#else
	preempt_enable_no_resched();
#endif
}
---|
| 776 | + |
---|
650 | 777 | static int mpp_task_run(struct mpp_dev *mpp, |
---|
651 | 778 | struct mpp_task *task) |
---|
652 | 779 | { |
---|
653 | 780 | int ret; |
---|
| 781 | + u32 timing_en; |
---|
654 | 782 | |
---|
655 | 783 | mpp_debug_enter(); |
---|
| 784 | + |
---|
| 785 | + timing_en = mpp->srv->timing_en; |
---|
| 786 | + if (timing_en) { |
---|
| 787 | + task->on_run = ktime_get(); |
---|
| 788 | + set_bit(TASK_TIMING_RUN, &task->state); |
---|
| 789 | + } |
---|
656 | 790 | |
---|
657 | 791 | /* |
---|
658 | 792 | * before running, we have to switch grf ctrl bit to ensure |
---|
.. | .. |
---|
668 | 802 | mpp_set_grf(mpp->grf_info); |
---|
669 | 803 | } |
---|
670 | 804 | /* |
---|
| 805 | + * Lock the reader locker of the device resource lock here, |
---|
| 806 | + * release at the finish operation |
---|
| 807 | + */ |
---|
| 808 | + mpp_reset_down_read(mpp->reset_group); |
---|
| 809 | + |
---|
| 810 | + /* |
---|
671 | 811 | * for iommu share hardware, should attach to ensure |
---|
672 | 812 | * working in current device |
---|
673 | 813 | */ |
---|
674 | 814 | ret = mpp_iommu_attach(mpp->iommu_info); |
---|
675 | 815 | if (ret) { |
---|
676 | 816 | dev_err(mpp->dev, "mpp_iommu_attach failed\n"); |
---|
| 817 | + mpp_reset_up_read(mpp->reset_group); |
---|
677 | 818 | return -ENODATA; |
---|
678 | 819 | } |
---|
679 | 820 | |
---|
680 | 821 | mpp_power_on(mpp); |
---|
681 | | - mpp_time_record(task); |
---|
682 | | - mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n", |
---|
683 | | - task->session->pid, dev_name(mpp->dev)); |
---|
| 822 | + mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n", |
---|
| 823 | + task->session->pid, dev_name(mpp->dev)); |
---|
684 | 824 | |
---|
685 | 825 | if (mpp->auto_freq_en && mpp->hw_ops->set_freq) |
---|
686 | 826 | mpp->hw_ops->set_freq(mpp, task); |
---|
687 | | - /* |
---|
688 | | - * TODO: Lock the reader locker of the device resource lock here, |
---|
689 | | - * release at the finish operation |
---|
690 | | - */ |
---|
691 | | - mpp_reset_down_read(mpp->reset_group); |
---|
692 | 827 | |
---|
693 | | - schedule_delayed_work(&task->timeout_work, |
---|
694 | | - msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY)); |
---|
| 828 | + mpp_iommu_dev_activate(mpp->iommu_info, mpp); |
---|
695 | 829 | if (mpp->dev_ops->run) |
---|
696 | 830 | mpp->dev_ops->run(mpp, task); |
---|
697 | | - set_bit(TASK_STATE_START, &task->state); |
---|
698 | 831 | |
---|
699 | 832 | mpp_debug_leave(); |
---|
700 | 833 | |
---|
.. | .. |
---|
709 | 842 | |
---|
710 | 843 | mpp_debug_enter(); |
---|
711 | 844 | |
---|
| 845 | +again: |
---|
712 | 846 | task = mpp_taskqueue_get_pending_task(queue); |
---|
713 | 847 | if (!task) |
---|
714 | 848 | goto done; |
---|
.. | .. |
---|
716 | 850 | /* if task timeout and aborted, remove it */ |
---|
717 | 851 | if (atomic_read(&task->abort_request) > 0) { |
---|
718 | 852 | mpp_taskqueue_pop_pending(queue, task); |
---|
719 | | - goto done; |
---|
| 853 | + goto again; |
---|
720 | 854 | } |
---|
721 | 855 | |
---|
722 | 856 | /* get device for current task */ |
---|
.. | .. |
---|
741 | 875 | */ |
---|
742 | 876 | /* Push a pending task to running queue */ |
---|
743 | 877 | if (task) { |
---|
| 878 | + struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session); |
---|
| 879 | + |
---|
| 880 | + atomic_inc(&task_mpp->task_count); |
---|
744 | 881 | mpp_taskqueue_pending_to_run(queue, task); |
---|
745 | 882 | set_bit(TASK_STATE_RUNNING, &task->state); |
---|
746 | | - if (mpp_task_run(mpp, task)) |
---|
747 | | - mpp_taskqueue_pop_running(mpp->queue, task); |
---|
| 883 | + if (mpp_task_run(task_mpp, task)) |
---|
| 884 | + mpp_taskqueue_pop_running(queue, task); |
---|
| 885 | + else |
---|
| 886 | + goto again; |
---|
748 | 887 | } |
---|
749 | 888 | |
---|
750 | 889 | done: |
---|
751 | | - mutex_lock(&queue->session_lock); |
---|
752 | | - while (queue->detach_count) { |
---|
753 | | - struct mpp_session *session = NULL; |
---|
754 | | - |
---|
755 | | - session = list_first_entry_or_null(&queue->session_detach, struct mpp_session, |
---|
756 | | - session_link); |
---|
757 | | - if (session) { |
---|
758 | | - list_del_init(&session->session_link); |
---|
759 | | - queue->detach_count--; |
---|
760 | | - } |
---|
761 | | - |
---|
762 | | - mutex_unlock(&queue->session_lock); |
---|
763 | | - |
---|
764 | | - if (session) { |
---|
765 | | - mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev), |
---|
766 | | - queue->detach_count); |
---|
767 | | - mpp_session_deinit(session); |
---|
768 | | - } |
---|
769 | | - |
---|
770 | | - mutex_lock(&queue->session_lock); |
---|
771 | | - } |
---|
772 | | - mutex_unlock(&queue->session_lock); |
---|
| 890 | + mpp_session_cleanup_detach(queue, work_s); |
---|
773 | 891 | } |
---|
774 | 892 | |
---|
775 | 893 | static int mpp_wait_result_default(struct mpp_session *session, |
---|
776 | | - struct mpp_task_msgs *msgs) |
---|
| 894 | + struct mpp_task_msgs *msgs) |
---|
777 | 895 | { |
---|
778 | 896 | int ret; |
---|
779 | 897 | struct mpp_task *task; |
---|
780 | | - struct mpp_dev *mpp = session->mpp; |
---|
781 | | - |
---|
782 | | - if (unlikely(!mpp)) { |
---|
783 | | - mpp_err("pid %d clinet %d found invalid wait result function\n", |
---|
784 | | - session->pid, session->device_type); |
---|
785 | | - return -EINVAL; |
---|
786 | | - } |
---|
787 | | - |
---|
788 | | - ret = wait_event_timeout(session->wait, |
---|
789 | | - !list_empty(&session->done_list), |
---|
790 | | - msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY)); |
---|
| 898 | + struct mpp_dev *mpp; |
---|
791 | 899 | |
---|
792 | 900 | task = mpp_session_get_pending_task(session); |
---|
793 | 901 | if (!task) { |
---|
794 | | - mpp_err("session %p pending list is empty!\n", session); |
---|
| 902 | + mpp_err("session %d:%d pending list is empty!\n", |
---|
| 903 | + session->pid, session->index); |
---|
795 | 904 | return -EIO; |
---|
796 | 905 | } |
---|
| 906 | + mpp = mpp_get_task_used_device(task, session); |
---|
797 | 907 | |
---|
798 | | - if (ret > 0) { |
---|
799 | | - u32 task_found = 0; |
---|
800 | | - struct mpp_task *loop = NULL, *n; |
---|
| 908 | + ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state)); |
---|
| 909 | + if (ret == -ERESTARTSYS) |
---|
| 910 | + mpp_err("wait task break by signal\n"); |
---|
801 | 911 | |
---|
802 | | - /* find task in session done list */ |
---|
803 | | - mutex_lock(&session->done_lock); |
---|
804 | | - list_for_each_entry_safe(loop, n, |
---|
805 | | - &session->done_list, |
---|
806 | | - done_link) { |
---|
807 | | - if (loop == task) { |
---|
808 | | - task_found = 1; |
---|
809 | | - break; |
---|
810 | | - } |
---|
811 | | - } |
---|
812 | | - mutex_unlock(&session->done_lock); |
---|
813 | | - if (task_found) { |
---|
814 | | - if (mpp->dev_ops->result) |
---|
815 | | - ret = mpp->dev_ops->result(mpp, task, msgs); |
---|
816 | | - mpp_session_pop_done(session, task); |
---|
| 912 | + if (mpp->dev_ops->result) |
---|
| 913 | + ret = mpp->dev_ops->result(mpp, task, msgs); |
---|
| 914 | + mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n", |
---|
| 915 | + session->device_type, session->index, atomic_read(&session->task_count), |
---|
| 916 | + task->task_index, task->state); |
---|
817 | 917 | |
---|
818 | | - if (test_bit(TASK_STATE_TIMEOUT, &task->state)) |
---|
819 | | - ret = -ETIMEDOUT; |
---|
820 | | - } else { |
---|
821 | | - mpp_err("session %p task %p, not found in done list!\n", |
---|
822 | | - session, task); |
---|
823 | | - ret = -EIO; |
---|
824 | | - } |
---|
825 | | - } else { |
---|
826 | | - atomic_inc(&task->abort_request); |
---|
827 | | - mpp_err("timeout, pid %d session %p:%d count %d cur_task %p index %d.\n", |
---|
828 | | - session->pid, session, session->index, |
---|
829 | | - atomic_read(&session->task_count), task, |
---|
830 | | - task->task_index); |
---|
831 | | - /* if twice and return timeout, otherwise, re-wait */ |
---|
832 | | - if (atomic_read(&task->abort_request) > 1) { |
---|
833 | | - mpp_err("session %p:%d, task %p index %d abort wait twice!\n", |
---|
834 | | - session, session->index, |
---|
835 | | - task, task->task_index); |
---|
836 | | - ret = -ETIMEDOUT; |
---|
837 | | - } else { |
---|
838 | | - return mpp_wait_result_default(session, msgs); |
---|
839 | | - } |
---|
840 | | - } |
---|
841 | | - |
---|
842 | | - mpp_debug_func(DEBUG_TASK_INFO, |
---|
843 | | - "kref_read=%d, ret=%d\n", kref_read(&task->ref), ret); |
---|
844 | 918 | mpp_session_pop_pending(session, task); |
---|
845 | 919 | |
---|
846 | 920 | return ret; |
---|
.. | .. |
---|
875 | 949 | of_node_put(np); |
---|
876 | 950 | if (!pdev) { |
---|
877 | 951 | dev_err(dev, "failed to get mpp service from node\n"); |
---|
878 | | - ret = -ENODEV; |
---|
879 | | - goto err_put_pdev; |
---|
| 952 | + return -ENODEV; |
---|
880 | 953 | } |
---|
881 | 954 | |
---|
882 | | - mpp->pdev_srv = pdev; |
---|
883 | 955 | mpp->srv = platform_get_drvdata(pdev); |
---|
| 956 | + platform_device_put(pdev); |
---|
884 | 957 | if (!mpp->srv) { |
---|
885 | | - dev_err(&pdev->dev, "failed attach service\n"); |
---|
886 | | - ret = -EINVAL; |
---|
887 | | - goto err_put_pdev; |
---|
| 958 | + dev_err(dev, "failed attach service\n"); |
---|
| 959 | + return -EINVAL; |
---|
888 | 960 | } |
---|
889 | 961 | |
---|
890 | 962 | ret = of_property_read_u32(dev->of_node, |
---|
891 | 963 | "rockchip,taskqueue-node", &taskqueue_node); |
---|
892 | 964 | if (ret) { |
---|
893 | 965 | dev_err(dev, "failed to get taskqueue-node\n"); |
---|
894 | | - goto err_put_pdev; |
---|
| 966 | + return ret; |
---|
895 | 967 | } else if (taskqueue_node >= mpp->srv->taskqueue_cnt) { |
---|
896 | 968 | dev_err(dev, "taskqueue-node %d must less than %d\n", |
---|
897 | 969 | taskqueue_node, mpp->srv->taskqueue_cnt); |
---|
898 | | - ret = -ENODEV; |
---|
899 | | - goto err_put_pdev; |
---|
| 970 | + return -ENODEV; |
---|
900 | 971 | } |
---|
901 | 972 | /* set taskqueue according dtsi */ |
---|
902 | 973 | queue = mpp->srv->task_queues[taskqueue_node]; |
---|
903 | 974 | if (!queue) { |
---|
904 | 975 | dev_err(dev, "taskqueue attach to invalid node %d\n", |
---|
905 | 976 | taskqueue_node); |
---|
906 | | - ret = -ENODEV; |
---|
907 | | - goto err_put_pdev; |
---|
| 977 | + return -ENODEV; |
---|
908 | 978 | } |
---|
909 | 979 | mpp_attach_workqueue(mpp, queue); |
---|
910 | 980 | |
---|
.. | .. |
---|
915 | 985 | if (reset_group_node >= mpp->srv->reset_group_cnt) { |
---|
916 | 986 | dev_err(dev, "resetgroup-node %d must less than %d\n", |
---|
917 | 987 | reset_group_node, mpp->srv->reset_group_cnt); |
---|
918 | | - ret = -ENODEV; |
---|
919 | | - goto err_put_pdev; |
---|
| 988 | + return -ENODEV; |
---|
920 | 989 | } else { |
---|
921 | 990 | mpp->reset_group = mpp->srv->reset_groups[reset_group_node]; |
---|
| 991 | + if (!mpp->reset_group->queue) |
---|
| 992 | + mpp->reset_group->queue = queue; |
---|
| 993 | + if (mpp->reset_group->queue != mpp->queue) |
---|
| 994 | + mpp->reset_group->rw_sem_on = true; |
---|
922 | 995 | } |
---|
923 | 996 | } |
---|
924 | 997 | |
---|
925 | 998 | return 0; |
---|
926 | | - |
---|
927 | | -err_put_pdev: |
---|
928 | | - platform_device_put(pdev); |
---|
929 | | - |
---|
930 | | - return ret; |
---|
931 | 999 | } |
---|
932 | 1000 | |
---|
933 | 1001 | struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev) |
---|
.. | .. |
---|
951 | 1019 | |
---|
952 | 1020 | /* default taskqueue has max 16 task capacity */ |
---|
953 | 1021 | queue->task_capacity = MPP_MAX_TASK_CAPACITY; |
---|
| 1022 | + atomic_set(&queue->reset_request, 0); |
---|
| 1023 | + atomic_set(&queue->detach_count, 0); |
---|
| 1024 | + atomic_set(&queue->task_id, 0); |
---|
| 1025 | + queue->dev_active_flags = 0; |
---|
954 | 1026 | |
---|
955 | 1027 | return queue; |
---|
956 | 1028 | } |
---|
.. | .. |
---|
958 | 1030 | static void mpp_attach_workqueue(struct mpp_dev *mpp, |
---|
959 | 1031 | struct mpp_taskqueue *queue) |
---|
960 | 1032 | { |
---|
961 | | - mpp->queue = queue; |
---|
| 1033 | + s32 core_id; |
---|
| 1034 | + |
---|
962 | 1035 | INIT_LIST_HEAD(&mpp->queue_link); |
---|
| 1036 | + |
---|
963 | 1037 | mutex_lock(&queue->dev_lock); |
---|
| 1038 | + |
---|
| 1039 | + if (mpp->core_id >= 0) |
---|
| 1040 | + core_id = mpp->core_id; |
---|
| 1041 | + else |
---|
| 1042 | + core_id = queue->core_count; |
---|
| 1043 | + |
---|
| 1044 | + if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) { |
---|
| 1045 | + dev_err(mpp->dev, "invalid core id %d\n", core_id); |
---|
| 1046 | + goto done; |
---|
| 1047 | + } |
---|
| 1048 | + |
---|
| 1049 | + /* |
---|
| 1050 | + * multi devices with no multicores share one queue, |
---|
| 1051 | + * the core_id is default value 0. |
---|
| 1052 | + */ |
---|
| 1053 | + if (queue->cores[core_id]) { |
---|
| 1054 | + if (queue->cores[core_id] == mpp) |
---|
| 1055 | + goto done; |
---|
| 1056 | + |
---|
| 1057 | + core_id = queue->core_count; |
---|
| 1058 | + } |
---|
| 1059 | + |
---|
| 1060 | + queue->cores[core_id] = mpp; |
---|
| 1061 | + queue->core_count++; |
---|
| 1062 | + |
---|
| 1063 | + set_bit(core_id, &queue->core_idle); |
---|
964 | 1064 | list_add_tail(&mpp->queue_link, &queue->dev_list); |
---|
| 1065 | + if (queue->core_id_max < (u32)core_id) |
---|
| 1066 | + queue->core_id_max = (u32)core_id; |
---|
| 1067 | + |
---|
| 1068 | + mpp->core_id = core_id; |
---|
| 1069 | + mpp->queue = queue; |
---|
| 1070 | + |
---|
| 1071 | + mpp_dbg_core("%s attach queue as core %d\n", |
---|
| 1072 | + dev_name(mpp->dev), mpp->core_id); |
---|
| 1073 | + |
---|
965 | 1074 | if (queue->task_capacity > mpp->task_capacity) |
---|
966 | 1075 | queue->task_capacity = mpp->task_capacity; |
---|
| 1076 | + |
---|
| 1077 | +done: |
---|
967 | 1078 | mutex_unlock(&queue->dev_lock); |
---|
968 | 1079 | } |
---|
969 | 1080 | |
---|
.. | .. |
---|
973 | 1084 | |
---|
974 | 1085 | if (queue) { |
---|
975 | 1086 | mutex_lock(&queue->dev_lock); |
---|
| 1087 | + |
---|
| 1088 | + queue->cores[mpp->core_id] = NULL; |
---|
| 1089 | + queue->core_count--; |
---|
| 1090 | + |
---|
| 1091 | + clear_bit(mpp->core_id, &queue->core_idle); |
---|
976 | 1092 | list_del_init(&mpp->queue_link); |
---|
| 1093 | + |
---|
| 1094 | + mpp->queue = NULL; |
---|
| 1095 | + |
---|
977 | 1096 | mutex_unlock(&queue->dev_lock); |
---|
978 | 1097 | } |
---|
979 | 1098 | } |
---|
.. | .. |
---|
989 | 1108 | found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found; |
---|
990 | 1109 | |
---|
991 | 1110 | return found ? 0 : -EINVAL; |
---|
992 | | -} |
---|
993 | | - |
---|
994 | | -static int mpp_parse_msg_v1(struct mpp_msg_v1 *msg, |
---|
995 | | - struct mpp_request *req) |
---|
996 | | -{ |
---|
997 | | - int ret = 0; |
---|
998 | | - |
---|
999 | | - req->cmd = msg->cmd; |
---|
1000 | | - req->flags = msg->flags; |
---|
1001 | | - req->size = msg->size; |
---|
1002 | | - req->offset = msg->offset; |
---|
1003 | | - req->data = (void __user *)(unsigned long)msg->data_ptr; |
---|
1004 | | - |
---|
1005 | | - mpp_debug(DEBUG_IOCTL, "cmd %x, flags %08x, size %d, offset %x\n", |
---|
1006 | | - req->cmd, req->flags, req->size, req->offset); |
---|
1007 | | - |
---|
1008 | | - ret = mpp_check_cmd_v1(req->cmd); |
---|
1009 | | - if (ret) |
---|
1010 | | - mpp_err("mpp cmd %x is not supproted.\n", req->cmd); |
---|
1011 | | - |
---|
1012 | | - return ret; |
---|
1013 | 1111 | } |
---|
1014 | 1112 | |
---|
1015 | 1113 | static inline int mpp_msg_is_last(struct mpp_request *req) |
---|
.. | .. |
---|
1061 | 1159 | int ret; |
---|
1062 | 1160 | struct mpp_dev *mpp; |
---|
1063 | 1161 | |
---|
1064 | | - mpp_debug(DEBUG_IOCTL, "req->cmd %x\n", req->cmd); |
---|
| 1162 | + mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd); |
---|
| 1163 | + |
---|
1065 | 1164 | switch (req->cmd) { |
---|
1066 | 1165 | case MPP_CMD_QUERY_HW_SUPPORT: { |
---|
1067 | 1166 | u32 hw_support = srv->hw_support; |
---|
.. | .. |
---|
1087 | 1186 | if (test_bit(client_type, &srv->hw_support)) |
---|
1088 | 1187 | mpp = srv->sub_devices[client_type]; |
---|
1089 | 1188 | } |
---|
| 1189 | + |
---|
1090 | 1190 | if (!mpp) |
---|
1091 | 1191 | return -EINVAL; |
---|
| 1192 | + |
---|
1092 | 1193 | hw_info = mpp->var->hw_info; |
---|
1093 | 1194 | mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id); |
---|
1094 | 1195 | if (put_user(hw_info->hw_id, (u32 __user *)req->data)) |
---|
.. | .. |
---|
1119 | 1220 | mpp = srv->sub_devices[client_type]; |
---|
1120 | 1221 | if (!mpp) |
---|
1121 | 1222 | return -EINVAL; |
---|
| 1223 | + |
---|
1122 | 1224 | session->device_type = (enum MPP_DEVICE_TYPE)client_type; |
---|
1123 | 1225 | session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers); |
---|
1124 | 1226 | session->mpp = mpp; |
---|
.. | .. |
---|
1140 | 1242 | if (ret) |
---|
1141 | 1243 | return ret; |
---|
1142 | 1244 | } |
---|
| 1245 | + |
---|
1143 | 1246 | mpp_session_attach_workqueue(session, mpp->queue); |
---|
1144 | 1247 | } break; |
---|
1145 | 1248 | case MPP_CMD_INIT_DRIVER_DATA: { |
---|
.. | .. |
---|
1182 | 1285 | case MPP_CMD_POLL_HW_FINISH: { |
---|
1183 | 1286 | msgs->flags |= req->flags; |
---|
1184 | 1287 | msgs->poll_cnt++; |
---|
| 1288 | + msgs->poll_req = NULL; |
---|
| 1289 | + } break; |
---|
| 1290 | + case MPP_CMD_POLL_HW_IRQ: { |
---|
| 1291 | + if (msgs->poll_cnt || msgs->poll_req) |
---|
| 1292 | + mpp_err("Do NOT poll hw irq when previous call not return\n"); |
---|
| 1293 | + |
---|
| 1294 | + msgs->flags |= req->flags; |
---|
| 1295 | + msgs->poll_cnt++; |
---|
| 1296 | + |
---|
| 1297 | + if (req->size && req->data) { |
---|
| 1298 | + if (!msgs->poll_req) |
---|
| 1299 | + msgs->poll_req = req; |
---|
| 1300 | + } else { |
---|
| 1301 | + msgs->poll_req = NULL; |
---|
| 1302 | + } |
---|
1185 | 1303 | } break; |
---|
1186 | 1304 | case MPP_CMD_RESET_SESSION: { |
---|
1187 | 1305 | int ret; |
---|
.. | .. |
---|
1197 | 1315 | if (!mpp) |
---|
1198 | 1316 | return -EINVAL; |
---|
1199 | 1317 | |
---|
1200 | | - mpp_session_clear(mpp, session); |
---|
| 1318 | + mpp_session_clear_pending(session); |
---|
1201 | 1319 | mpp_iommu_down_write(mpp->iommu_info); |
---|
1202 | 1320 | ret = mpp_dma_session_destroy(session->dma); |
---|
1203 | 1321 | mpp_iommu_up_write(mpp->iommu_info); |
---|
.. | .. |
---|
1271 | 1389 | default: { |
---|
1272 | 1390 | mpp = session->mpp; |
---|
1273 | 1391 | if (!mpp) { |
---|
1274 | | - mpp_err("pid %d not find clinet %d\n", |
---|
| 1392 | + mpp_err("pid %d not find client %d\n", |
---|
1275 | 1393 | session->pid, session->device_type); |
---|
1276 | 1394 | return -EINVAL; |
---|
1277 | 1395 | } |
---|
.. | .. |
---|
1285 | 1403 | return 0; |
---|
1286 | 1404 | } |
---|
1287 | 1405 | |
---|
1288 | | -static long mpp_dev_ioctl(struct file *filp, |
---|
1289 | | - unsigned int cmd, |
---|
1290 | | - unsigned long arg) |
---|
| 1406 | +static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head) |
---|
1291 | 1407 | { |
---|
| 1408 | + struct mpp_session *session = msgs->session; |
---|
1292 | 1409 | int ret = 0; |
---|
1293 | | - struct mpp_service *srv; |
---|
1294 | | - void __user *msg; |
---|
| 1410 | + |
---|
| 1411 | + /* process each task */ |
---|
| 1412 | + if (msgs->set_cnt) { |
---|
| 1413 | + /* NOTE: update msg_flags for fd over 1024 */ |
---|
| 1414 | + session->msg_flags = msgs->flags; |
---|
| 1415 | + ret = mpp_process_task(session, msgs); |
---|
| 1416 | + } |
---|
| 1417 | + |
---|
| 1418 | + if (!ret) { |
---|
| 1419 | + INIT_LIST_HEAD(&msgs->list); |
---|
| 1420 | + list_add_tail(&msgs->list, head); |
---|
| 1421 | + } else { |
---|
| 1422 | + put_task_msgs(msgs); |
---|
| 1423 | + } |
---|
| 1424 | +} |
---|
| 1425 | + |
---|
| 1426 | +static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session, |
---|
| 1427 | + unsigned int cmd, void __user *msg) |
---|
| 1428 | +{ |
---|
| 1429 | + struct mpp_msg_v1 msg_v1; |
---|
1295 | 1430 | struct mpp_request *req; |
---|
1296 | | - struct mpp_task_msgs task_msgs; |
---|
1297 | | - struct mpp_session *session = |
---|
1298 | | - (struct mpp_session *)filp->private_data; |
---|
| 1431 | + struct mpp_task_msgs *msgs = NULL; |
---|
| 1432 | + int last = 1; |
---|
| 1433 | + int ret; |
---|
| 1434 | + |
---|
| 1435 | + if (cmd != MPP_IOC_CFG_V1) { |
---|
| 1436 | + mpp_err("unknown ioctl cmd %x\n", cmd); |
---|
| 1437 | + return -EINVAL; |
---|
| 1438 | + } |
---|
| 1439 | + |
---|
| 1440 | +next: |
---|
| 1441 | + /* first, parse to fixed struct */ |
---|
| 1442 | + if (copy_from_user(&msg_v1, msg, sizeof(msg_v1))) |
---|
| 1443 | + return -EFAULT; |
---|
| 1444 | + |
---|
| 1445 | + msg += sizeof(msg_v1); |
---|
| 1446 | + |
---|
| 1447 | + mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n", |
---|
| 1448 | + msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset); |
---|
| 1449 | + |
---|
| 1450 | + if (mpp_check_cmd_v1(msg_v1.cmd)) { |
---|
| 1451 | + mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd); |
---|
| 1452 | + return -EFAULT; |
---|
| 1453 | + } |
---|
| 1454 | + |
---|
| 1455 | + if (msg_v1.flags & MPP_FLAGS_MULTI_MSG) |
---|
| 1456 | + last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0; |
---|
| 1457 | + else |
---|
| 1458 | + last = 1; |
---|
| 1459 | + |
---|
| 1460 | + /* check cmd for change msgs session */ |
---|
| 1461 | + if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) { |
---|
| 1462 | + struct mpp_bat_msg bat_msg; |
---|
| 1463 | + struct mpp_bat_msg __user *usr_cmd; |
---|
| 1464 | + struct fd f; |
---|
| 1465 | + |
---|
| 1466 | + /* try session switch here */ |
---|
| 1467 | + usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr; |
---|
| 1468 | + |
---|
| 1469 | + if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg))) |
---|
| 1470 | + return -EFAULT; |
---|
| 1471 | + |
---|
| 1472 | + /* skip finished message */ |
---|
| 1473 | + if (bat_msg.flag & MPP_BAT_MSG_DONE) |
---|
| 1474 | + goto session_switch_done; |
---|
| 1475 | + |
---|
| 1476 | + f = fdget(bat_msg.fd); |
---|
| 1477 | + if (!f.file) { |
---|
| 1478 | + int ret = -EBADF; |
---|
| 1479 | + |
---|
| 1480 | + mpp_err("fd %d get session failed\n", bat_msg.fd); |
---|
| 1481 | + |
---|
| 1482 | + if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret))) |
---|
| 1483 | + mpp_err("copy_to_user failed.\n"); |
---|
| 1484 | + goto session_switch_done; |
---|
| 1485 | + } |
---|
| 1486 | + |
---|
| 1487 | + /* NOTE: add previous ready task to queue and drop empty task */ |
---|
| 1488 | + if (msgs) { |
---|
| 1489 | + if (msgs->req_cnt) |
---|
| 1490 | + task_msgs_add(msgs, head); |
---|
| 1491 | + else |
---|
| 1492 | + put_task_msgs(msgs); |
---|
| 1493 | + |
---|
| 1494 | + msgs = NULL; |
---|
| 1495 | + } |
---|
| 1496 | + |
---|
| 1497 | + /* switch session */ |
---|
| 1498 | + session = f.file->private_data; |
---|
| 1499 | + msgs = get_task_msgs(session); |
---|
| 1500 | + |
---|
| 1501 | + if (f.file->private_data == session) |
---|
| 1502 | + msgs->ext_fd = bat_msg.fd; |
---|
| 1503 | + |
---|
| 1504 | + msgs->f = f; |
---|
| 1505 | + |
---|
| 1506 | + mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n", |
---|
| 1507 | + bat_msg.fd, session->index, session->msgs_cnt); |
---|
| 1508 | + |
---|
| 1509 | +session_switch_done: |
---|
| 1510 | + /* session id should NOT be the last message */ |
---|
| 1511 | + if (last) |
---|
| 1512 | + return 0; |
---|
| 1513 | + |
---|
| 1514 | + goto next; |
---|
| 1515 | + } |
---|
| 1516 | + |
---|
| 1517 | + if (!msgs) |
---|
| 1518 | + msgs = get_task_msgs(session); |
---|
| 1519 | + |
---|
| 1520 | + if (!msgs) { |
---|
| 1521 | + pr_err("session %d:%d failed to get task msgs", |
---|
| 1522 | + session->pid, session->index); |
---|
| 1523 | + return -EINVAL; |
---|
| 1524 | + } |
---|
| 1525 | + |
---|
| 1526 | + if (msgs->req_cnt >= MPP_MAX_MSG_NUM) { |
---|
| 1527 | + mpp_err("session %d message count %d more than %d.\n", |
---|
| 1528 | + session->index, msgs->req_cnt, MPP_MAX_MSG_NUM); |
---|
| 1529 | + return -EINVAL; |
---|
| 1530 | + } |
---|
| 1531 | + |
---|
| 1532 | + req = &msgs->reqs[msgs->req_cnt++]; |
---|
| 1533 | + req->cmd = msg_v1.cmd; |
---|
| 1534 | + req->flags = msg_v1.flags; |
---|
| 1535 | + req->size = msg_v1.size; |
---|
| 1536 | + req->offset = msg_v1.offset; |
---|
| 1537 | + req->data = (void __user *)(unsigned long)msg_v1.data_ptr; |
---|
| 1538 | + |
---|
| 1539 | + ret = mpp_process_request(session, session->srv, req, msgs); |
---|
| 1540 | + if (ret) { |
---|
| 1541 | + mpp_err("session %d process cmd %x ret %d\n", |
---|
| 1542 | + session->index, req->cmd, ret); |
---|
| 1543 | + return ret; |
---|
| 1544 | + } |
---|
| 1545 | + |
---|
| 1546 | + if (!last) |
---|
| 1547 | + goto next; |
---|
| 1548 | + |
---|
| 1549 | + task_msgs_add(msgs, head); |
---|
| 1550 | + msgs = NULL; |
---|
| 1551 | + |
---|
| 1552 | + return 0; |
---|
| 1553 | +} |
---|
| 1554 | + |
---|
| 1555 | +static void mpp_msgs_trigger(struct list_head *msgs_list) |
---|
| 1556 | +{ |
---|
| 1557 | + struct mpp_task_msgs *msgs, *n; |
---|
| 1558 | + struct mpp_dev *mpp_prev = NULL; |
---|
| 1559 | + struct mpp_taskqueue *queue_prev = NULL; |
---|
| 1560 | + |
---|
| 1561 | + /* push task to queue */ |
---|
| 1562 | + list_for_each_entry_safe(msgs, n, msgs_list, list) { |
---|
| 1563 | + struct mpp_dev *mpp; |
---|
| 1564 | + struct mpp_task *task; |
---|
| 1565 | + struct mpp_taskqueue *queue; |
---|
| 1566 | + |
---|
| 1567 | + if (!msgs->set_cnt || !msgs->queue) |
---|
| 1568 | + continue; |
---|
| 1569 | + |
---|
| 1570 | + mpp = msgs->mpp; |
---|
| 1571 | + task = msgs->task; |
---|
| 1572 | + queue = msgs->queue; |
---|
| 1573 | + |
---|
| 1574 | + if (queue_prev != queue) { |
---|
| 1575 | + if (queue_prev && mpp_prev) { |
---|
| 1576 | + mutex_unlock(&queue_prev->pending_lock); |
---|
| 1577 | + mpp_taskqueue_trigger_work(mpp_prev); |
---|
| 1578 | + } |
---|
| 1579 | + |
---|
| 1580 | + if (queue) |
---|
| 1581 | + mutex_lock(&queue->pending_lock); |
---|
| 1582 | + |
---|
| 1583 | + mpp_prev = mpp; |
---|
| 1584 | + queue_prev = queue; |
---|
| 1585 | + } |
---|
| 1586 | + |
---|
| 1587 | + if (test_bit(TASK_STATE_ABORT, &task->state)) |
---|
| 1588 | + pr_info("try to trigger abort task %d\n", task->task_id); |
---|
| 1589 | + |
---|
| 1590 | + set_bit(TASK_STATE_PENDING, &task->state); |
---|
| 1591 | + list_add_tail(&task->queue_link, &queue->pending_list); |
---|
| 1592 | + } |
---|
| 1593 | + |
---|
| 1594 | + if (mpp_prev && queue_prev) { |
---|
| 1595 | + mutex_unlock(&queue_prev->pending_lock); |
---|
| 1596 | + mpp_taskqueue_trigger_work(mpp_prev); |
---|
| 1597 | + } |
---|
| 1598 | +} |
---|
| 1599 | + |
---|
| 1600 | +static void mpp_msgs_wait(struct list_head *msgs_list) |
---|
| 1601 | +{ |
---|
| 1602 | + struct mpp_task_msgs *msgs, *n; |
---|
| 1603 | + |
---|
| 1604 | + /* poll and release each task */ |
---|
| 1605 | + list_for_each_entry_safe(msgs, n, msgs_list, list) { |
---|
| 1606 | + struct mpp_session *session = msgs->session; |
---|
| 1607 | + |
---|
| 1608 | + if (msgs->poll_cnt) { |
---|
| 1609 | + int ret = mpp_wait_result(session, msgs); |
---|
| 1610 | + |
---|
| 1611 | + if (ret) { |
---|
| 1612 | + mpp_err("session %d wait result ret %d\n", |
---|
| 1613 | + session->index, ret); |
---|
| 1614 | + } |
---|
| 1615 | + } |
---|
| 1616 | + |
---|
| 1617 | + put_task_msgs(msgs); |
---|
| 1618 | + |
---|
| 1619 | + } |
---|
| 1620 | +} |
---|
| 1621 | + |
---|
| 1622 | +static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
---|
| 1623 | +{ |
---|
| 1624 | + struct mpp_service *srv; |
---|
| 1625 | + struct mpp_session *session = (struct mpp_session *)filp->private_data; |
---|
| 1626 | + struct list_head msgs_list; |
---|
| 1627 | + int ret = 0; |
---|
1299 | 1628 | |
---|
1300 | 1629 | mpp_debug_enter(); |
---|
1301 | 1630 | |
---|
.. | .. |
---|
1303 | 1632 | mpp_err("session %p\n", session); |
---|
1304 | 1633 | return -EINVAL; |
---|
1305 | 1634 | } |
---|
| 1635 | + |
---|
1306 | 1636 | srv = session->srv; |
---|
| 1637 | + |
---|
1307 | 1638 | if (atomic_read(&session->release_request) > 0) { |
---|
1308 | 1639 | mpp_debug(DEBUG_IOCTL, "release session had request\n"); |
---|
1309 | 1640 | return -EBUSY; |
---|
.. | .. |
---|
1313 | 1644 | return -EBUSY; |
---|
1314 | 1645 | } |
---|
1315 | 1646 | |
---|
1316 | | - msg = (void __user *)arg; |
---|
1317 | | - memset(&task_msgs, 0, sizeof(task_msgs)); |
---|
1318 | | - do { |
---|
1319 | | - req = &task_msgs.reqs[task_msgs.req_cnt]; |
---|
1320 | | - /* first, parse to fixed struct */ |
---|
1321 | | - switch (cmd) { |
---|
1322 | | - case MPP_IOC_CFG_V1: { |
---|
1323 | | - struct mpp_msg_v1 msg_v1; |
---|
| 1647 | + INIT_LIST_HEAD(&msgs_list); |
---|
1324 | 1648 | |
---|
1325 | | - memset(&msg_v1, 0, sizeof(msg_v1)); |
---|
1326 | | - if (copy_from_user(&msg_v1, msg, sizeof(msg_v1))) |
---|
1327 | | - return -EFAULT; |
---|
1328 | | - ret = mpp_parse_msg_v1(&msg_v1, req); |
---|
1329 | | - if (ret) |
---|
1330 | | - return -EFAULT; |
---|
| 1649 | + ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg); |
---|
| 1650 | + if (ret) |
---|
| 1651 | + mpp_err("collect msgs failed %d\n", ret); |
---|
1331 | 1652 | |
---|
1332 | | - msg += sizeof(msg_v1); |
---|
1333 | | - } break; |
---|
1334 | | - default: |
---|
1335 | | - mpp_err("unknown ioctl cmd %x\n", cmd); |
---|
1336 | | - return -EINVAL; |
---|
1337 | | - } |
---|
1338 | | - task_msgs.req_cnt++; |
---|
1339 | | - /* check loop times */ |
---|
1340 | | - if (task_msgs.req_cnt > MPP_MAX_MSG_NUM) { |
---|
1341 | | - mpp_err("fail, message count %d more than %d.\n", |
---|
1342 | | - task_msgs.req_cnt, MPP_MAX_MSG_NUM); |
---|
1343 | | - return -EINVAL; |
---|
1344 | | - } |
---|
1345 | | - /* second, process request */ |
---|
1346 | | - ret = mpp_process_request(session, srv, req, &task_msgs); |
---|
1347 | | - if (ret) |
---|
1348 | | - return -EFAULT; |
---|
1349 | | - /* last, process task message */ |
---|
1350 | | - if (mpp_msg_is_last(req)) { |
---|
1351 | | - session->msg_flags = task_msgs.flags; |
---|
1352 | | - if (task_msgs.set_cnt > 0) { |
---|
1353 | | - ret = mpp_process_task(session, &task_msgs); |
---|
1354 | | - if (ret) |
---|
1355 | | - return ret; |
---|
1356 | | - } |
---|
1357 | | - if (task_msgs.poll_cnt > 0) { |
---|
1358 | | - ret = mpp_wait_result(session, &task_msgs); |
---|
1359 | | - if (ret) |
---|
1360 | | - return ret; |
---|
1361 | | - } |
---|
1362 | | - } |
---|
1363 | | - } while (!mpp_msg_is_last(req)); |
---|
| 1653 | + mpp_msgs_trigger(&msgs_list); |
---|
| 1654 | + |
---|
| 1655 | + mpp_msgs_wait(&msgs_list); |
---|
1364 | 1656 | |
---|
1365 | 1657 | mpp_debug_leave(); |
---|
1366 | 1658 | |
---|
.. | .. |
---|
1410 | 1702 | /* wait for task all done */ |
---|
1411 | 1703 | atomic_inc(&session->release_request); |
---|
1412 | 1704 | |
---|
1413 | | - if (session->mpp) |
---|
| 1705 | + if (session->mpp || atomic_read(&session->task_count)) |
---|
1414 | 1706 | mpp_session_detach_workqueue(session); |
---|
1415 | 1707 | else |
---|
1416 | 1708 | mpp_session_deinit(session); |
---|
.. | .. |
---|
1421 | 1713 | return 0; |
---|
1422 | 1714 | } |
---|
1423 | 1715 | |
---|
1424 | | -static unsigned int |
---|
1425 | | -mpp_dev_poll(struct file *filp, poll_table *wait) |
---|
1426 | | -{ |
---|
1427 | | - unsigned int mask = 0; |
---|
1428 | | - struct mpp_session *session = |
---|
1429 | | - (struct mpp_session *)filp->private_data; |
---|
1430 | | - |
---|
1431 | | - poll_wait(filp, &session->wait, wait); |
---|
1432 | | - if (!list_empty(&session->done_list)) |
---|
1433 | | - mask |= POLLIN | POLLRDNORM; |
---|
1434 | | - |
---|
1435 | | - return mask; |
---|
1436 | | -} |
---|
1437 | | - |
---|
1438 | 1716 | const struct file_operations rockchip_mpp_fops = { |
---|
1439 | 1717 | .open = mpp_dev_open, |
---|
1440 | 1718 | .release = mpp_dev_release, |
---|
1441 | | - .poll = mpp_dev_poll, |
---|
1442 | 1719 | .unlocked_ioctl = mpp_dev_ioctl, |
---|
1443 | 1720 | #ifdef CONFIG_COMPAT |
---|
1444 | 1721 | .compat_ioctl = mpp_dev_ioctl, |
---|
.. | .. |
---|
1479 | 1756 | mpp_iommu_down_read(mpp->iommu_info); |
---|
1480 | 1757 | buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd); |
---|
1481 | 1758 | mpp_iommu_up_read(mpp->iommu_info); |
---|
1482 | | - if (IS_ERR_OR_NULL(buffer)) { |
---|
| 1759 | + if (IS_ERR(buffer)) { |
---|
1483 | 1760 | mpp_err("can't import dma-buf %d\n", fd); |
---|
1484 | | - return ERR_PTR(-ENOMEM); |
---|
| 1761 | + return ERR_CAST(buffer); |
---|
1485 | 1762 | } |
---|
1486 | 1763 | |
---|
1487 | 1764 | mem_region->hdl = buffer; |
---|
.. | .. |
---|
1511 | 1788 | cnt = session->trans_count; |
---|
1512 | 1789 | tbl = session->trans_table; |
---|
1513 | 1790 | } else { |
---|
1514 | | - struct mpp_dev *mpp = session->mpp; |
---|
| 1791 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
---|
1515 | 1792 | struct mpp_trans_info *trans_info = mpp->var->trans_info; |
---|
1516 | 1793 | |
---|
1517 | 1794 | cnt = trans_info[fmt].count; |
---|
.. | .. |
---|
1647 | 1924 | return 0; |
---|
1648 | 1925 | } |
---|
1649 | 1926 | |
---|
1650 | | -int mpp_task_init(struct mpp_session *session, |
---|
1651 | | - struct mpp_task *task) |
---|
| 1927 | +int mpp_task_init(struct mpp_session *session, struct mpp_task *task) |
---|
1652 | 1928 | { |
---|
1653 | 1929 | INIT_LIST_HEAD(&task->pending_link); |
---|
1654 | 1930 | INIT_LIST_HEAD(&task->queue_link); |
---|
.. | .. |
---|
1663 | 1939 | int mpp_task_finish(struct mpp_session *session, |
---|
1664 | 1940 | struct mpp_task *task) |
---|
1665 | 1941 | { |
---|
1666 | | - struct mpp_dev *mpp = session->mpp; |
---|
| 1942 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
---|
1667 | 1943 | |
---|
1668 | 1944 | if (mpp->dev_ops->finish) |
---|
1669 | 1945 | mpp->dev_ops->finish(mpp, task); |
---|
.. | .. |
---|
1673 | 1949 | mpp_dev_reset(mpp); |
---|
1674 | 1950 | mpp_power_off(mpp); |
---|
1675 | 1951 | |
---|
1676 | | - if (!atomic_read(&task->abort_request)) { |
---|
1677 | | - mpp_session_push_done(session, task); |
---|
1678 | | - /* Wake up the GET thread */ |
---|
1679 | | - wake_up(&session->wait); |
---|
1680 | | - } |
---|
1681 | 1952 | set_bit(TASK_STATE_FINISH, &task->state); |
---|
| 1953 | + set_bit(TASK_STATE_DONE, &task->state); |
---|
| 1954 | + |
---|
| 1955 | + if (session->srv->timing_en) { |
---|
| 1956 | + s64 time_diff; |
---|
| 1957 | + |
---|
| 1958 | + task->on_finish = ktime_get(); |
---|
| 1959 | + set_bit(TASK_TIMING_FINISH, &task->state); |
---|
| 1960 | + |
---|
| 1961 | + time_diff = ktime_us_delta(task->on_finish, task->on_create); |
---|
| 1962 | + |
---|
| 1963 | + if (mpp->timing_check && time_diff > (s64)mpp->timing_check) |
---|
| 1964 | + mpp_task_dump_timing(task, time_diff); |
---|
| 1965 | + } |
---|
| 1966 | + |
---|
| 1967 | + /* Wake up the GET thread */ |
---|
| 1968 | + wake_up(&task->wait); |
---|
1682 | 1969 | mpp_taskqueue_pop_running(mpp->queue, task); |
---|
1683 | 1970 | |
---|
1684 | 1971 | return 0; |
---|
.. | .. |
---|
1688 | 1975 | struct mpp_task *task) |
---|
1689 | 1976 | { |
---|
1690 | 1977 | struct mpp_mem_region *mem_region = NULL, *n; |
---|
1691 | | - struct mpp_dev *mpp = session->mpp; |
---|
| 1978 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
---|
1692 | 1979 | |
---|
1693 | 1980 | /* release memory region attach to this registers table. */ |
---|
1694 | 1981 | list_for_each_entry_safe(mem_region, n, |
---|
.. | .. |
---|
1713 | 2000 | if (!task) |
---|
1714 | 2001 | return -EIO; |
---|
1715 | 2002 | |
---|
1716 | | - mpp_err("--- dump mem region ---\n"); |
---|
| 2003 | + mpp_err("--- dump task %d mem region ---\n", task->task_index); |
---|
1717 | 2004 | if (!list_empty(&task->mem_region_list)) { |
---|
1718 | 2005 | list_for_each_entry_safe(mem, n, |
---|
1719 | 2006 | &task->mem_region_list, |
---|
.. | .. |
---|
1753 | 2040 | return 0; |
---|
1754 | 2041 | } |
---|
1755 | 2042 | |
---|
1756 | | -int mpp_task_dump_hw_reg(struct mpp_dev *mpp, struct mpp_task *task) |
---|
| 2043 | +int mpp_task_dump_hw_reg(struct mpp_dev *mpp) |
---|
1757 | 2044 | { |
---|
1758 | | - if (!task) |
---|
1759 | | - return -EIO; |
---|
| 2045 | + u32 i; |
---|
| 2046 | + u32 s = mpp->var->hw_info->reg_start; |
---|
| 2047 | + u32 e = mpp->var->hw_info->reg_end; |
---|
1760 | 2048 | |
---|
1761 | | - if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) { |
---|
1762 | | - u32 i; |
---|
1763 | | - u32 s = task->hw_info->reg_start; |
---|
1764 | | - u32 e = task->hw_info->reg_end; |
---|
| 2049 | + mpp_err("--- dump hardware register ---\n"); |
---|
| 2050 | + for (i = s; i <= e; i++) { |
---|
| 2051 | + u32 reg = i * sizeof(u32); |
---|
1765 | 2052 | |
---|
1766 | | - mpp_err("--- dump hardware register ---\n"); |
---|
1767 | | - for (i = s; i <= e; i++) { |
---|
1768 | | - u32 reg = i * sizeof(u32); |
---|
1769 | | - |
---|
1770 | | - mpp_err("reg[%03d]: %04x: 0x%08x\n", |
---|
| 2053 | + mpp_err("reg[%03d]: %04x: 0x%08x\n", |
---|
1771 | 2054 | i, reg, readl_relaxed(mpp->reg_base + reg)); |
---|
1772 | | - } |
---|
1773 | 2055 | } |
---|
1774 | 2056 | |
---|
1775 | 2057 | return 0; |
---|
1776 | 2058 | } |
---|
1777 | 2059 | |
---|
1778 | | -static int mpp_iommu_handle(struct iommu_domain *iommu, |
---|
1779 | | - struct device *iommu_dev, |
---|
1780 | | - unsigned long iova, |
---|
1781 | | - int status, void *arg) |
---|
| 2060 | +void mpp_reg_show(struct mpp_dev *mpp, u32 offset) |
---|
1782 | 2061 | { |
---|
1783 | | - struct mpp_taskqueue *queue = (struct mpp_taskqueue *)arg; |
---|
1784 | | - struct mpp_task *task = mpp_taskqueue_get_running_task(queue); |
---|
1785 | | - struct mpp_dev *mpp; |
---|
| 2062 | + if (!mpp) |
---|
| 2063 | + return; |
---|
1786 | 2064 | |
---|
1787 | | - /* |
---|
1788 | | - * NOTE: In link mode, this task may not be the task of the current |
---|
1789 | | - * hardware processing error |
---|
1790 | | - */ |
---|
1791 | | - if (!task || !task->session) |
---|
1792 | | - return -EIO; |
---|
1793 | | - /* get mpp from cur task */ |
---|
1794 | | - mpp = task->session->mpp; |
---|
1795 | | - dev_err(mpp->dev, "fault addr 0x%08lx status %x\n", iova, status); |
---|
| 2065 | + dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n", |
---|
| 2066 | + offset >> 2, offset, mpp_read_relaxed(mpp, offset)); |
---|
| 2067 | +} |
---|
1796 | 2068 | |
---|
1797 | | - mpp_task_dump_mem_region(mpp, task); |
---|
1798 | | - mpp_task_dump_hw_reg(mpp, task); |
---|
| 2069 | +void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end) |
---|
| 2070 | +{ |
---|
| 2071 | + u32 offset; |
---|
1799 | 2072 | |
---|
1800 | | - if (mpp->iommu_info->hdl) |
---|
1801 | | - mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, arg); |
---|
| 2073 | + if (!mpp) |
---|
| 2074 | + return; |
---|
1802 | 2075 | |
---|
1803 | | - return 0; |
---|
| 2076 | + for (offset = start; offset < end; offset += sizeof(u32)) |
---|
| 2077 | + mpp_reg_show(mpp, offset); |
---|
1804 | 2078 | } |
---|
1805 | 2079 | |
---|
1806 | 2080 | /* The device will do more probing work after this */ |
---|
.. | .. |
---|
1815 | 2089 | |
---|
1816 | 2090 | /* Get disable auto frequent flag from dtsi */ |
---|
1817 | 2091 | mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq"); |
---|
| 2092 | + /* read flag for pum idle request */ |
---|
| 2093 | + mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request"); |
---|
| 2094 | + |
---|
| 2095 | + /* read link table capacity */ |
---|
| 2096 | + ret = of_property_read_u32(np, "rockchip,task-capacity", |
---|
| 2097 | + &mpp->task_capacity); |
---|
| 2098 | + if (ret) |
---|
| 2099 | + mpp->task_capacity = 1; |
---|
| 2100 | + |
---|
| 2101 | + mpp->dev = dev; |
---|
| 2102 | + mpp->hw_ops = mpp->var->hw_ops; |
---|
| 2103 | + mpp->dev_ops = mpp->var->dev_ops; |
---|
1818 | 2104 | |
---|
1819 | 2105 | /* Get and attach to service */ |
---|
1820 | 2106 | ret = mpp_attach_service(mpp, dev); |
---|
.. | .. |
---|
1823 | 2109 | return -ENODEV; |
---|
1824 | 2110 | } |
---|
1825 | 2111 | |
---|
1826 | | - mpp->dev = dev; |
---|
1827 | | - mpp->hw_ops = mpp->var->hw_ops; |
---|
1828 | | - mpp->dev_ops = mpp->var->dev_ops; |
---|
1829 | | - |
---|
1830 | | - /* read link table capacity */ |
---|
1831 | | - ret = of_property_read_u32(np, "rockchip,task-capacity", |
---|
1832 | | - &mpp->task_capacity); |
---|
1833 | | - if (ret) { |
---|
1834 | | - mpp->task_capacity = 1; |
---|
1835 | | - |
---|
1836 | | - /* power domain autosuspend delay 2s */ |
---|
1837 | | - pm_runtime_set_autosuspend_delay(dev, 2000); |
---|
1838 | | - pm_runtime_use_autosuspend(dev); |
---|
1839 | | - } else { |
---|
1840 | | - dev_info(dev, "%d task capacity link mode detected\n", |
---|
1841 | | - mpp->task_capacity); |
---|
1842 | | - /* do not setup autosuspend on multi task device */ |
---|
1843 | | - } |
---|
| 2112 | + /* power domain autosuspend delay 2s */ |
---|
| 2113 | + pm_runtime_set_autosuspend_delay(dev, 2000); |
---|
| 2114 | + pm_runtime_use_autosuspend(dev); |
---|
1844 | 2115 | |
---|
1845 | 2116 | kthread_init_work(&mpp->work, mpp_task_worker_default); |
---|
1846 | 2117 | |
---|
.. | .. |
---|
1851 | 2122 | |
---|
1852 | 2123 | device_init_wakeup(dev, true); |
---|
1853 | 2124 | pm_runtime_enable(dev); |
---|
1854 | | - |
---|
1855 | 2125 | mpp->irq = platform_get_irq(pdev, 0); |
---|
1856 | 2126 | if (mpp->irq < 0) { |
---|
1857 | 2127 | dev_err(dev, "No interrupt resource found\n"); |
---|
.. | .. |
---|
1878 | 2148 | ret = -ENOMEM; |
---|
1879 | 2149 | goto failed; |
---|
1880 | 2150 | } |
---|
| 2151 | + mpp->io_base = res->start; |
---|
1881 | 2152 | |
---|
1882 | | - pm_runtime_get_sync(dev); |
---|
1883 | 2153 | /* |
---|
1884 | 2154 | * TODO: here or at the device itself, some device does not |
---|
1885 | 2155 | * have the iommu, maybe in the device is better. |
---|
1886 | 2156 | */ |
---|
1887 | 2157 | mpp->iommu_info = mpp_iommu_probe(dev); |
---|
1888 | 2158 | if (IS_ERR(mpp->iommu_info)) { |
---|
1889 | | - dev_err(dev, "failed to attach iommu: %ld\n", |
---|
1890 | | - PTR_ERR(mpp->iommu_info)); |
---|
| 2159 | + dev_err(dev, "failed to attach iommu\n"); |
---|
| 2160 | + mpp->iommu_info = NULL; |
---|
1891 | 2161 | } |
---|
1892 | 2162 | if (mpp->hw_ops->init) { |
---|
1893 | 2163 | ret = mpp->hw_ops->init(mpp); |
---|
1894 | 2164 | if (ret) |
---|
1895 | | - goto failed_init; |
---|
| 2165 | + goto failed; |
---|
1896 | 2166 | } |
---|
1897 | | - /* set iommu fault handler */ |
---|
1898 | | - if (!IS_ERR(mpp->iommu_info)) |
---|
1899 | | - iommu_set_fault_handler(mpp->iommu_info->domain, |
---|
1900 | | - mpp_iommu_handle, mpp->queue); |
---|
1901 | 2167 | |
---|
1902 | 2168 | /* read hardware id */ |
---|
1903 | 2169 | if (hw_info->reg_id >= 0) { |
---|
| 2170 | + pm_runtime_get_sync(dev); |
---|
1904 | 2171 | if (mpp->hw_ops->clk_on) |
---|
1905 | 2172 | mpp->hw_ops->clk_on(mpp); |
---|
1906 | 2173 | |
---|
1907 | 2174 | hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32)); |
---|
1908 | 2175 | if (mpp->hw_ops->clk_off) |
---|
1909 | 2176 | mpp->hw_ops->clk_off(mpp); |
---|
| 2177 | + pm_runtime_put_sync(dev); |
---|
1910 | 2178 | } |
---|
1911 | 2179 | |
---|
1912 | | - pm_runtime_put_sync(dev); |
---|
1913 | | - |
---|
1914 | 2180 | return ret; |
---|
1915 | | -failed_init: |
---|
1916 | | - pm_runtime_put_sync(dev); |
---|
1917 | 2181 | failed: |
---|
1918 | 2182 | mpp_detach_workqueue(mpp); |
---|
1919 | 2183 | device_init_wakeup(dev, false); |
---|
.. | .. |
---|
1928 | 2192 | mpp->hw_ops->exit(mpp); |
---|
1929 | 2193 | |
---|
1930 | 2194 | mpp_iommu_remove(mpp->iommu_info); |
---|
1931 | | - platform_device_put(mpp->pdev_srv); |
---|
1932 | 2195 | mpp_detach_workqueue(mpp); |
---|
1933 | 2196 | device_init_wakeup(mpp->dev, false); |
---|
1934 | 2197 | pm_runtime_disable(mpp->dev); |
---|
1935 | 2198 | |
---|
1936 | 2199 | return 0; |
---|
| 2200 | +} |
---|
| 2201 | + |
---|
| 2202 | +void mpp_dev_shutdown(struct platform_device *pdev) |
---|
| 2203 | +{ |
---|
| 2204 | + int ret; |
---|
| 2205 | + int val; |
---|
| 2206 | + struct device *dev = &pdev->dev; |
---|
| 2207 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
---|
| 2208 | + |
---|
| 2209 | + dev_info(dev, "shutdown device\n"); |
---|
| 2210 | + |
---|
| 2211 | + atomic_inc(&mpp->srv->shutdown_request); |
---|
| 2212 | + ret = readx_poll_timeout(atomic_read, |
---|
| 2213 | + &mpp->task_count, |
---|
| 2214 | + val, val == 0, 20000, 200000); |
---|
| 2215 | + if (ret == -ETIMEDOUT) |
---|
| 2216 | + dev_err(dev, "wait total %d running time out\n", |
---|
| 2217 | + atomic_read(&mpp->task_count)); |
---|
| 2218 | + else |
---|
| 2219 | + dev_info(dev, "shutdown success\n"); |
---|
1937 | 2220 | } |
---|
1938 | 2221 | |
---|
1939 | 2222 | int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv) |
---|
.. | .. |
---|
1951 | 2234 | struct mpp_dev *mpp = param; |
---|
1952 | 2235 | struct mpp_task *task = mpp->cur_task; |
---|
1953 | 2236 | irqreturn_t irq_ret = IRQ_NONE; |
---|
| 2237 | + u32 timing_en = mpp->srv->timing_en; |
---|
| 2238 | + |
---|
| 2239 | + if (task && timing_en) { |
---|
| 2240 | + task->on_irq = ktime_get(); |
---|
| 2241 | + set_bit(TASK_TIMING_IRQ, &task->state); |
---|
| 2242 | + } |
---|
1954 | 2243 | |
---|
1955 | 2244 | if (mpp->dev_ops->irq) |
---|
1956 | 2245 | irq_ret = mpp->dev_ops->irq(mpp); |
---|
1957 | 2246 | |
---|
1958 | 2247 | if (task) { |
---|
1959 | | - if (irq_ret != IRQ_NONE) { |
---|
| 2248 | + if (irq_ret == IRQ_WAKE_THREAD) { |
---|
1960 | 2249 | /* if wait or delayed work timeout, abort request will turn on, |
---|
1961 | 2250 | * isr should not to response, and handle it in delayed work |
---|
1962 | 2251 | */ |
---|
.. | .. |
---|
1966 | 2255 | irq_ret = IRQ_HANDLED; |
---|
1967 | 2256 | goto done; |
---|
1968 | 2257 | } |
---|
| 2258 | + if (timing_en) { |
---|
| 2259 | + task->on_cancel_timeout = ktime_get(); |
---|
| 2260 | + set_bit(TASK_TIMING_TO_CANCEL, &task->state); |
---|
| 2261 | + } |
---|
1969 | 2262 | cancel_delayed_work(&task->timeout_work); |
---|
1970 | 2263 | /* normal condition, set state and wake up isr thread */ |
---|
1971 | 2264 | set_bit(TASK_STATE_IRQ, &task->state); |
---|
1972 | 2265 | } |
---|
| 2266 | + |
---|
| 2267 | + if (irq_ret == IRQ_WAKE_THREAD) |
---|
| 2268 | + mpp_iommu_dev_deactivate(mpp->iommu_info, mpp); |
---|
1973 | 2269 | } else { |
---|
1974 | 2270 | mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n"); |
---|
1975 | 2271 | } |
---|
.. | .. |
---|
1981 | 2277 | { |
---|
1982 | 2278 | irqreturn_t ret = IRQ_NONE; |
---|
1983 | 2279 | struct mpp_dev *mpp = param; |
---|
| 2280 | + struct mpp_task *task = mpp->cur_task; |
---|
| 2281 | + |
---|
| 2282 | + if (task && mpp->srv->timing_en) { |
---|
| 2283 | + task->on_isr = ktime_get(); |
---|
| 2284 | + set_bit(TASK_TIMING_ISR, &task->state); |
---|
| 2285 | + } |
---|
1984 | 2286 | |
---|
1985 | 2287 | if (mpp->auto_freq_en && |
---|
1986 | 2288 | mpp->hw_ops->reduce_freq && |
---|
.. | .. |
---|
2030 | 2332 | |
---|
2031 | 2333 | int mpp_time_record(struct mpp_task *task) |
---|
2032 | 2334 | { |
---|
2033 | | - if (mpp_debug_unlikely(DEBUG_TIMING) && task) |
---|
2034 | | - do_gettimeofday(&task->start); |
---|
| 2335 | + if (mpp_debug_unlikely(DEBUG_TIMING) && task) { |
---|
| 2336 | + task->start = ktime_get(); |
---|
| 2337 | + task->part = task->start; |
---|
| 2338 | + } |
---|
| 2339 | + |
---|
| 2340 | + return 0; |
---|
| 2341 | +} |
---|
| 2342 | + |
---|
| 2343 | +int mpp_time_part_diff(struct mpp_task *task) |
---|
| 2344 | +{ |
---|
| 2345 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
---|
| 2346 | + ktime_t end; |
---|
| 2347 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
---|
| 2348 | + |
---|
| 2349 | + end = ktime_get(); |
---|
| 2350 | + mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n", |
---|
| 2351 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
---|
| 2352 | + task->session->index, ktime_us_delta(end, task->part)); |
---|
| 2353 | + task->part = end; |
---|
| 2354 | + } |
---|
2035 | 2355 | |
---|
2036 | 2356 | return 0; |
---|
2037 | 2357 | } |
---|
2038 | 2358 | |
---|
2039 | 2359 | int mpp_time_diff(struct mpp_task *task) |
---|
2040 | 2360 | { |
---|
2041 | | - struct timeval end; |
---|
2042 | | - struct mpp_dev *mpp = task->session->mpp; |
---|
| 2361 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
---|
| 2362 | + ktime_t end; |
---|
| 2363 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
---|
2043 | 2364 | |
---|
2044 | | - do_gettimeofday(&end); |
---|
2045 | | - mpp_debug(DEBUG_TIMING, "%s: pid: %d, session: %p, time: %ld us\n", |
---|
2046 | | - dev_name(mpp->dev), task->session->pid, task->session, |
---|
2047 | | - (end.tv_sec - task->start.tv_sec) * 1000000 + |
---|
2048 | | - (end.tv_usec - task->start.tv_usec)); |
---|
| 2365 | + end = ktime_get(); |
---|
| 2366 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n", |
---|
| 2367 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
---|
| 2368 | + task->session->index, ktime_us_delta(end, task->start)); |
---|
| 2369 | + } |
---|
2049 | 2370 | |
---|
2050 | 2371 | return 0; |
---|
| 2372 | +} |
---|
| 2373 | + |
---|
| 2374 | +int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz) |
---|
| 2375 | +{ |
---|
| 2376 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
---|
| 2377 | + ktime_t end; |
---|
| 2378 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
---|
| 2379 | + |
---|
| 2380 | + end = ktime_get(); |
---|
| 2381 | + |
---|
| 2382 | + if (clk_hz) |
---|
| 2383 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n", |
---|
| 2384 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
---|
| 2385 | + task->session->index, ktime_us_delta(end, task->start), |
---|
| 2386 | + task->hw_cycles / (clk_hz / 1000000)); |
---|
| 2387 | + else |
---|
| 2388 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n", |
---|
| 2389 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
---|
| 2390 | + task->session->index, ktime_us_delta(end, task->start)); |
---|
| 2391 | + } |
---|
| 2392 | + |
---|
| 2393 | + return 0; |
---|
| 2394 | +} |
---|
| 2395 | + |
---|
| 2396 | +#define LOG_TIMING(state, id, stage, time, base) \ |
---|
| 2397 | + do { \ |
---|
| 2398 | + if (test_bit(id, &state)) \ |
---|
| 2399 | + pr_info("timing: %-14s : %lld us\n", stage, ktime_us_delta(time, base)); \ |
---|
| 2400 | + else \ |
---|
| 2401 | + pr_info("timing: %-14s : invalid\n", stage); \ |
---|
| 2402 | + } while (0) |
---|
| 2403 | + |
---|
| 2404 | +void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff) |
---|
| 2405 | +{ |
---|
| 2406 | + ktime_t s = task->on_create; |
---|
| 2407 | + unsigned long state = task->state; |
---|
| 2408 | + |
---|
| 2409 | + pr_info("task %d dump timing at %lld us:", task->task_id, time_diff); |
---|
| 2410 | + |
---|
| 2411 | + pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s)); |
---|
| 2412 | + LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s); |
---|
| 2413 | + LOG_TIMING(state, TASK_TIMING_PENDING, "pending", task->on_pending, s); |
---|
| 2414 | + LOG_TIMING(state, TASK_TIMING_RUN, "run", task->on_run, s); |
---|
| 2415 | + LOG_TIMING(state, TASK_TIMING_TO_SCHED, "timeout start", task->on_sched_timeout, s); |
---|
| 2416 | + LOG_TIMING(state, TASK_TIMING_RUN_END, "run end", task->on_run_end, s); |
---|
| 2417 | + LOG_TIMING(state, TASK_TIMING_IRQ, "irq", task->on_irq, s); |
---|
| 2418 | + LOG_TIMING(state, TASK_TIMING_TO_CANCEL, "timeout cancel", task->on_cancel_timeout, s); |
---|
| 2419 | + LOG_TIMING(state, TASK_TIMING_ISR, "isr", task->on_isr, s); |
---|
| 2420 | + LOG_TIMING(state, TASK_TIMING_FINISH, "finish", task->on_finish, s); |
---|
2051 | 2421 | } |
---|
2052 | 2422 | |
---|
2053 | 2423 | int mpp_write_req(struct mpp_dev *mpp, u32 *regs, |
---|
.. | .. |
---|
2184 | 2554 | if (clk_rate_hz) { |
---|
2185 | 2555 | clk_info->used_rate_hz = clk_rate_hz; |
---|
2186 | 2556 | clk_set_rate(clk_info->clk, clk_rate_hz); |
---|
| 2557 | + clk_info->real_rate_hz = clk_get_rate(clk_info->clk); |
---|
2187 | 2558 | } |
---|
2188 | 2559 | |
---|
2189 | 2560 | return 0; |
---|
.. | .. |
---|
2217 | 2588 | return count; |
---|
2218 | 2589 | } |
---|
2219 | 2590 | |
---|
2220 | | -static const struct file_operations procfs_fops_u32 = { |
---|
2221 | | - .open = fops_open_u32, |
---|
2222 | | - .read = seq_read, |
---|
2223 | | - .release = single_release, |
---|
2224 | | - .write = fops_write_u32, |
---|
| 2591 | +static const struct proc_ops procfs_fops_u32 = { |
---|
| 2592 | + .proc_open = fops_open_u32, |
---|
| 2593 | + .proc_read = seq_read, |
---|
| 2594 | + .proc_release = single_release, |
---|
| 2595 | + .proc_write = fops_write_u32, |
---|
2225 | 2596 | }; |
---|
2226 | 2597 | |
---|
2227 | 2598 | struct proc_dir_entry * |
---|
.. | .. |
---|
2230 | 2601 | { |
---|
2231 | 2602 | return proc_create_data(name, mode, parent, &procfs_fops_u32, data); |
---|
2232 | 2603 | } |
---|
| 2604 | + |
---|
| 2605 | +void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp) |
---|
| 2606 | +{ |
---|
| 2607 | + mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable); |
---|
| 2608 | + mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check); |
---|
| 2609 | +} |
---|
2233 | 2610 | #endif |
---|