.. | .. |
---|
36 | 36 | #include "mpp_common.h" |
---|
37 | 37 | #include "mpp_iommu.h" |
---|
38 | 38 | |
---|
39 | | -#define MPP_WORK_TIMEOUT_DELAY (200) |
---|
40 | 39 | #define MPP_WAIT_TIMEOUT_DELAY (2000) |
---|
41 | 40 | |
---|
42 | 41 | /* Use 'v' as magic number */ |
---|
.. | .. |
---|
231 | 230 | return 0; |
---|
232 | 231 | } |
---|
233 | 232 | |
---|
234 | | -static int mpp_session_clear(struct mpp_dev *mpp, |
---|
235 | | - struct mpp_session *session) |
---|
| 233 | +static int mpp_session_clear_pending(struct mpp_session *session) |
---|
236 | 234 | { |
---|
237 | 235 | struct mpp_task *task = NULL, *n; |
---|
238 | | - |
---|
239 | | - /* clear session done list */ |
---|
240 | | - mutex_lock(&session->done_lock); |
---|
241 | | - list_for_each_entry_safe(task, n, |
---|
242 | | - &session->done_list, |
---|
243 | | - done_link) { |
---|
244 | | - list_del_init(&task->done_link); |
---|
245 | | - kref_put(&task->ref, mpp_free_task); |
---|
246 | | - } |
---|
247 | | - mutex_unlock(&session->done_lock); |
---|
248 | 236 | |
---|
249 | 237 | /* clear session pending list */ |
---|
250 | 238 | mutex_lock(&session->pending_lock); |
---|
.. | .. |
---|
261 | 249 | return 0; |
---|
262 | 250 | } |
---|
263 | 251 | |
---|
| 252 | +void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work) |
---|
| 253 | +{ |
---|
| 254 | + struct mpp_session *session, *n; |
---|
| 255 | + |
---|
| 256 | + if (!atomic_read(&queue->detach_count)) |
---|
| 257 | + return; |
---|
| 258 | + |
---|
| 259 | + mutex_lock(&queue->session_lock); |
---|
| 260 | + list_for_each_entry_safe(session, n, &queue->session_detach, session_link) { |
---|
| 261 | + s32 task_count = atomic_read(&session->task_count); |
---|
| 262 | + |
---|
| 263 | + if (!task_count) { |
---|
| 264 | + list_del_init(&session->session_link); |
---|
| 265 | + atomic_dec(&queue->detach_count); |
---|
| 266 | + } |
---|
| 267 | + |
---|
| 268 | + mutex_unlock(&queue->session_lock); |
---|
| 269 | + |
---|
| 270 | + if (task_count) { |
---|
| 271 | + mpp_dbg_session("session %d:%d task not finished %d\n", |
---|
| 272 | + session->pid, session->index, |
---|
| 273 | + atomic_read(&queue->detach_count)); |
---|
| 274 | + |
---|
| 275 | + mpp_session_clear_pending(session); |
---|
| 276 | + } else { |
---|
| 277 | + mpp_dbg_session("queue detach %d\n", |
---|
| 278 | + atomic_read(&queue->detach_count)); |
---|
| 279 | + |
---|
| 280 | + mpp_session_deinit(session); |
---|
| 281 | + } |
---|
| 282 | + |
---|
| 283 | + mutex_lock(&queue->session_lock); |
---|
| 284 | + } |
---|
| 285 | + mutex_unlock(&queue->session_lock); |
---|
| 286 | + |
---|
| 287 | + if (atomic_read(&queue->detach_count)) { |
---|
| 288 | + mpp_dbg_session("queue detach %d again\n", |
---|
| 289 | + atomic_read(&queue->detach_count)); |
---|
| 290 | + |
---|
| 291 | + kthread_queue_work(&queue->worker, work); |
---|
| 292 | + } |
---|
| 293 | +} |
---|
| 294 | + |
---|
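The new mpp_session_cleanup_detach() above walks queue->session_detach under session_lock but drops the lock around the per-session work: an idle session is unlinked and deinitialized, a session that still owns tasks only gets its pending list cleared, and the kthread work is re-queued if detached sessions remain. Below is a minimal userspace sketch of that "unlock around per-element work" idiom using pthreads; all names are invented for illustration and this is not driver code.

```c
/* Minimal userspace analogue of the "drop the lock around per-element
 * work" idiom used by mpp_session_cleanup_detach(). Invented names. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    int busy;                        /* analogue of session->task_count != 0 */
    struct node *next;
};

static struct node *detach_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void cleanup_detached(void)
{
    pthread_mutex_lock(&list_lock);
    while (detach_list) {
        struct node *n = detach_list;
        int busy = n->busy;

        if (!busy)
            detach_list = n->next;   /* unlink only when idle */

        /* Drop the lock for the potentially slow per-element work,
         * just as the driver drops queue->session_lock. */
        pthread_mutex_unlock(&list_lock);

        if (busy)
            printf("node %p still busy, skipping\n", (void *)n);
        else
            free(n);                 /* analogue of mpp_session_deinit() */

        pthread_mutex_lock(&list_lock);
        if (busy)
            break;                   /* retry on a later pass */
    }
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = calloc(1, sizeof(*n));
        n->next = detach_list;
        detach_list = n;
    }
    cleanup_detached();
    return 0;
}
```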
264 | 295 | static struct mpp_session *mpp_session_init(void) |
---|
265 | 296 | { |
---|
266 | 297 | struct mpp_session *session = kzalloc(sizeof(*session), GFP_KERNEL); |
---|
.. | .. |
---|
271 | 302 | session->pid = current->pid; |
---|
272 | 303 | |
---|
273 | 304 | mutex_init(&session->pending_lock); |
---|
274 | | - mutex_init(&session->done_lock); |
---|
275 | 305 | INIT_LIST_HEAD(&session->pending_list); |
---|
276 | | - INIT_LIST_HEAD(&session->done_list); |
---|
277 | 306 | INIT_LIST_HEAD(&session->service_link); |
---|
278 | 307 | INIT_LIST_HEAD(&session->session_link); |
---|
279 | 308 | |
---|
280 | | - init_waitqueue_head(&session->wait); |
---|
281 | 309 | atomic_set(&session->task_count, 0); |
---|
282 | 310 | atomic_set(&session->release_request, 0); |
---|
283 | 311 | |
---|
.. | .. |
---|
293 | 321 | if (mpp->dev_ops->free_session) |
---|
294 | 322 | mpp->dev_ops->free_session(session); |
---|
295 | 323 | |
---|
296 | | - mpp_session_clear(mpp, session); |
---|
| 324 | + mpp_session_clear_pending(session); |
---|
297 | 325 | |
---|
298 | 326 | if (session->dma) { |
---|
299 | 327 | mpp_iommu_down_read(mpp->iommu_info); |
---|
.. | .. |
---|
314 | 342 | list_del_init(&session->session_link); |
---|
315 | 343 | } |
---|
316 | 344 | |
---|
317 | | -int mpp_session_deinit(struct mpp_session *session) |
---|
| 345 | +void mpp_session_deinit(struct mpp_session *session) |
---|
318 | 346 | { |
---|
319 | | - u32 task_count = atomic_read(&session->task_count); |
---|
320 | | - |
---|
321 | | - mpp_dbg_session("session %p:%d task %d release\n", |
---|
322 | | - session, session->index, task_count); |
---|
323 | | - if (task_count) |
---|
324 | | - return -1; |
---|
| 347 | + mpp_dbg_session("session %d:%d task %d deinit\n", session->pid, |
---|
| 348 | + session->index, atomic_read(&session->task_count)); |
---|
325 | 349 | |
---|
326 | 350 | if (likely(session->deinit)) |
---|
327 | 351 | session->deinit(session); |
---|
.. | .. |
---|
331 | 355 | mpp_dbg_session("session %p:%d deinit\n", session, session->index); |
---|
332 | 356 | |
---|
333 | 357 | kfree(session); |
---|
334 | | - return 0; |
---|
335 | 358 | } |
---|
336 | 359 | |
---|
337 | 360 | static void mpp_session_attach_workqueue(struct mpp_session *session, |
---|
338 | 361 | struct mpp_taskqueue *queue) |
---|
339 | 362 | { |
---|
340 | | - mpp_dbg_session("session %p:%d attach\n", session, session->index); |
---|
| 363 | + mpp_dbg_session("session %d:%d attach\n", session->pid, session->index); |
---|
341 | 364 | mutex_lock(&queue->session_lock); |
---|
342 | 365 | list_add_tail(&session->session_link, &queue->session_attach); |
---|
343 | 366 | mutex_unlock(&queue->session_lock); |
---|
.. | .. |
---|
351 | 374 | if (!session->mpp || !session->mpp->queue) |
---|
352 | 375 | return; |
---|
353 | 376 | |
---|
354 | | - mpp_dbg_session("session %p:%d detach\n", session, session->index); |
---|
| 377 | + mpp_dbg_session("session %d:%d detach\n", session->pid, session->index); |
---|
355 | 378 | mpp = session->mpp; |
---|
356 | 379 | queue = mpp->queue; |
---|
357 | 380 | |
---|
358 | 381 | mutex_lock(&queue->session_lock); |
---|
359 | 382 | list_del_init(&session->session_link); |
---|
360 | 383 | list_add_tail(&session->session_link, &queue->session_detach); |
---|
361 | | - queue->detach_count++; |
---|
| 384 | + atomic_inc(&queue->detach_count); |
---|
362 | 385 | mutex_unlock(&queue->session_lock); |
---|
363 | 386 | |
---|
364 | 387 | mpp_taskqueue_trigger_work(mpp); |
---|
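queue->detach_count also changes here from a plain counter updated under session_lock to an atomic_t, so the cleanup work can bail out with a single atomic_read() before taking any lock. A tiny C11 sketch of that early-out on an atomic counter, with invented names:

```c
/* Userspace sketch of the atomic detach counter, assuming C11 atomics.
 * The driver's atomic_t detach_count plays the same role. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int detach_count;

static void detach_one(void)  { atomic_fetch_add(&detach_count, 1); }
static void cleanup_one(void) { atomic_fetch_sub(&detach_count, 1); }

static void maybe_cleanup(void)
{
    /* Cheap early-out before taking any lock, mirroring
     * the !atomic_read(&queue->detach_count) check. */
    if (!atomic_load(&detach_count))
        return;
    cleanup_one();
    printf("remaining %d\n", atomic_load(&detach_count));
}

int main(void)
{
    detach_one();
    maybe_cleanup();
    return 0;
}
```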
.. | .. |
---|
370 | 393 | { |
---|
371 | 394 | kref_get(&task->ref); |
---|
372 | 395 | mutex_lock(&session->pending_lock); |
---|
| 396 | + if (session->srv->timing_en) { |
---|
| 397 | + task->on_pending = ktime_get(); |
---|
| 398 | + set_bit(TASK_TIMING_PENDING, &task->state); |
---|
| 399 | + } |
---|
373 | 400 | list_add_tail(&task->pending_link, &session->pending_list); |
---|
374 | 401 | mutex_unlock(&session->pending_lock); |
---|
375 | 402 | |
---|
.. | .. |
---|
402 | 429 | return task; |
---|
403 | 430 | } |
---|
404 | 431 | |
---|
405 | | -static int mpp_session_push_done(struct mpp_session *session, |
---|
406 | | - struct mpp_task *task) |
---|
407 | | -{ |
---|
408 | | - kref_get(&task->ref); |
---|
409 | | - mutex_lock(&session->done_lock); |
---|
410 | | - list_add_tail(&task->done_link, &session->done_list); |
---|
411 | | - mutex_unlock(&session->done_lock); |
---|
412 | | - |
---|
413 | | - return 0; |
---|
414 | | -} |
---|
415 | | - |
---|
416 | | -static int mpp_session_pop_done(struct mpp_session *session, |
---|
417 | | - struct mpp_task *task) |
---|
418 | | -{ |
---|
419 | | - mutex_lock(&session->done_lock); |
---|
420 | | - list_del_init(&task->done_link); |
---|
421 | | - mutex_unlock(&session->done_lock); |
---|
422 | | - set_bit(TASK_STATE_DONE, &task->state); |
---|
423 | | - kref_put(&task->ref, mpp_free_task); |
---|
424 | | - |
---|
425 | | - return 0; |
---|
426 | | -} |
---|
427 | | - |
---|
428 | 432 | static void mpp_free_task(struct kref *ref) |
---|
429 | 433 | { |
---|
430 | 434 | struct mpp_dev *mpp; |
---|
.. | .. |
---|
438 | 442 | session = task->session; |
---|
439 | 443 | |
---|
440 | 444 | mpp_debug_func(DEBUG_TASK_INFO, |
---|
441 | | - "session=%p, task=%p, state=0x%lx, abort_request=%d\n", |
---|
442 | | - session, task, task->state, |
---|
443 | | - atomic_read(&task->abort_request)); |
---|
| 445 | + "session %d:%d task %d state 0x%lx abort_request %d\n", |
---|
| 446 | + session->device_type, session->index, task->task_index, |
---|
| 447 | + task->state, atomic_read(&task->abort_request)); |
---|
444 | 448 | if (!session->mpp) { |
---|
445 | 449 | mpp_err("session %p, session->mpp is null.\n", session); |
---|
446 | 450 | return; |
---|
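With the per-session done_list helpers removed above, a task's lifetime is carried entirely by its kref: each holder (pending list, running queue, waiter) takes a reference and drops it with kref_put(&task->ref, mpp_free_task), and the last put frees the task. Below is a minimal userspace sketch of that release-on-last-put pattern using C11 atomics; the names are invented and the kernel's kref API is the real mechanism.

```c
/* Sketch of the kref pattern that keeps a task alive while several
 * paths hold references. Plain C11 atomics, invented names. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
    atomic_int ref;
};

static void task_get(struct task *t)
{
    atomic_fetch_add(&t->ref, 1);
}

static void task_put(struct task *t, void (*release)(struct task *))
{
    /* The last put runs the release callback, like
     * kref_put(&task->ref, mpp_free_task) in the driver. */
    if (atomic_fetch_sub(&t->ref, 1) == 1)
        release(t);
}

static void task_free(struct task *t)
{
    printf("freeing task %p\n", (void *)t);
    free(t);
}

int main(void)
{
    struct task *t = calloc(1, sizeof(*t));

    atomic_init(&t->ref, 1);   /* analogue of kref_init() */
    task_get(t);               /* e.g. pending-list reference */
    task_put(t, task_free);    /* drop pending-list reference */
    task_put(t, task_free);    /* drop initial reference: frees */
    return 0;
}
```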
.. | .. |
---|
469 | 473 | return; |
---|
470 | 474 | } |
---|
471 | 475 | |
---|
472 | | - mpp_err("task %p processing time out!\n", task); |
---|
473 | 476 | if (!task->session) { |
---|
474 | 477 | mpp_err("task %p, task->session is null.\n", task); |
---|
475 | 478 | return; |
---|
476 | 479 | } |
---|
| 480 | + |
---|
477 | 481 | session = task->session; |
---|
478 | 482 | |
---|
479 | 483 | if (!session->mpp) { |
---|
480 | | - mpp_err("session %p, session->mpp is null.\n", session); |
---|
| 484 | + mpp_err("session %d:%d, session mpp is null.\n", session->pid, |
---|
| 485 | + session->index); |
---|
481 | 486 | return; |
---|
482 | 487 | } |
---|
483 | 488 | mpp = session->mpp; |
---|
484 | | - |
---|
| 489 | + dev_err(mpp->dev, "session %d:%d task %d state %lx processing time out!\n", |
---|
| 490 | + session->device_type, session->index, task->task_index, task->state); |
---|
485 | 491 | synchronize_hardirq(mpp->irq); |
---|
486 | 492 | |
---|
487 | 493 | if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) { |
---|
.. | .. |
---|
489 | 495 | return; |
---|
490 | 496 | } |
---|
491 | 497 | |
---|
| 498 | + mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create)); |
---|
| 499 | + |
---|
| 500 | + /* disable core irq */ |
---|
| 501 | + disable_irq(mpp->irq); |
---|
| 502 | + /* disable mmu irq */ |
---|
| 503 | + mpp_iommu_disable_irq(mpp->iommu_info); |
---|
| 504 | + |
---|
492 | 505 | /* hardware maybe dead, reset it */ |
---|
493 | 506 | mpp_reset_up_read(mpp->reset_group); |
---|
494 | 507 | mpp_dev_reset(mpp); |
---|
495 | 508 | mpp_power_off(mpp); |
---|
496 | 509 | |
---|
497 | | - mpp_session_push_done(session, task); |
---|
| 510 | + set_bit(TASK_STATE_TIMEOUT, &task->state); |
---|
| 511 | + set_bit(TASK_STATE_DONE, &task->state); |
---|
498 | 512 | /* Wake up the GET thread */ |
---|
499 | | - wake_up(&session->wait); |
---|
| 513 | + wake_up(&task->wait); |
---|
500 | 514 | |
---|
501 | 515 | /* remove task from taskqueue running list */ |
---|
502 | | - set_bit(TASK_STATE_TIMEOUT, &task->state); |
---|
503 | 516 | mpp_taskqueue_pop_running(mpp->queue, task); |
---|
| 517 | + |
---|
| 518 | + /* enable core irq */ |
---|
| 519 | + enable_irq(mpp->irq); |
---|
| 520 | + /* enable mmu irq */ |
---|
| 521 | + mpp_iommu_enable_irq(mpp->iommu_info); |
---|
| 522 | + |
---|
| 523 | + mpp_taskqueue_trigger_work(mpp); |
---|
504 | 524 | } |
---|
505 | 525 | |
---|
506 | 526 | static int mpp_process_task_default(struct mpp_session *session, |
---|
.. | .. |
---|
508 | 528 | { |
---|
509 | 529 | struct mpp_task *task = NULL; |
---|
510 | 530 | struct mpp_dev *mpp = session->mpp; |
---|
| 531 | + u32 timing_en; |
---|
| 532 | + ktime_t on_create; |
---|
511 | 533 | |
---|
512 | 534 | if (unlikely(!mpp)) { |
---|
513 | 535 | mpp_err("pid %d clinet %d found invalid process function\n", |
---|
.. | .. |
---|
515 | 537 | return -EINVAL; |
---|
516 | 538 | } |
---|
517 | 539 | |
---|
| 540 | + timing_en = session->srv->timing_en; |
---|
| 541 | + if (timing_en) |
---|
| 542 | + on_create = ktime_get(); |
---|
| 543 | + |
---|
518 | 544 | if (mpp->dev_ops->alloc_task) |
---|
519 | 545 | task = mpp->dev_ops->alloc_task(session, msgs); |
---|
520 | 546 | if (!task) { |
---|
521 | 547 | mpp_err("alloc_task failed.\n"); |
---|
522 | 548 | return -ENOMEM; |
---|
523 | 549 | } |
---|
| 550 | + |
---|
| 551 | + if (timing_en) { |
---|
| 552 | + task->on_create_end = ktime_get(); |
---|
| 553 | + task->on_create = on_create; |
---|
| 554 | + set_bit(TASK_TIMING_CREATE_END, &task->state); |
---|
| 555 | + set_bit(TASK_TIMING_CREATE, &task->state); |
---|
| 556 | + } |
---|
| 557 | + |
---|
524 | 558 | kref_init(&task->ref); |
---|
| 559 | + init_waitqueue_head(&task->wait); |
---|
525 | 560 | atomic_set(&task->abort_request, 0); |
---|
526 | 561 | task->task_index = atomic_fetch_inc(&mpp->task_index); |
---|
527 | 562 | INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work); |
---|
.. | .. |
---|
544 | 579 | /* trigger current queue to run task */ |
---|
545 | 580 | mpp_taskqueue_trigger_work(mpp); |
---|
546 | 581 | kref_put(&task->ref, mpp_free_task); |
---|
547 | | - |
---|
| 582 | + mpp_debug_func(DEBUG_TASK_INFO, |
---|
| 583 | + "session %d:%d task %d state 0x%lx\n", |
---|
| 584 | + session->device_type, session->index, |
---|
| 585 | + task->task_index, task->state); |
---|
548 | 586 | return 0; |
---|
549 | 587 | } |
---|
550 | 588 | |
---|
.. | .. |
---|
599 | 637 | group->resets[type] = rst; |
---|
600 | 638 | group->queue = mpp->queue; |
---|
601 | 639 | } |
---|
602 | | - /* if reset not in the same queue, it means different device |
---|
603 | | - * may reset in the same time, then rw_sem_on should set true. |
---|
604 | | - */ |
---|
605 | | - group->rw_sem_on |= (group->queue != mpp->queue) ? true : false; |
---|
606 | 640 | dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on); |
---|
607 | 641 | up_write(&group->rw_sem); |
---|
608 | 642 | |
---|
.. | .. |
---|
647 | 681 | return 0; |
---|
648 | 682 | } |
---|
649 | 683 | |
---|
| 684 | +void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout) |
---|
| 685 | +{ |
---|
| 686 | + preempt_disable(); |
---|
| 687 | + |
---|
| 688 | + set_bit(TASK_STATE_START, &task->state); |
---|
| 689 | + |
---|
| 690 | + mpp_time_record(task); |
---|
| 691 | + schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout)); |
---|
| 692 | + |
---|
| 693 | + if (timing_en) { |
---|
| 694 | + task->on_sched_timeout = ktime_get(); |
---|
| 695 | + set_bit(TASK_TIMING_TO_SCHED, &task->state); |
---|
| 696 | + } |
---|
| 697 | +} |
---|
| 698 | + |
---|
| 699 | +void mpp_task_run_end(struct mpp_task *task, u32 timing_en) |
---|
| 700 | +{ |
---|
| 701 | + if (timing_en) { |
---|
| 702 | + task->on_run_end = ktime_get(); |
---|
| 703 | + set_bit(TASK_TIMING_RUN_END, &task->state); |
---|
| 704 | + } |
---|
| 705 | + |
---|
| 706 | +#ifdef MODULE |
---|
| 707 | + preempt_enable(); |
---|
| 708 | +#else |
---|
| 709 | + preempt_enable_no_resched(); |
---|
| 710 | +#endif |
---|
| 711 | +} |
---|
| 712 | + |
---|
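The new mpp_task_run_begin()/mpp_task_run_end() helpers bracket the point where a task is handed to the hardware: they mark the task started, record the start time, arm the per-task timeout work, and take optional per-stage timestamps, with preemption disabled across the bracket so the register-programming window is not stretched by a reschedule. Below is a minimal userspace sketch of just the conditional timing bracket (the preempt_disable()/preempt_enable() part is kernel-only and omitted); clock_gettime() stands in for ktime_get() and all names are invented.

```c
/* Userspace sketch of a begin/end timing bracket taken only when a
 * timing flag is enabled, mirroring mpp_task_run_begin()/_end(). */
#include <stdio.h>
#include <time.h>

struct task_timing {
    struct timespec on_run, on_run_end;
};

static void run_begin(struct task_timing *t, int timing_en)
{
    if (timing_en)
        clock_gettime(CLOCK_MONOTONIC, &t->on_run);
    /* the driver additionally arms the timeout work and disables
     * preemption here */
}

static void run_end(struct task_timing *t, int timing_en)
{
    if (timing_en)
        clock_gettime(CLOCK_MONOTONIC, &t->on_run_end);
}

static long long us_delta(struct timespec a, struct timespec b)
{
    return (a.tv_sec - b.tv_sec) * 1000000LL +
           (a.tv_nsec - b.tv_nsec) / 1000;
}

int main(void)
{
    struct task_timing t = {0};

    run_begin(&t, 1);
    /* ... program and kick the hardware here ... */
    run_end(&t, 1);
    printf("run took %lld us\n", us_delta(t.on_run_end, t.on_run));
    return 0;
}
```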
650 | 713 | static int mpp_task_run(struct mpp_dev *mpp, |
---|
651 | 714 | struct mpp_task *task) |
---|
652 | 715 | { |
---|
653 | 716 | int ret; |
---|
| 717 | + struct mpp_session *session = task->session; |
---|
| 718 | + u32 timing_en; |
---|
654 | 719 | |
---|
655 | 720 | mpp_debug_enter(); |
---|
| 721 | + |
---|
| 722 | + timing_en = mpp->srv->timing_en; |
---|
| 723 | + if (timing_en) { |
---|
| 724 | + task->on_run = ktime_get(); |
---|
| 725 | + set_bit(TASK_TIMING_RUN, &task->state); |
---|
| 726 | + } |
---|
656 | 727 | |
---|
657 | 728 | /* |
---|
658 | 729 | * before running, we have to switch grf ctrl bit to ensure |
---|
.. | .. |
---|
679 | 750 | |
---|
680 | 751 | mpp_power_on(mpp); |
---|
681 | 752 | mpp_time_record(task); |
---|
682 | | - mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n", |
---|
683 | | - task->session->pid, dev_name(mpp->dev)); |
---|
| 753 | + mpp_debug_func(DEBUG_TASK_INFO, |
---|
| 754 | + "%s session %d:%d task=%d state=0x%lx\n", |
---|
| 755 | + dev_name(mpp->dev), session->device_type, |
---|
| 756 | + session->index, task->task_index, task->state); |
---|
684 | 757 | |
---|
685 | 758 | if (mpp->auto_freq_en && mpp->hw_ops->set_freq) |
---|
686 | 759 | mpp->hw_ops->set_freq(mpp, task); |
---|
.. | .. |
---|
690 | 763 | */ |
---|
691 | 764 | mpp_reset_down_read(mpp->reset_group); |
---|
692 | 765 | |
---|
693 | | - schedule_delayed_work(&task->timeout_work, |
---|
694 | | - msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY)); |
---|
695 | 766 | if (mpp->dev_ops->run) |
---|
696 | 767 | mpp->dev_ops->run(mpp, task); |
---|
697 | 768 | set_bit(TASK_STATE_START, &task->state); |
---|
.. | .. |
---|
709 | 780 | |
---|
710 | 781 | mpp_debug_enter(); |
---|
711 | 782 | |
---|
| 783 | +get_task: |
---|
712 | 784 | task = mpp_taskqueue_get_pending_task(queue); |
---|
713 | 785 | if (!task) |
---|
714 | 786 | goto done; |
---|
.. | .. |
---|
716 | 788 | /* if task timeout and aborted, remove it */ |
---|
717 | 789 | if (atomic_read(&task->abort_request) > 0) { |
---|
718 | 790 | mpp_taskqueue_pop_pending(queue, task); |
---|
719 | | - goto done; |
---|
| 791 | + goto get_task; |
---|
720 | 792 | } |
---|
721 | 793 | |
---|
722 | 794 | /* get device for current task */ |
---|
.. | .. |
---|
748 | 820 | } |
---|
749 | 821 | |
---|
750 | 822 | done: |
---|
751 | | - mutex_lock(&queue->session_lock); |
---|
752 | | - while (queue->detach_count) { |
---|
753 | | - struct mpp_session *session = NULL; |
---|
754 | | - |
---|
755 | | - session = list_first_entry_or_null(&queue->session_detach, struct mpp_session, |
---|
756 | | - session_link); |
---|
757 | | - if (session) { |
---|
758 | | - list_del_init(&session->session_link); |
---|
759 | | - queue->detach_count--; |
---|
760 | | - } |
---|
761 | | - |
---|
762 | | - mutex_unlock(&queue->session_lock); |
---|
763 | | - |
---|
764 | | - if (session) { |
---|
765 | | - mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev), |
---|
766 | | - queue->detach_count); |
---|
767 | | - mpp_session_deinit(session); |
---|
768 | | - } |
---|
769 | | - |
---|
770 | | - mutex_lock(&queue->session_lock); |
---|
771 | | - } |
---|
772 | | - mutex_unlock(&queue->session_lock); |
---|
| 823 | + mpp_session_cleanup_detach(queue, work_s); |
---|
773 | 824 | } |
---|
774 | 825 | |
---|
775 | 826 | static int mpp_wait_result_default(struct mpp_session *session, |
---|
.. | .. |
---|
785 | 836 | return -EINVAL; |
---|
786 | 837 | } |
---|
787 | 838 | |
---|
788 | | - ret = wait_event_timeout(session->wait, |
---|
789 | | - !list_empty(&session->done_list), |
---|
790 | | - msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY)); |
---|
791 | | - |
---|
792 | 839 | task = mpp_session_get_pending_task(session); |
---|
793 | 840 | if (!task) { |
---|
794 | | - mpp_err("session %p pending list is empty!\n", session); |
---|
| 841 | + mpp_err("session %d:%d pending list is empty!\n", |
---|
| 842 | + session->pid, session->index); |
---|
795 | 843 | return -EIO; |
---|
796 | 844 | } |
---|
797 | 845 | |
---|
| 846 | + ret = wait_event_timeout(task->wait, |
---|
| 847 | + test_bit(TASK_STATE_DONE, &task->state), |
---|
| 848 | + msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY)); |
---|
798 | 849 | if (ret > 0) { |
---|
799 | | - u32 task_found = 0; |
---|
800 | | - struct mpp_task *loop = NULL, *n; |
---|
801 | | - |
---|
802 | | - /* find task in session done list */ |
---|
803 | | - mutex_lock(&session->done_lock); |
---|
804 | | - list_for_each_entry_safe(loop, n, |
---|
805 | | - &session->done_list, |
---|
806 | | - done_link) { |
---|
807 | | - if (loop == task) { |
---|
808 | | - task_found = 1; |
---|
809 | | - break; |
---|
810 | | - } |
---|
811 | | - } |
---|
812 | | - mutex_unlock(&session->done_lock); |
---|
813 | | - if (task_found) { |
---|
814 | | - if (mpp->dev_ops->result) |
---|
815 | | - ret = mpp->dev_ops->result(mpp, task, msgs); |
---|
816 | | - mpp_session_pop_done(session, task); |
---|
817 | | - |
---|
818 | | - if (test_bit(TASK_STATE_TIMEOUT, &task->state)) |
---|
819 | | - ret = -ETIMEDOUT; |
---|
820 | | - } else { |
---|
821 | | - mpp_err("session %p task %p, not found in done list!\n", |
---|
822 | | - session, task); |
---|
823 | | - ret = -EIO; |
---|
824 | | - } |
---|
| 850 | + if (mpp->dev_ops->result) |
---|
| 851 | + ret = mpp->dev_ops->result(mpp, task, msgs); |
---|
825 | 852 | } else { |
---|
826 | 853 | atomic_inc(&task->abort_request); |
---|
827 | | - mpp_err("timeout, pid %d session %p:%d count %d cur_task %p index %d.\n", |
---|
828 | | - session->pid, session, session->index, |
---|
829 | | - atomic_read(&session->task_count), task, |
---|
830 | | - task->task_index); |
---|
831 | | - /* if twice and return timeout, otherwise, re-wait */ |
---|
832 | | - if (atomic_read(&task->abort_request) > 1) { |
---|
833 | | - mpp_err("session %p:%d, task %p index %d abort wait twice!\n", |
---|
834 | | - session, session->index, |
---|
835 | | - task, task->task_index); |
---|
836 | | - ret = -ETIMEDOUT; |
---|
837 | | - } else { |
---|
838 | | - return mpp_wait_result_default(session, msgs); |
---|
839 | | - } |
---|
| 854 | + set_bit(TASK_STATE_ABORT, &task->state); |
---|
| 855 | + mpp_err("timeout, pid %d session %d:%d count %d cur_task %d state %lx\n", |
---|
| 856 | + session->pid, session->device_type, session->index, |
---|
| 857 | + atomic_read(&session->task_count), task->task_index, task->state); |
---|
840 | 858 | } |
---|
841 | 859 | |
---|
842 | 860 | mpp_debug_func(DEBUG_TASK_INFO, |
---|
843 | | - "kref_read=%d, ret=%d\n", kref_read(&task->ref), ret); |
---|
| 861 | + "session %d:%d task %d state 0x%lx kref_read %d ret %d\n", |
---|
| 862 | + session->device_type, |
---|
| 863 | + session->index, task->task_index, task->state, |
---|
| 864 | + kref_read(&task->ref), ret); |
---|
844 | 865 | mpp_session_pop_pending(session, task); |
---|
845 | 866 | |
---|
846 | 867 | return ret; |
---|
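mpp_wait_result_default() no longer sleeps on a session-wide done_list: it fetches the head of the session's pending list first, then waits on task->wait until TASK_STATE_DONE is set by the ISR thread or the timeout worker, which removes the old "search the done list for this task" bookkeeping. Below is a userspace analogue of that timed wait on a per-task flag, built from a pthread condition variable (wait_event_timeout() is the kernel primitive actually used); the names are invented.

```c
/* Userspace analogue of waiting on task->wait for TASK_STATE_DONE with
 * a timeout, using a condition variable. Invented names. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct task {
    pthread_mutex_t lock;
    pthread_cond_t done_cond;
    int done;                        /* analogue of TASK_STATE_DONE */
};

/* Returns 1 if the task completed, 0 on timeout. */
static int wait_task_done(struct task *t, int timeout_ms)
{
    struct timespec ts;
    int ret = 1;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += timeout_ms / 1000;
    ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&t->lock);
    while (!t->done) {
        if (pthread_cond_timedwait(&t->done_cond, &t->lock, &ts)) {
            ret = 0;                 /* timed out: caller aborts the task */
            break;
        }
    }
    pthread_mutex_unlock(&t->lock);
    return ret;
}

static void complete_task(struct task *t)
{
    pthread_mutex_lock(&t->lock);
    t->done = 1;                          /* set_bit(TASK_STATE_DONE, ...) */
    pthread_cond_signal(&t->done_cond);   /* wake_up(&task->wait) */
    pthread_mutex_unlock(&t->lock);
}

int main(void)
{
    struct task t = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done_cond = PTHREAD_COND_INITIALIZER,
    };

    complete_task(&t);
    printf("wait result: %d\n", wait_task_done(&t, 2000));
    return 0;
}
```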
.. | .. |
---|
919 | 940 | goto err_put_pdev; |
---|
920 | 941 | } else { |
---|
921 | 942 | mpp->reset_group = mpp->srv->reset_groups[reset_group_node]; |
---|
| 943 | + if (!mpp->reset_group->queue) |
---|
| 944 | + mpp->reset_group->queue = queue; |
---|
| 945 | + if (mpp->reset_group->queue != mpp->queue) |
---|
| 946 | + mpp->reset_group->rw_sem_on = true; |
---|
922 | 947 | } |
---|
923 | 948 | } |
---|
924 | 949 | |
---|
.. | .. |
---|
951 | 976 | |
---|
952 | 977 | /* default taskqueue has max 16 task capacity */ |
---|
953 | 978 | queue->task_capacity = MPP_MAX_TASK_CAPACITY; |
---|
| 979 | + |
---|
| 980 | + mutex_init(&queue->ref_lock); |
---|
| 981 | + atomic_set(&queue->runtime_cnt, 0); |
---|
| 982 | + atomic_set(&queue->detach_count, 0); |
---|
954 | 983 | |
---|
955 | 984 | return queue; |
---|
956 | 985 | } |
---|
.. | .. |
---|
1197 | 1226 | if (!mpp) |
---|
1198 | 1227 | return -EINVAL; |
---|
1199 | 1228 | |
---|
1200 | | - mpp_session_clear(mpp, session); |
---|
| 1229 | + mpp_session_clear_pending(session); |
---|
1201 | 1230 | mpp_iommu_down_write(mpp->iommu_info); |
---|
1202 | 1231 | ret = mpp_dma_session_destroy(session->dma); |
---|
1203 | 1232 | mpp_iommu_up_write(mpp->iommu_info); |
---|
.. | .. |
---|
1410 | 1439 | /* wait for task all done */ |
---|
1411 | 1440 | atomic_inc(&session->release_request); |
---|
1412 | 1441 | |
---|
1413 | | - if (session->mpp) |
---|
| 1442 | + if (session->mpp || atomic_read(&session->task_count)) |
---|
1414 | 1443 | mpp_session_detach_workqueue(session); |
---|
1415 | 1444 | else |
---|
1416 | 1445 | mpp_session_deinit(session); |
---|
.. | .. |
---|
1421 | 1450 | return 0; |
---|
1422 | 1451 | } |
---|
1423 | 1452 | |
---|
1424 | | -static unsigned int |
---|
1425 | | -mpp_dev_poll(struct file *filp, poll_table *wait) |
---|
1426 | | -{ |
---|
1427 | | - unsigned int mask = 0; |
---|
1428 | | - struct mpp_session *session = |
---|
1429 | | - (struct mpp_session *)filp->private_data; |
---|
1430 | | - |
---|
1431 | | - poll_wait(filp, &session->wait, wait); |
---|
1432 | | - if (!list_empty(&session->done_list)) |
---|
1433 | | - mask |= POLLIN | POLLRDNORM; |
---|
1434 | | - |
---|
1435 | | - return mask; |
---|
1436 | | -} |
---|
1437 | | - |
---|
1438 | 1453 | const struct file_operations rockchip_mpp_fops = { |
---|
1439 | 1454 | .open = mpp_dev_open, |
---|
1440 | 1455 | .release = mpp_dev_release, |
---|
1441 | | - .poll = mpp_dev_poll, |
---|
1442 | 1456 | .unlocked_ioctl = mpp_dev_ioctl, |
---|
1443 | 1457 | #ifdef CONFIG_COMPAT |
---|
1444 | 1458 | .compat_ioctl = mpp_dev_ioctl, |
---|
.. | .. |
---|
1673 | 1687 | mpp_dev_reset(mpp); |
---|
1674 | 1688 | mpp_power_off(mpp); |
---|
1675 | 1689 | |
---|
1676 | | - if (!atomic_read(&task->abort_request)) { |
---|
1677 | | - mpp_session_push_done(session, task); |
---|
1678 | | - /* Wake up the GET thread */ |
---|
1679 | | - wake_up(&session->wait); |
---|
1680 | | - } |
---|
1681 | 1690 | set_bit(TASK_STATE_FINISH, &task->state); |
---|
| 1691 | + set_bit(TASK_STATE_DONE, &task->state); |
---|
| 1692 | + |
---|
| 1693 | + if (session->srv->timing_en) { |
---|
| 1694 | + s64 time_diff; |
---|
| 1695 | + |
---|
| 1696 | + task->on_finish = ktime_get(); |
---|
| 1697 | + set_bit(TASK_TIMING_FINISH, &task->state); |
---|
| 1698 | + |
---|
| 1699 | + time_diff = ktime_us_delta(task->on_finish, task->on_create); |
---|
| 1700 | + |
---|
| 1701 | + if (mpp->timing_check && time_diff > (s64)mpp->timing_check) |
---|
| 1702 | + mpp_task_dump_timing(task, time_diff); |
---|
| 1703 | + } |
---|
| 1704 | + |
---|
| 1705 | + /* Wake up the GET thread */ |
---|
| 1706 | + wake_up(&task->wait); |
---|
1682 | 1707 | mpp_taskqueue_pop_running(mpp->queue, task); |
---|
1683 | 1708 | |
---|
1684 | 1709 | return 0; |
---|
.. | .. |
---|
1780 | 1805 | unsigned long iova, |
---|
1781 | 1806 | int status, void *arg) |
---|
1782 | 1807 | { |
---|
1783 | | - struct mpp_taskqueue *queue = (struct mpp_taskqueue *)arg; |
---|
| 1808 | + struct mpp_dev *mpp = (struct mpp_dev *)arg; |
---|
| 1809 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
1784 | 1810 | struct mpp_task *task = mpp_taskqueue_get_running_task(queue); |
---|
1785 | | - struct mpp_dev *mpp; |
---|
1786 | 1811 | |
---|
1787 | 1812 | /* |
---|
1788 | 1813 | * NOTE: In link mode, this task may not be the task of the current |
---|
.. | .. |
---|
1798 | 1823 | mpp_task_dump_hw_reg(mpp, task); |
---|
1799 | 1824 | |
---|
1800 | 1825 | if (mpp->iommu_info->hdl) |
---|
1801 | | - mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, arg); |
---|
| 1826 | + mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, mpp); |
---|
1802 | 1827 | |
---|
1803 | 1828 | return 0; |
---|
1804 | 1829 | } |
---|
.. | .. |
---|
1815 | 1840 | |
---|
1816 | 1841 | /* Get disable auto frequent flag from dtsi */ |
---|
1817 | 1842 | mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq"); |
---|
| 1844 | + /* read flag for pmu idle request */ |
---|
| 1844 | + mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request"); |
---|
1818 | 1845 | |
---|
1819 | 1846 | /* Get and attach to service */ |
---|
1820 | 1847 | ret = mpp_attach_service(mpp, dev); |
---|
.. | .. |
---|
1897 | 1924 | /* set iommu fault handler */ |
---|
1898 | 1925 | if (!IS_ERR(mpp->iommu_info)) |
---|
1899 | 1926 | iommu_set_fault_handler(mpp->iommu_info->domain, |
---|
1900 | | - mpp_iommu_handle, mpp->queue); |
---|
| 1927 | + mpp_iommu_handle, mpp); |
---|
1901 | 1928 | |
---|
1902 | 1929 | /* read hardware id */ |
---|
1903 | 1930 | if (hw_info->reg_id >= 0) { |
---|
.. | .. |
---|
1951 | 1978 | struct mpp_dev *mpp = param; |
---|
1952 | 1979 | struct mpp_task *task = mpp->cur_task; |
---|
1953 | 1980 | irqreturn_t irq_ret = IRQ_NONE; |
---|
| 1981 | + u32 timing_en = mpp->srv->timing_en; |
---|
| 1982 | + |
---|
| 1983 | + if (task && timing_en) { |
---|
| 1984 | + task->on_irq = ktime_get(); |
---|
| 1985 | + set_bit(TASK_TIMING_IRQ, &task->state); |
---|
| 1986 | + } |
---|
1954 | 1987 | |
---|
1955 | 1988 | if (mpp->dev_ops->irq) |
---|
1956 | 1989 | irq_ret = mpp->dev_ops->irq(mpp); |
---|
.. | .. |
---|
1965 | 1998 | mpp->irq_status); |
---|
1966 | 1999 | irq_ret = IRQ_HANDLED; |
---|
1967 | 2000 | goto done; |
---|
| 2001 | + } |
---|
| 2002 | + if (timing_en) { |
---|
| 2003 | + task->on_cancel_timeout = ktime_get(); |
---|
| 2004 | + set_bit(TASK_TIMING_TO_CANCEL, &task->state); |
---|
1968 | 2005 | } |
---|
1969 | 2006 | cancel_delayed_work(&task->timeout_work); |
---|
1970 | 2007 | /* normal condition, set state and wake up isr thread */ |
---|
.. | .. |
---|
1981 | 2018 | { |
---|
1982 | 2019 | irqreturn_t ret = IRQ_NONE; |
---|
1983 | 2020 | struct mpp_dev *mpp = param; |
---|
| 2021 | + struct mpp_task *task = mpp->cur_task; |
---|
| 2022 | + |
---|
| 2023 | + if (task && mpp->srv->timing_en) { |
---|
| 2024 | + task->on_isr = ktime_get(); |
---|
| 2025 | + set_bit(TASK_TIMING_ISR, &task->state); |
---|
| 2026 | + } |
---|
1984 | 2027 | |
---|
1985 | 2028 | if (mpp->auto_freq_en && |
---|
1986 | 2029 | mpp->hw_ops->reduce_freq && |
---|
.. | .. |
---|
2030 | 2073 | |
---|
2031 | 2074 | int mpp_time_record(struct mpp_task *task) |
---|
2032 | 2075 | { |
---|
2033 | | - if (mpp_debug_unlikely(DEBUG_TIMING) && task) |
---|
2034 | | - do_gettimeofday(&task->start); |
---|
| 2076 | + if (mpp_debug_unlikely(DEBUG_TIMING) && task) { |
---|
| 2077 | + task->start = ktime_get(); |
---|
| 2078 | + task->part = task->start; |
---|
| 2079 | + } |
---|
| 2080 | + |
---|
| 2081 | + return 0; |
---|
| 2082 | +} |
---|
| 2083 | + |
---|
| 2084 | +int mpp_time_part_diff(struct mpp_task *task) |
---|
| 2085 | +{ |
---|
| 2086 | + ktime_t end; |
---|
| 2087 | + struct mpp_dev *mpp = task->session->mpp; |
---|
| 2088 | + |
---|
| 2089 | + end = ktime_get(); |
---|
| 2090 | + mpp_debug(DEBUG_PART_TIMING, "%s: session %d:%d part time: %lld us\n", |
---|
| 2091 | + dev_name(mpp->dev), task->session->pid, task->session->index, |
---|
| 2092 | + ktime_us_delta(end, task->part)); |
---|
| 2093 | + task->part = end; |
---|
2035 | 2094 | |
---|
2036 | 2095 | return 0; |
---|
2037 | 2096 | } |
---|
2038 | 2097 | |
---|
2039 | 2098 | int mpp_time_diff(struct mpp_task *task) |
---|
2040 | 2099 | { |
---|
2041 | | - struct timeval end; |
---|
| 2100 | + ktime_t end; |
---|
2042 | 2101 | struct mpp_dev *mpp = task->session->mpp; |
---|
2043 | 2102 | |
---|
2044 | | - do_gettimeofday(&end); |
---|
2045 | | - mpp_debug(DEBUG_TIMING, "%s: pid: %d, session: %p, time: %ld us\n", |
---|
2046 | | - dev_name(mpp->dev), task->session->pid, task->session, |
---|
2047 | | - (end.tv_sec - task->start.tv_sec) * 1000000 + |
---|
2048 | | - (end.tv_usec - task->start.tv_usec)); |
---|
| 2103 | + end = ktime_get(); |
---|
| 2104 | + mpp_debug(DEBUG_TIMING, "%s: session %d:%d task time: %lld us\n", |
---|
| 2105 | + dev_name(mpp->dev), task->session->pid, task->session->index, |
---|
| 2106 | + ktime_us_delta(end, task->start)); |
---|
2049 | 2107 | |
---|
2050 | 2108 | return 0; |
---|
| 2109 | +} |
---|
| 2110 | + |
---|
| 2111 | +int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz) |
---|
| 2112 | +{ |
---|
| 2113 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
---|
| 2114 | + ktime_t end; |
---|
| 2115 | + struct mpp_dev *mpp = task->session->mpp; |
---|
| 2116 | + |
---|
| 2117 | + end = ktime_get(); |
---|
| 2118 | + |
---|
| 2119 | + if (clk_hz) |
---|
| 2120 | + mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us hw %d us\n", |
---|
| 2121 | + dev_name(mpp->dev), task->session->index, |
---|
| 2122 | + ktime_us_delta(end, task->start), |
---|
| 2123 | + task->hw_cycles / (clk_hz / 1000000)); |
---|
| 2124 | + else |
---|
| 2125 | + mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us\n", |
---|
| 2126 | + dev_name(mpp->dev), task->session->index, |
---|
| 2127 | + ktime_us_delta(end, task->start)); |
---|
| 2128 | + } |
---|
| 2129 | + |
---|
| 2130 | + return 0; |
---|
| 2131 | +} |
---|
| 2132 | + |
---|
| 2133 | +#define LOG_TIMING(state, id, stage, time, base) \ |
---|
| 2134 | + do { \ |
---|
| 2135 | + if (test_bit(id, &state)) \ |
---|
| 2136 | + pr_info("timing: %-14s : %lld us\n", stage, ktime_us_delta(time, base)); \ |
---|
| 2137 | + else \ |
---|
| 2138 | + pr_info("timing: %-14s : invalid\n", stage); \ |
---|
| 2139 | + } while (0) |
---|
| 2140 | + |
---|
| 2141 | +void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff) |
---|
| 2142 | +{ |
---|
| 2143 | + ktime_t s = task->on_create; |
---|
| 2144 | + unsigned long state = task->state; |
---|
| 2145 | + |
---|
| 2146 | + pr_info("task %d dump timing at %lld us:", task->task_index, time_diff); |
---|
| 2147 | + |
---|
| 2148 | + pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s)); |
---|
| 2149 | + LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s); |
---|
| 2150 | + LOG_TIMING(state, TASK_TIMING_PENDING, "pending", task->on_pending, s); |
---|
| 2151 | + LOG_TIMING(state, TASK_TIMING_RUN, "run", task->on_run, s); |
---|
| 2152 | + LOG_TIMING(state, TASK_TIMING_TO_SCHED, "timeout start", task->on_sched_timeout, s); |
---|
| 2153 | + LOG_TIMING(state, TASK_TIMING_RUN_END, "run end", task->on_run_end, s); |
---|
| 2154 | + LOG_TIMING(state, TASK_TIMING_IRQ, "irq", task->on_irq, s); |
---|
| 2155 | + LOG_TIMING(state, TASK_TIMING_TO_CANCEL, "timeout cancel", task->on_cancel_timeout, s); |
---|
| 2156 | + LOG_TIMING(state, TASK_TIMING_ISR, "isr", task->on_isr, s); |
---|
| 2157 | + LOG_TIMING(state, TASK_TIMING_FINISH, "finish", task->on_finish, s); |
---|
2051 | 2158 | } |
---|
2052 | 2159 | |
---|
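The timing instrumentation stores one ktime_t per stage (create, pending, run, timeout scheduling, irq, isr, finish) and mpp_task_dump_timing() prints each stage as a microsecond offset from on_create, printing "invalid" for stages whose TASK_TIMING_* bit was never set. A small userspace sketch of the same "stage timestamps relative to a base" report, with clock_gettime() standing in for ktime_get() and invented names:

```c
/* Userspace sketch of a per-stage timing report in the style of
 * mpp_task_dump_timing(): absolute timestamps per stage, printed as
 * offsets from the "create" time. Invented names. */
#include <stdio.h>
#include <time.h>

enum stage { ST_CREATE, ST_PENDING, ST_RUN, ST_IRQ, ST_FINISH, ST_NUM };

static const char *const stage_name[ST_NUM] = {
    "create", "pending", "run", "irq", "finish",
};

struct task_timing {
    unsigned long valid;             /* one bit per stage, like TASK_TIMING_* */
    struct timespec ts[ST_NUM];
};

static void mark(struct task_timing *t, enum stage s)
{
    clock_gettime(CLOCK_MONOTONIC, &t->ts[s]);
    t->valid |= 1UL << s;
}

static long long us_delta(struct timespec a, struct timespec b)
{
    return (a.tv_sec - b.tv_sec) * 1000000LL +
           (a.tv_nsec - b.tv_nsec) / 1000;
}

static void dump_timing(const struct task_timing *t)
{
    for (int s = ST_PENDING; s < ST_NUM; s++) {
        if (t->valid & (1UL << s))
            printf("timing: %-8s : %lld us\n", stage_name[s],
                   us_delta(t->ts[s], t->ts[ST_CREATE]));
        else
            printf("timing: %-8s : invalid\n", stage_name[s]);
    }
}

int main(void)
{
    struct task_timing t = { 0 };

    mark(&t, ST_CREATE);
    mark(&t, ST_PENDING);
    mark(&t, ST_RUN);
    mark(&t, ST_FINISH);             /* ST_IRQ intentionally left unset */
    dump_timing(&t);
    return 0;
}
```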
2053 | 2160 | int mpp_write_req(struct mpp_dev *mpp, u32 *regs, |
---|
.. | .. |
---|
2184 | 2291 | if (clk_rate_hz) { |
---|
2185 | 2292 | clk_info->used_rate_hz = clk_rate_hz; |
---|
2186 | 2293 | clk_set_rate(clk_info->clk, clk_rate_hz); |
---|
| 2294 | + clk_info->real_rate_hz = clk_get_rate(clk_info->clk); |
---|
2187 | 2295 | } |
---|
2188 | 2296 | |
---|
2189 | 2297 | return 0; |
---|
.. | .. |
---|
2230 | 2338 | { |
---|
2231 | 2339 | return proc_create_data(name, mode, parent, &procfs_fops_u32, data); |
---|
2232 | 2340 | } |
---|
| 2341 | + |
---|
| 2342 | +void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp) |
---|
| 2343 | +{ |
---|
| 2344 | + mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check); |
---|
| 2345 | +} |
---|
2233 | 2346 | #endif |
---|