| .. | .. |
|---|
| 53 | 53 | __u64 data_ptr; |
|---|
| 54 | 54 | }; |
|---|
| 55 | 55 | |
|---|
| 56 | +#define MPP_BAT_MSG_DONE (0x00000001) |
|---|
| 57 | + |
|---|
| 58 | +struct mpp_bat_msg { |
|---|
| 59 | + __u64 flag; |
|---|
| 60 | + __u32 fd; |
|---|
| 61 | + __s32 ret; |
|---|
| 62 | +}; |
|---|
| 63 | + |
|---|
| 56 | 64 | #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS |
|---|
| 57 | 65 | const char *mpp_device_name[MPP_DEVICE_BUTT] = { |
|---|
| 58 | 66 | [MPP_DEVICE_VDPU1] = "VDPU1", |
|---|
| 59 | 67 | [MPP_DEVICE_VDPU2] = "VDPU2", |
|---|
| 60 | 68 | [MPP_DEVICE_VDPU1_PP] = "VDPU1_PP", |
|---|
| 61 | 69 | [MPP_DEVICE_VDPU2_PP] = "VDPU2_PP", |
|---|
| 70 | + [MPP_DEVICE_AV1DEC] = "AV1DEC", |
|---|
| 62 | 71 | [MPP_DEVICE_HEVC_DEC] = "HEVC_DEC", |
|---|
| 63 | 72 | [MPP_DEVICE_RKVDEC] = "RKVDEC", |
|---|
| 64 | 73 | [MPP_DEVICE_AVSPLUS_DEC] = "AVSPLUS_DEC", |
|---|
| 74 | + [MPP_DEVICE_RKJPEGD] = "RKJPEGD", |
|---|
| 65 | 75 | [MPP_DEVICE_RKVENC] = "RKVENC", |
|---|
| 66 | 76 | [MPP_DEVICE_VEPU1] = "VEPU1", |
|---|
| 67 | 77 | [MPP_DEVICE_VEPU2] = "VEPU2", |
|---|
| 78 | + [MPP_DEVICE_VEPU2_JPEG] = "VEPU2", |
|---|
| 68 | 79 | [MPP_DEVICE_VEPU22] = "VEPU22", |
|---|
| 69 | 80 | [MPP_DEVICE_IEP2] = "IEP2", |
|---|
| 70 | 81 | [MPP_DEVICE_VDPP] = "VDPP", |
|---|
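The hunk above adds a batch message descriptor: `struct mpp_bat_msg` pairs a session fd with a completion flag and a per-message result, so one ioctl can carry work for several sessions (see the `MPP_CMD_SET_SESSION_FD` handling further down, where the collect loop skips a message whose `flag` already has `MPP_BAT_MSG_DONE` set and writes an error into `ret` when the fd lookup fails). A minimal userspace-side sketch of filling such a descriptor is below; the mirror struct and helper are illustrative assumptions, not a published UAPI.

```c
/* Hypothetical userspace mirror of the descriptor added above. */
#include <stdint.h>

struct mpp_bat_msg {
	uint64_t flag;	/* MPP_BAT_MSG_DONE once the kernel has consumed it */
	uint32_t fd;	/* session fd that the following messages target */
	int32_t  ret;	/* per-session result written back by the driver */
};

static void mpp_bat_msg_prepare(struct mpp_bat_msg *msg, int session_fd)
{
	msg->flag = 0;			/* not processed yet */
	msg->fd = (uint32_t)session_fd;
	msg->ret = 0;
}
```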
| .. | .. |
|---|
| 86 | 97 | |
|---|
| 87 | 98 | #endif |
|---|
| 88 | 99 | |
|---|
| 89 | | -static void mpp_free_task(struct kref *ref); |
|---|
| 90 | 100 | static void mpp_attach_workqueue(struct mpp_dev *mpp, |
|---|
| 91 | 101 | struct mpp_taskqueue *queue); |
|---|
| 92 | | - |
|---|
| 93 | | -/* task queue schedule */ |
|---|
| 94 | | -static int |
|---|
| 95 | | -mpp_taskqueue_push_pending(struct mpp_taskqueue *queue, |
|---|
| 96 | | - struct mpp_task *task) |
|---|
| 97 | | -{ |
|---|
| 98 | | - if (!task->session || !task->session->mpp) |
|---|
| 99 | | - return -EINVAL; |
|---|
| 100 | | - |
|---|
| 101 | | - kref_get(&task->ref); |
|---|
| 102 | | - mutex_lock(&queue->pending_lock); |
|---|
| 103 | | - list_add_tail(&task->queue_link, &queue->pending_list); |
|---|
| 104 | | - mutex_unlock(&queue->pending_lock); |
|---|
| 105 | | - |
|---|
| 106 | | - return 0; |
|---|
| 107 | | -} |
|---|
| 108 | 102 | |
|---|
| 109 | 103 | static int |
|---|
| 110 | 104 | mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue, |
|---|
| .. | .. |
|---|
| 148 | 142 | return flag; |
|---|
| 149 | 143 | } |
|---|
| 150 | 144 | |
|---|
| 151 | | -static int |
|---|
| 152 | | -mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, |
|---|
| 153 | | - struct mpp_task *task) |
|---|
| 145 | +int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task) |
|---|
| 154 | 146 | { |
|---|
| 155 | 147 | unsigned long flags; |
|---|
| 156 | 148 | |
|---|
| .. | .. |
|---|
| 230 | 222 | return 0; |
|---|
| 231 | 223 | } |
|---|
| 232 | 224 | |
|---|
| 233 | | -static int mpp_session_clear_pending(struct mpp_session *session) |
|---|
| 225 | +static void task_msgs_reset(struct mpp_task_msgs *msgs) |
|---|
| 226 | +{ |
|---|
| 227 | + list_del_init(&msgs->list); |
|---|
| 228 | + |
|---|
| 229 | + msgs->flags = 0; |
|---|
| 230 | + msgs->req_cnt = 0; |
|---|
| 231 | + msgs->set_cnt = 0; |
|---|
| 232 | + msgs->poll_cnt = 0; |
|---|
| 233 | +} |
|---|
| 234 | + |
|---|
| 235 | +static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session) |
|---|
| 236 | +{ |
|---|
| 237 | + INIT_LIST_HEAD(&msgs->list); |
|---|
| 238 | + |
|---|
| 239 | + msgs->session = session; |
|---|
| 240 | + msgs->queue = NULL; |
|---|
| 241 | + msgs->task = NULL; |
|---|
| 242 | + msgs->mpp = NULL; |
|---|
| 243 | + |
|---|
| 244 | + msgs->ext_fd = -1; |
|---|
| 245 | + |
|---|
| 246 | + task_msgs_reset(msgs); |
|---|
| 247 | +} |
|---|
| 248 | + |
|---|
| 249 | +static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session) |
|---|
| 250 | +{ |
|---|
| 251 | + unsigned long flags; |
|---|
| 252 | + struct mpp_task_msgs *msgs; |
|---|
| 253 | + |
|---|
| 254 | + spin_lock_irqsave(&session->lock_msgs, flags); |
|---|
| 255 | + msgs = list_first_entry_or_null(&session->list_msgs_idle, |
|---|
| 256 | + struct mpp_task_msgs, list_session); |
|---|
| 257 | + if (msgs) { |
|---|
| 258 | + list_move_tail(&msgs->list_session, &session->list_msgs); |
|---|
| 259 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
|---|
| 260 | + |
|---|
| 261 | + return msgs; |
|---|
| 262 | + } |
|---|
| 263 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
|---|
| 264 | + |
|---|
| 265 | + msgs = kzalloc(sizeof(*msgs), GFP_KERNEL); |
|---|
| 266 | + task_msgs_init(msgs, session); |
|---|
| 267 | + INIT_LIST_HEAD(&msgs->list_session); |
|---|
| 268 | + |
|---|
| 269 | + spin_lock_irqsave(&session->lock_msgs, flags); |
|---|
| 270 | + list_move_tail(&msgs->list_session, &session->list_msgs); |
|---|
| 271 | + session->msgs_cnt++; |
|---|
| 272 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
|---|
| 273 | + |
|---|
| 274 | + mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n", |
|---|
| 275 | + session->pid, session->index, session->msgs_cnt); |
|---|
| 276 | + |
|---|
| 277 | + return msgs; |
|---|
| 278 | +} |
|---|
| 279 | + |
|---|
| 280 | +static void put_task_msgs(struct mpp_task_msgs *msgs) |
|---|
| 281 | +{ |
|---|
| 282 | + struct mpp_session *session = msgs->session; |
|---|
| 283 | + unsigned long flags; |
|---|
| 284 | + |
|---|
| 285 | + if (!session) { |
|---|
| 286 | + pr_err("invalid msgs without session\n"); |
|---|
| 287 | + return; |
|---|
| 288 | + } |
|---|
| 289 | + |
|---|
| 290 | + if (msgs->ext_fd >= 0) { |
|---|
| 291 | + fdput(msgs->f); |
|---|
| 292 | + msgs->ext_fd = -1; |
|---|
| 293 | + } |
|---|
| 294 | + |
|---|
| 295 | + task_msgs_reset(msgs); |
|---|
| 296 | + |
|---|
| 297 | + spin_lock_irqsave(&session->lock_msgs, flags); |
|---|
| 298 | + list_move_tail(&msgs->list_session, &session->list_msgs_idle); |
|---|
| 299 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
|---|
| 300 | +} |
|---|
| 301 | + |
|---|
| 302 | +static void clear_task_msgs(struct mpp_session *session) |
|---|
| 303 | +{ |
|---|
| 304 | + struct mpp_task_msgs *msgs, *n; |
|---|
| 305 | + LIST_HEAD(list_to_free); |
|---|
| 306 | + unsigned long flags; |
|---|
| 307 | + |
|---|
| 308 | + spin_lock_irqsave(&session->lock_msgs, flags); |
|---|
| 309 | + |
|---|
| 310 | + list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session) |
|---|
| 311 | + list_move_tail(&msgs->list_session, &list_to_free); |
|---|
| 312 | + |
|---|
| 313 | + list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session) |
|---|
| 314 | + list_move_tail(&msgs->list_session, &list_to_free); |
|---|
| 315 | + |
|---|
| 316 | + spin_unlock_irqrestore(&session->lock_msgs, flags); |
|---|
| 317 | + |
|---|
| 318 | + list_for_each_entry_safe(msgs, n, &list_to_free, list_session) |
|---|
| 319 | + kfree(msgs); |
|---|
| 320 | +} |
|---|
| 321 | + |
|---|
| 322 | +static void mpp_session_clear_pending(struct mpp_session *session) |
|---|
| 234 | 323 | { |
|---|
| 235 | 324 | struct mpp_task *task = NULL, *n; |
|---|
| 236 | 325 | |
|---|
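The new `task_msgs` helpers above give each session a small recycle pool: `get_task_msgs()` pops a container from `list_msgs_idle` under `lock_msgs` and only falls back to `kzalloc()` when the idle list is empty, `put_task_msgs()` returns the container, and `clear_task_msgs()` frees everything at session teardown (note that the `kzalloc()` result is passed to `task_msgs_init()` without a NULL check). The sketch below isolates the same idle-list pattern with illustrative names; it is not the patch's exact code and assumes `<linux/list.h>`, `<linux/slab.h>` and `<linux/spinlock.h>`.

```c
struct item_pool {
	spinlock_t lock;
	struct list_head busy;		/* containers handed out */
	struct list_head idle;		/* containers ready for reuse */
};

struct pooled_item {
	struct list_head link;
};

static struct pooled_item *pool_get(struct item_pool *pool)
{
	struct pooled_item *item;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	item = list_first_entry_or_null(&pool->idle, struct pooled_item, link);
	if (item)
		list_move_tail(&item->link, &pool->busy);
	spin_unlock_irqrestore(&pool->lock, flags);
	if (item)
		return item;

	/* idle list empty: allocate a fresh container */
	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return NULL;
	INIT_LIST_HEAD(&item->link);

	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&item->link, &pool->busy);
	spin_unlock_irqrestore(&pool->lock, flags);

	return item;
}

static void pool_put(struct item_pool *pool, struct pooled_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	list_move_tail(&item->link, &pool->idle);
	spin_unlock_irqrestore(&pool->lock, flags);
}
```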
| .. | .. |
|---|
| 245 | 334 | kref_put(&task->ref, mpp_free_task); |
|---|
| 246 | 335 | } |
|---|
| 247 | 336 | mutex_unlock(&session->pending_lock); |
|---|
| 248 | | - |
|---|
| 249 | | - return 0; |
|---|
| 250 | 337 | } |
|---|
| 251 | 338 | |
|---|
| 252 | 339 | void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work) |
|---|
| .. | .. |
|---|
| 309 | 396 | atomic_set(&session->task_count, 0); |
|---|
| 310 | 397 | atomic_set(&session->release_request, 0); |
|---|
| 311 | 398 | |
|---|
| 399 | + INIT_LIST_HEAD(&session->list_msgs); |
|---|
| 400 | + INIT_LIST_HEAD(&session->list_msgs_idle); |
|---|
| 401 | + spin_lock_init(&session->lock_msgs); |
|---|
| 402 | + |
|---|
| 312 | 403 | mpp_dbg_session("session %p init\n", session); |
|---|
| 313 | 404 | return session; |
|---|
| 314 | 405 | } |
|---|
| .. | .. |
|---|
| 352 | 443 | else |
|---|
| 353 | 444 | pr_err("invalid NULL session deinit function\n"); |
|---|
| 354 | 445 | |
|---|
| 355 | | - mpp_dbg_session("session %p:%d deinit\n", session, session->index); |
|---|
| 446 | + clear_task_msgs(session); |
|---|
| 356 | 447 | |
|---|
| 357 | 448 | kfree(session); |
|---|
| 358 | 449 | } |
|---|
| .. | .. |
|---|
| 429 | 520 | return task; |
|---|
| 430 | 521 | } |
|---|
| 431 | 522 | |
|---|
| 432 | | -static void mpp_free_task(struct kref *ref) |
|---|
| 523 | +void mpp_free_task(struct kref *ref) |
|---|
| 433 | 524 | { |
|---|
| 434 | 525 | struct mpp_dev *mpp; |
|---|
| 435 | 526 | struct mpp_session *session; |
|---|
| .. | .. |
|---|
| 441 | 532 | } |
|---|
| 442 | 533 | session = task->session; |
|---|
| 443 | 534 | |
|---|
| 444 | | - mpp_debug_func(DEBUG_TASK_INFO, |
|---|
| 445 | | - "session %d:%d task %d state 0x%lx abort_request %d\n", |
|---|
| 446 | | - session->device_type, session->index, task->task_index, |
|---|
| 447 | | - task->state, atomic_read(&task->abort_request)); |
|---|
| 448 | | - if (!session->mpp) { |
|---|
| 449 | | - mpp_err("session %p, session->mpp is null.\n", session); |
|---|
| 450 | | - return; |
|---|
| 451 | | - } |
|---|
| 452 | | - mpp = session->mpp; |
|---|
| 535 | + mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n", |
|---|
| 536 | + session->index, task->task_id, task->state, |
|---|
| 537 | + atomic_read(&task->abort_request)); |
|---|
| 453 | 538 | |
|---|
| 539 | + mpp = mpp_get_task_used_device(task, session); |
|---|
| 454 | 540 | if (mpp->dev_ops->free_task) |
|---|
| 455 | 541 | mpp->dev_ops->free_task(session, task); |
|---|
| 542 | + |
|---|
| 456 | 543 | /* Decrease reference count */ |
|---|
| 457 | 544 | atomic_dec(&session->task_count); |
|---|
| 458 | 545 | atomic_dec(&mpp->task_count); |
|---|
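From here on the patch replaces most uses of `session->mpp` with `mpp_get_task_used_device(task, session)`, so per-task accounting follows the core the task actually ran on. The helper itself is defined outside this hunk; a plausible shape, assuming a per-task `mpp` pointer set at dispatch time, is:

```c
/* Assumed sketch only -- the real helper lives elsewhere in the driver.
 * Prefer the device the task was dispatched to; fall back to the
 * session's default device when the task has not been assigned yet.
 */
static inline struct mpp_dev *
mpp_get_task_used_device(struct mpp_task *task, struct mpp_session *session)
{
	if (task->mpp)
		return task->mpp;

	return session->mpp;
}
```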
| .. | .. |
|---|
| 466 | 553 | struct mpp_task, |
|---|
| 467 | 554 | timeout_work); |
|---|
| 468 | 555 | |
|---|
| 469 | | - if (!test_bit(TASK_STATE_START, &task->state)) { |
|---|
| 470 | | - mpp_err("task has not start\n"); |
|---|
| 471 | | - schedule_delayed_work(&task->timeout_work, |
|---|
| 472 | | - msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY)); |
|---|
| 556 | + if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) { |
|---|
| 557 | + mpp_err("task has been handled\n"); |
|---|
| 473 | 558 | return; |
|---|
| 474 | 559 | } |
|---|
| 475 | 560 | |
|---|
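The timeout worker now claims the task with `test_and_set_bit(TASK_STATE_HANDLE, ...)` instead of re-arming itself, so exactly one of the interrupt path and the timeout path ends up handling the task. The snippet below isolates that idiom with illustrative names:

```c
/* test_and_set_bit() atomically sets the bit and returns its previous
 * value, so only the first caller observes 0 and wins the right to
 * handle the task; every later caller backs off.
 */
static bool mpp_task_claim_once(unsigned long *state, int handle_bit)
{
	return !test_and_set_bit(handle_bit, state);
}
```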
| .. | .. |
|---|
| 479 | 564 | } |
|---|
| 480 | 565 | |
|---|
| 481 | 566 | session = task->session; |
|---|
| 567 | + mpp_err("task %d:%d:%d processing time out!\n", session->pid, |
|---|
| 568 | + session->index, task->task_id); |
|---|
| 482 | 569 | |
|---|
| 483 | 570 | if (!session->mpp) { |
|---|
| 484 | 571 | mpp_err("session %d:%d, session mpp is null.\n", session->pid, |
|---|
| 485 | 572 | session->index); |
|---|
| 486 | 573 | return; |
|---|
| 487 | 574 | } |
|---|
| 488 | | - mpp = session->mpp; |
|---|
| 489 | | - dev_err(mpp->dev, "session %d:%d task %d state %lx processing time out!\n", |
|---|
| 490 | | - session->device_type, session->index, task->task_index, task->state); |
|---|
| 491 | | - synchronize_hardirq(mpp->irq); |
|---|
| 492 | | - |
|---|
| 493 | | - if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) { |
|---|
| 494 | | - mpp_err("task has been handled\n"); |
|---|
| 495 | | - return; |
|---|
| 496 | | - } |
|---|
| 497 | 575 | |
|---|
| 498 | 576 | mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create)); |
|---|
| 577 | + |
|---|
| 578 | + mpp = mpp_get_task_used_device(task, session); |
|---|
| 499 | 579 | |
|---|
| 500 | 580 | /* disable core irq */ |
|---|
| 501 | 581 | disable_irq(mpp->irq); |
|---|
| 502 | 582 | /* disable mmu irq */ |
|---|
| 503 | | - mpp_iommu_disable_irq(mpp->iommu_info); |
|---|
| 583 | + if (mpp->iommu_info && mpp->iommu_info->got_irq) |
|---|
| 584 | + disable_irq(mpp->iommu_info->irq); |
|---|
| 504 | 585 | |
|---|
| 505 | 586 | /* hardware maybe dead, reset it */ |
|---|
| 506 | 587 | mpp_reset_up_read(mpp->reset_group); |
|---|
| .. | .. |
|---|
| 518 | 599 | /* enable core irq */ |
|---|
| 519 | 600 | enable_irq(mpp->irq); |
|---|
| 520 | 601 | /* enable mmu irq */ |
|---|
| 521 | | - mpp_iommu_enable_irq(mpp->iommu_info); |
|---|
| 602 | + if (mpp->iommu_info && mpp->iommu_info->got_irq) |
|---|
| 603 | + enable_irq(mpp->iommu_info->irq); |
|---|
| 522 | 604 | |
|---|
| 523 | 605 | mpp_taskqueue_trigger_work(mpp); |
|---|
| 524 | 606 | } |
|---|
| 525 | 607 | |
|---|
| 526 | 608 | static int mpp_process_task_default(struct mpp_session *session, |
|---|
| 527 | | - struct mpp_task_msgs *msgs) |
|---|
| 609 | + struct mpp_task_msgs *msgs) |
|---|
| 528 | 610 | { |
|---|
| 529 | 611 | struct mpp_task *task = NULL; |
|---|
| 530 | 612 | struct mpp_dev *mpp = session->mpp; |
|---|
| .. | .. |
|---|
| 532 | 614 | ktime_t on_create; |
|---|
| 533 | 615 | |
|---|
| 534 | 616 | if (unlikely(!mpp)) { |
|---|
| 535 | | - mpp_err("pid %d clinet %d found invalid process function\n", |
|---|
| 617 | + mpp_err("pid %d client %d found invalid process function\n", |
|---|
| 536 | 618 | session->pid, session->device_type); |
|---|
| 537 | 619 | return -EINVAL; |
|---|
| 538 | 620 | } |
|---|
| .. | .. |
|---|
| 555 | 637 | set_bit(TASK_TIMING_CREATE, &task->state); |
|---|
| 556 | 638 | } |
|---|
| 557 | 639 | |
|---|
| 640 | + /* ensure current device */ |
|---|
| 641 | + mpp = mpp_get_task_used_device(task, session); |
|---|
| 642 | + |
|---|
| 558 | 643 | kref_init(&task->ref); |
|---|
| 559 | 644 | init_waitqueue_head(&task->wait); |
|---|
| 560 | 645 | atomic_set(&task->abort_request, 0); |
|---|
| 561 | 646 | task->task_index = atomic_fetch_inc(&mpp->task_index); |
|---|
| 647 | + task->task_id = atomic_fetch_inc(&mpp->queue->task_id); |
|---|
| 562 | 648 | INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work); |
|---|
| 563 | 649 | |
|---|
| 564 | 650 | if (mpp->auto_freq_en && mpp->hw_ops->get_freq) |
|---|
| 565 | 651 | mpp->hw_ops->get_freq(mpp, task); |
|---|
| 652 | + |
|---|
| 653 | + msgs->queue = mpp->queue; |
|---|
| 654 | + msgs->task = task; |
|---|
| 655 | + msgs->mpp = mpp; |
|---|
| 566 | 656 | |
|---|
| 567 | 657 | /* |
|---|
| 568 | 658 | * Push task to session should be in front of push task to queue. |
|---|
| .. | .. |
|---|
| 572 | 662 | */ |
|---|
| 573 | 663 | atomic_inc(&session->task_count); |
|---|
| 574 | 664 | mpp_session_push_pending(session, task); |
|---|
| 575 | | - /* push current task to queue */ |
|---|
| 576 | | - atomic_inc(&mpp->task_count); |
|---|
| 577 | | - mpp_taskqueue_push_pending(mpp->queue, task); |
|---|
| 578 | | - set_bit(TASK_STATE_PENDING, &task->state); |
|---|
| 579 | | - /* trigger current queue to run task */ |
|---|
| 580 | | - mpp_taskqueue_trigger_work(mpp); |
|---|
| 581 | | - kref_put(&task->ref, mpp_free_task); |
|---|
| 582 | | - mpp_debug_func(DEBUG_TASK_INFO, |
|---|
| 583 | | - "session %d:%d task %d state 0x%lx\n", |
|---|
| 584 | | - session->device_type, session->index, |
|---|
| 585 | | - task->task_index, task->state); |
|---|
| 665 | + |
|---|
| 586 | 666 | return 0; |
|---|
| 587 | 667 | } |
|---|
| 588 | 668 | |
|---|
| .. | .. |
|---|
| 637 | 717 | group->resets[type] = rst; |
|---|
| 638 | 718 | group->queue = mpp->queue; |
|---|
| 639 | 719 | } |
|---|
| 720 | + /* If devices on different taskqueues share this reset group, they |
|---|
| 721 | + * may reset at the same time, so rw_sem_on must be set to true. |
|---|
| 722 | + */ |
|---|
| 723 | + group->rw_sem_on |= (group->queue != mpp->queue) ? true : false; |
|---|
| 640 | 724 | dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on); |
|---|
| 641 | 725 | up_write(&group->rw_sem); |
|---|
| 642 | 726 | |
|---|
| .. | .. |
|---|
| 662 | 746 | mpp_iommu_down_write(mpp->iommu_info); |
|---|
| 663 | 747 | mpp_reset_down_write(mpp->reset_group); |
|---|
| 664 | 748 | atomic_set(&mpp->reset_request, 0); |
|---|
| 665 | | - rockchip_save_qos(mpp->dev); |
|---|
| 749 | + |
|---|
| 666 | 750 | if (mpp->hw_ops->reset) |
|---|
| 667 | 751 | mpp->hw_ops->reset(mpp); |
|---|
| 668 | | - rockchip_restore_qos(mpp->dev); |
|---|
| 669 | 752 | |
|---|
| 670 | 753 | /* Note: if the domain does not change, iommu attach will be return |
|---|
| 671 | 754 | * as an empty operation. Therefore, force to close and then open, |
|---|
| .. | .. |
|---|
| 714 | 797 | struct mpp_task *task) |
|---|
| 715 | 798 | { |
|---|
| 716 | 799 | int ret; |
|---|
| 717 | | - struct mpp_session *session = task->session; |
|---|
| 718 | 800 | u32 timing_en; |
|---|
| 719 | 801 | |
|---|
| 720 | 802 | mpp_debug_enter(); |
|---|
| .. | .. |
|---|
| 749 | 831 | } |
|---|
| 750 | 832 | |
|---|
| 751 | 833 | mpp_power_on(mpp); |
|---|
| 752 | | - mpp_time_record(task); |
|---|
| 753 | | - mpp_debug_func(DEBUG_TASK_INFO, |
|---|
| 754 | | - "%s session %d:%d task=%d state=0x%lx\n", |
|---|
| 755 | | - dev_name(mpp->dev), session->device_type, |
|---|
| 756 | | - session->index, task->task_index, task->state); |
|---|
| 834 | + mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n", |
|---|
| 835 | + task->session->pid, dev_name(mpp->dev)); |
|---|
| 757 | 836 | |
|---|
| 758 | 837 | if (mpp->auto_freq_en && mpp->hw_ops->set_freq) |
|---|
| 759 | 838 | mpp->hw_ops->set_freq(mpp, task); |
|---|
| .. | .. |
|---|
| 763 | 842 | */ |
|---|
| 764 | 843 | mpp_reset_down_read(mpp->reset_group); |
|---|
| 765 | 844 | |
|---|
| 845 | + mpp_iommu_dev_activate(mpp->iommu_info, mpp); |
|---|
| 766 | 846 | if (mpp->dev_ops->run) |
|---|
| 767 | 847 | mpp->dev_ops->run(mpp, task); |
|---|
| 768 | | - set_bit(TASK_STATE_START, &task->state); |
|---|
| 769 | 848 | |
|---|
| 770 | 849 | mpp_debug_leave(); |
|---|
| 771 | 850 | |
|---|
| .. | .. |
|---|
| 780 | 859 | |
|---|
| 781 | 860 | mpp_debug_enter(); |
|---|
| 782 | 861 | |
|---|
| 783 | | -get_task: |
|---|
| 862 | +again: |
|---|
| 784 | 863 | task = mpp_taskqueue_get_pending_task(queue); |
|---|
| 785 | 864 | if (!task) |
|---|
| 786 | 865 | goto done; |
|---|
| .. | .. |
|---|
| 788 | 867 | /* if task timeout and aborted, remove it */ |
|---|
| 789 | 868 | if (atomic_read(&task->abort_request) > 0) { |
|---|
| 790 | 869 | mpp_taskqueue_pop_pending(queue, task); |
|---|
| 791 | | - goto get_task; |
|---|
| 870 | + goto again; |
|---|
| 792 | 871 | } |
|---|
| 793 | 872 | |
|---|
| 794 | 873 | /* get device for current task */ |
|---|
| .. | .. |
|---|
| 813 | 892 | */ |
|---|
| 814 | 893 | /* Push a pending task to running queue */ |
|---|
| 815 | 894 | if (task) { |
|---|
| 895 | + struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session); |
|---|
| 896 | + |
|---|
| 897 | + atomic_inc(&task_mpp->task_count); |
|---|
| 816 | 898 | mpp_taskqueue_pending_to_run(queue, task); |
|---|
| 817 | 899 | set_bit(TASK_STATE_RUNNING, &task->state); |
|---|
| 818 | | - if (mpp_task_run(mpp, task)) |
|---|
| 819 | | - mpp_taskqueue_pop_running(mpp->queue, task); |
|---|
| 900 | + if (mpp_task_run(task_mpp, task)) |
|---|
| 901 | + mpp_taskqueue_pop_running(queue, task); |
|---|
| 902 | + else |
|---|
| 903 | + goto again; |
|---|
| 820 | 904 | } |
|---|
| 821 | 905 | |
|---|
| 822 | 906 | done: |
|---|
| .. | .. |
|---|
| 824 | 908 | } |
|---|
| 825 | 909 | |
|---|
| 826 | 910 | static int mpp_wait_result_default(struct mpp_session *session, |
|---|
| 827 | | - struct mpp_task_msgs *msgs) |
|---|
| 911 | + struct mpp_task_msgs *msgs) |
|---|
| 828 | 912 | { |
|---|
| 829 | 913 | int ret; |
|---|
| 830 | 914 | struct mpp_task *task; |
|---|
| 831 | | - struct mpp_dev *mpp = session->mpp; |
|---|
| 832 | | - |
|---|
| 833 | | - if (unlikely(!mpp)) { |
|---|
| 834 | | - mpp_err("pid %d clinet %d found invalid wait result function\n", |
|---|
| 835 | | - session->pid, session->device_type); |
|---|
| 836 | | - return -EINVAL; |
|---|
| 837 | | - } |
|---|
| 915 | + struct mpp_dev *mpp; |
|---|
| 838 | 916 | |
|---|
| 839 | 917 | task = mpp_session_get_pending_task(session); |
|---|
| 840 | 918 | if (!task) { |
|---|
| .. | .. |
|---|
| 842 | 920 | session->pid, session->index); |
|---|
| 843 | 921 | return -EIO; |
|---|
| 844 | 922 | } |
|---|
| 923 | + mpp = mpp_get_task_used_device(task, session); |
|---|
| 845 | 924 | |
|---|
| 846 | 925 | ret = wait_event_timeout(task->wait, |
|---|
| 847 | 926 | test_bit(TASK_STATE_DONE, &task->state), |
|---|
| .. | .. |
|---|
| 852 | 931 | } else { |
|---|
| 853 | 932 | atomic_inc(&task->abort_request); |
|---|
| 854 | 933 | set_bit(TASK_STATE_ABORT, &task->state); |
|---|
| 855 | | - mpp_err("timeout, pid %d session %d:%d count %d cur_task %d state %lx\n", |
|---|
| 856 | | - session->pid, session->device_type, session->index, |
|---|
| 857 | | - atomic_read(&session->task_count), task->task_index, task->state); |
|---|
| 934 | + mpp_err("timeout, pid %d session %d:%d count %d cur_task %p id %d\n", |
|---|
| 935 | + session->pid, session->pid, session->index, |
|---|
| 936 | + atomic_read(&session->task_count), task, |
|---|
| 937 | + task->task_id); |
|---|
| 858 | 938 | } |
|---|
| 859 | 939 | |
|---|
| 860 | | - mpp_debug_func(DEBUG_TASK_INFO, |
|---|
| 861 | | - "session %d:%d task %d state 0x%lx kref_read %d ret %d\n", |
|---|
| 862 | | - session->device_type, |
|---|
| 863 | | - session->index, task->task_index, task->state, |
|---|
| 864 | | - kref_read(&task->ref), ret); |
|---|
| 940 | + mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n", |
|---|
| 941 | + task->task_id, kref_read(&task->ref)); |
|---|
| 942 | + |
|---|
| 865 | 943 | mpp_session_pop_pending(session, task); |
|---|
| 866 | 944 | |
|---|
| 867 | 945 | return ret; |
|---|
| .. | .. |
|---|
| 896 | 974 | of_node_put(np); |
|---|
| 897 | 975 | if (!pdev) { |
|---|
| 898 | 976 | dev_err(dev, "failed to get mpp service from node\n"); |
|---|
| 899 | | - ret = -ENODEV; |
|---|
| 900 | | - goto err_put_pdev; |
|---|
| 977 | + return -ENODEV; |
|---|
| 901 | 978 | } |
|---|
| 902 | 979 | |
|---|
| 903 | | - mpp->pdev_srv = pdev; |
|---|
| 904 | 980 | mpp->srv = platform_get_drvdata(pdev); |
|---|
| 981 | + platform_device_put(pdev); |
|---|
| 905 | 982 | if (!mpp->srv) { |
|---|
| 906 | | - dev_err(&pdev->dev, "failed attach service\n"); |
|---|
| 907 | | - ret = -EINVAL; |
|---|
| 908 | | - goto err_put_pdev; |
|---|
| 983 | + dev_err(dev, "failed attach service\n"); |
|---|
| 984 | + return -EINVAL; |
|---|
| 909 | 985 | } |
|---|
| 910 | 986 | |
|---|
| 911 | 987 | ret = of_property_read_u32(dev->of_node, |
|---|
| 912 | 988 | "rockchip,taskqueue-node", &taskqueue_node); |
|---|
| 913 | 989 | if (ret) { |
|---|
| 914 | 990 | dev_err(dev, "failed to get taskqueue-node\n"); |
|---|
| 915 | | - goto err_put_pdev; |
|---|
| 991 | + return ret; |
|---|
| 916 | 992 | } else if (taskqueue_node >= mpp->srv->taskqueue_cnt) { |
|---|
| 917 | 993 | dev_err(dev, "taskqueue-node %d must less than %d\n", |
|---|
| 918 | 994 | taskqueue_node, mpp->srv->taskqueue_cnt); |
|---|
| 919 | | - ret = -ENODEV; |
|---|
| 920 | | - goto err_put_pdev; |
|---|
| 995 | + return -ENODEV; |
|---|
| 921 | 996 | } |
|---|
| 922 | 997 | /* set taskqueue according dtsi */ |
|---|
| 923 | 998 | queue = mpp->srv->task_queues[taskqueue_node]; |
|---|
| 924 | 999 | if (!queue) { |
|---|
| 925 | 1000 | dev_err(dev, "taskqueue attach to invalid node %d\n", |
|---|
| 926 | 1001 | taskqueue_node); |
|---|
| 927 | | - ret = -ENODEV; |
|---|
| 928 | | - goto err_put_pdev; |
|---|
| 1002 | + return -ENODEV; |
|---|
| 929 | 1003 | } |
|---|
| 930 | 1004 | mpp_attach_workqueue(mpp, queue); |
|---|
| 931 | 1005 | |
|---|
| .. | .. |
|---|
| 936 | 1010 | if (reset_group_node >= mpp->srv->reset_group_cnt) { |
|---|
| 937 | 1011 | dev_err(dev, "resetgroup-node %d must less than %d\n", |
|---|
| 938 | 1012 | reset_group_node, mpp->srv->reset_group_cnt); |
|---|
| 939 | | - ret = -ENODEV; |
|---|
| 940 | | - goto err_put_pdev; |
|---|
| 1013 | + return -ENODEV; |
|---|
| 941 | 1014 | } else { |
|---|
| 942 | 1015 | mpp->reset_group = mpp->srv->reset_groups[reset_group_node]; |
|---|
| 943 | | - if (!mpp->reset_group->queue) |
|---|
| 944 | | - mpp->reset_group->queue = queue; |
|---|
| 945 | | - if (mpp->reset_group->queue != mpp->queue) |
|---|
| 946 | | - mpp->reset_group->rw_sem_on = true; |
|---|
| 947 | 1016 | } |
|---|
| 948 | 1017 | } |
|---|
| 949 | 1018 | |
|---|
| 950 | 1019 | return 0; |
|---|
| 951 | | - |
|---|
| 952 | | -err_put_pdev: |
|---|
| 953 | | - platform_device_put(pdev); |
|---|
| 954 | | - |
|---|
| 955 | | - return ret; |
|---|
| 956 | 1020 | } |
|---|
| 957 | 1021 | |
|---|
| 958 | 1022 | struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev) |
|---|
| .. | .. |
|---|
| 976 | 1040 | |
|---|
| 977 | 1041 | /* default taskqueue has max 16 task capacity */ |
|---|
| 978 | 1042 | queue->task_capacity = MPP_MAX_TASK_CAPACITY; |
|---|
| 979 | | - |
|---|
| 980 | | - mutex_init(&queue->ref_lock); |
|---|
| 981 | | - atomic_set(&queue->runtime_cnt, 0); |
|---|
| 1043 | + atomic_set(&queue->reset_request, 0); |
|---|
| 982 | 1044 | atomic_set(&queue->detach_count, 0); |
|---|
| 1045 | + atomic_set(&queue->task_id, 0); |
|---|
| 1046 | + queue->dev_active_flags = 0; |
|---|
| 983 | 1047 | |
|---|
| 984 | 1048 | return queue; |
|---|
| 985 | 1049 | } |
|---|
| .. | .. |
|---|
| 987 | 1051 | static void mpp_attach_workqueue(struct mpp_dev *mpp, |
|---|
| 988 | 1052 | struct mpp_taskqueue *queue) |
|---|
| 989 | 1053 | { |
|---|
| 990 | | - mpp->queue = queue; |
|---|
| 1054 | + s32 core_id; |
|---|
| 1055 | + |
|---|
| 991 | 1056 | INIT_LIST_HEAD(&mpp->queue_link); |
|---|
| 1057 | + |
|---|
| 992 | 1058 | mutex_lock(&queue->dev_lock); |
|---|
| 1059 | + |
|---|
| 1060 | + if (mpp->core_id >= 0) |
|---|
| 1061 | + core_id = mpp->core_id; |
|---|
| 1062 | + else |
|---|
| 1063 | + core_id = queue->core_count; |
|---|
| 1064 | + |
|---|
| 1065 | + if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) { |
|---|
| 1066 | + dev_err(mpp->dev, "invalid core id %d\n", core_id); |
|---|
| 1067 | + goto done; |
|---|
| 1068 | + } |
|---|
| 1069 | + |
|---|
| 1070 | + /* |
|---|
| 1071 | + * Multiple devices without multiple cores may share one queue; |
|---|
| 1072 | + * in that case each core_id keeps its default value 0. |
|---|
| 1073 | + */ |
|---|
| 1074 | + if (queue->cores[core_id]) { |
|---|
| 1075 | + if (queue->cores[core_id] == mpp) |
|---|
| 1076 | + goto done; |
|---|
| 1077 | + |
|---|
| 1078 | + core_id = queue->core_count; |
|---|
| 1079 | + } |
|---|
| 1080 | + |
|---|
| 1081 | + queue->cores[core_id] = mpp; |
|---|
| 1082 | + queue->core_count++; |
|---|
| 1083 | + |
|---|
| 1084 | + set_bit(core_id, &queue->core_idle); |
|---|
| 993 | 1085 | list_add_tail(&mpp->queue_link, &queue->dev_list); |
|---|
| 1086 | + if (queue->core_id_max < (u32)core_id) |
|---|
| 1087 | + queue->core_id_max = (u32)core_id; |
|---|
| 1088 | + |
|---|
| 1089 | + mpp->core_id = core_id; |
|---|
| 1090 | + mpp->queue = queue; |
|---|
| 1091 | + |
|---|
| 1092 | + mpp_dbg_core("%s attach queue as core %d\n", |
|---|
| 1093 | + dev_name(mpp->dev), mpp->core_id); |
|---|
| 1094 | + |
|---|
| 994 | 1095 | if (queue->task_capacity > mpp->task_capacity) |
|---|
| 995 | 1096 | queue->task_capacity = mpp->task_capacity; |
|---|
| 1097 | + |
|---|
| 1098 | +done: |
|---|
| 996 | 1099 | mutex_unlock(&queue->dev_lock); |
|---|
| 997 | 1100 | } |
|---|
| 998 | 1101 | |
|---|
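`mpp_attach_workqueue()` now registers each device as a core of its taskqueue: `queue->cores[]` maps core id to device, `queue->core_idle` keeps one availability bit per core, and `core_count`/`core_id_max` bound the search range. How the scheduler consumes this bookkeeping is outside this hunk; one possible consumer, sketched under the assumption that these fields are used for core selection, is:

```c
/* Illustrative only: pick and claim an idle core from the bitmap kept
 * by mpp_attach_workqueue(). Requires <linux/bitops.h>.
 */
static struct mpp_dev *mpp_pick_idle_core(struct mpp_taskqueue *queue)
{
	unsigned long id;

	for_each_set_bit(id, &queue->core_idle, queue->core_id_max + 1) {
		if (test_and_clear_bit(id, &queue->core_idle))
			return queue->cores[id];
	}

	return NULL;	/* every attached core is busy */
}
```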
| .. | .. |
|---|
| 1002 | 1105 | |
|---|
| 1003 | 1106 | if (queue) { |
|---|
| 1004 | 1107 | mutex_lock(&queue->dev_lock); |
|---|
| 1108 | + |
|---|
| 1109 | + queue->cores[mpp->core_id] = NULL; |
|---|
| 1110 | + queue->core_count--; |
|---|
| 1111 | + |
|---|
| 1112 | + clear_bit(mpp->core_id, &queue->core_idle); |
|---|
| 1005 | 1113 | list_del_init(&mpp->queue_link); |
|---|
| 1114 | + |
|---|
| 1115 | + mpp->queue = NULL; |
|---|
| 1116 | + |
|---|
| 1006 | 1117 | mutex_unlock(&queue->dev_lock); |
|---|
| 1007 | 1118 | } |
|---|
| 1008 | 1119 | } |
|---|
| .. | .. |
|---|
| 1018 | 1129 | found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found; |
|---|
| 1019 | 1130 | |
|---|
| 1020 | 1131 | return found ? 0 : -EINVAL; |
|---|
| 1021 | | -} |
|---|
| 1022 | | - |
|---|
| 1023 | | -static int mpp_parse_msg_v1(struct mpp_msg_v1 *msg, |
|---|
| 1024 | | - struct mpp_request *req) |
|---|
| 1025 | | -{ |
|---|
| 1026 | | - int ret = 0; |
|---|
| 1027 | | - |
|---|
| 1028 | | - req->cmd = msg->cmd; |
|---|
| 1029 | | - req->flags = msg->flags; |
|---|
| 1030 | | - req->size = msg->size; |
|---|
| 1031 | | - req->offset = msg->offset; |
|---|
| 1032 | | - req->data = (void __user *)(unsigned long)msg->data_ptr; |
|---|
| 1033 | | - |
|---|
| 1034 | | - mpp_debug(DEBUG_IOCTL, "cmd %x, flags %08x, size %d, offset %x\n", |
|---|
| 1035 | | - req->cmd, req->flags, req->size, req->offset); |
|---|
| 1036 | | - |
|---|
| 1037 | | - ret = mpp_check_cmd_v1(req->cmd); |
|---|
| 1038 | | - if (ret) |
|---|
| 1039 | | - mpp_err("mpp cmd %x is not supproted.\n", req->cmd); |
|---|
| 1040 | | - |
|---|
| 1041 | | - return ret; |
|---|
| 1042 | 1132 | } |
|---|
| 1043 | 1133 | |
|---|
| 1044 | 1134 | static inline int mpp_msg_is_last(struct mpp_request *req) |
|---|
| .. | .. |
|---|
| 1090 | 1180 | int ret; |
|---|
| 1091 | 1181 | struct mpp_dev *mpp; |
|---|
| 1092 | 1182 | |
|---|
| 1093 | | - mpp_debug(DEBUG_IOCTL, "req->cmd %x\n", req->cmd); |
|---|
| 1183 | + mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd); |
|---|
| 1184 | + |
|---|
| 1094 | 1185 | switch (req->cmd) { |
|---|
| 1095 | 1186 | case MPP_CMD_QUERY_HW_SUPPORT: { |
|---|
| 1096 | 1187 | u32 hw_support = srv->hw_support; |
|---|
| .. | .. |
|---|
| 1116 | 1207 | if (test_bit(client_type, &srv->hw_support)) |
|---|
| 1117 | 1208 | mpp = srv->sub_devices[client_type]; |
|---|
| 1118 | 1209 | } |
|---|
| 1210 | + |
|---|
| 1119 | 1211 | if (!mpp) |
|---|
| 1120 | 1212 | return -EINVAL; |
|---|
| 1213 | + |
|---|
| 1121 | 1214 | hw_info = mpp->var->hw_info; |
|---|
| 1122 | 1215 | mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id); |
|---|
| 1123 | 1216 | if (put_user(hw_info->hw_id, (u32 __user *)req->data)) |
|---|
| .. | .. |
|---|
| 1148 | 1241 | mpp = srv->sub_devices[client_type]; |
|---|
| 1149 | 1242 | if (!mpp) |
|---|
| 1150 | 1243 | return -EINVAL; |
|---|
| 1244 | + |
|---|
| 1151 | 1245 | session->device_type = (enum MPP_DEVICE_TYPE)client_type; |
|---|
| 1152 | 1246 | session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers); |
|---|
| 1153 | 1247 | session->mpp = mpp; |
|---|
| .. | .. |
|---|
| 1169 | 1263 | if (ret) |
|---|
| 1170 | 1264 | return ret; |
|---|
| 1171 | 1265 | } |
|---|
| 1266 | + |
|---|
| 1172 | 1267 | mpp_session_attach_workqueue(session, mpp->queue); |
|---|
| 1173 | 1268 | } break; |
|---|
| 1174 | 1269 | case MPP_CMD_INIT_DRIVER_DATA: { |
|---|
| .. | .. |
|---|
| 1211 | 1306 | case MPP_CMD_POLL_HW_FINISH: { |
|---|
| 1212 | 1307 | msgs->flags |= req->flags; |
|---|
| 1213 | 1308 | msgs->poll_cnt++; |
|---|
| 1309 | + msgs->poll_req = NULL; |
|---|
| 1310 | + } break; |
|---|
| 1311 | + case MPP_CMD_POLL_HW_IRQ: { |
|---|
| 1312 | + if (msgs->poll_cnt || msgs->poll_req) |
|---|
| 1313 | + mpp_err("Do NOT poll hw irq when previous call not return\n"); |
|---|
| 1314 | + |
|---|
| 1315 | + msgs->flags |= req->flags; |
|---|
| 1316 | + msgs->poll_cnt++; |
|---|
| 1317 | + |
|---|
| 1318 | + if (req->size && req->data) { |
|---|
| 1319 | + if (!msgs->poll_req) |
|---|
| 1320 | + msgs->poll_req = req; |
|---|
| 1321 | + } else { |
|---|
| 1322 | + msgs->poll_req = NULL; |
|---|
| 1323 | + } |
|---|
| 1214 | 1324 | } break; |
|---|
| 1215 | 1325 | case MPP_CMD_RESET_SESSION: { |
|---|
| 1216 | 1326 | int ret; |
|---|
| .. | .. |
|---|
| 1300 | 1410 | default: { |
|---|
| 1301 | 1411 | mpp = session->mpp; |
|---|
| 1302 | 1412 | if (!mpp) { |
|---|
| 1303 | | - mpp_err("pid %d not find clinet %d\n", |
|---|
| 1413 | + mpp_err("pid %d not find client %d\n", |
|---|
| 1304 | 1414 | session->pid, session->device_type); |
|---|
| 1305 | 1415 | return -EINVAL; |
|---|
| 1306 | 1416 | } |
|---|
| .. | .. |
|---|
| 1314 | 1424 | return 0; |
|---|
| 1315 | 1425 | } |
|---|
| 1316 | 1426 | |
|---|
| 1317 | | -static long mpp_dev_ioctl(struct file *filp, |
|---|
| 1318 | | - unsigned int cmd, |
|---|
| 1319 | | - unsigned long arg) |
|---|
| 1427 | +static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head) |
|---|
| 1320 | 1428 | { |
|---|
| 1429 | + struct mpp_session *session = msgs->session; |
|---|
| 1321 | 1430 | int ret = 0; |
|---|
| 1322 | | - struct mpp_service *srv; |
|---|
| 1323 | | - void __user *msg; |
|---|
| 1431 | + |
|---|
| 1432 | + /* process each task */ |
|---|
| 1433 | + if (msgs->set_cnt) { |
|---|
| 1434 | + /* NOTE: update msg_flags for fd over 1024 */ |
|---|
| 1435 | + session->msg_flags = msgs->flags; |
|---|
| 1436 | + ret = mpp_process_task(session, msgs); |
|---|
| 1437 | + } |
|---|
| 1438 | + |
|---|
| 1439 | + if (!ret) { |
|---|
| 1440 | + INIT_LIST_HEAD(&msgs->list); |
|---|
| 1441 | + list_add_tail(&msgs->list, head); |
|---|
| 1442 | + } else { |
|---|
| 1443 | + put_task_msgs(msgs); |
|---|
| 1444 | + } |
|---|
| 1445 | +} |
|---|
| 1446 | + |
|---|
| 1447 | +static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session, |
|---|
| 1448 | + unsigned int cmd, void __user *msg) |
|---|
| 1449 | +{ |
|---|
| 1450 | + struct mpp_msg_v1 msg_v1; |
|---|
| 1324 | 1451 | struct mpp_request *req; |
|---|
| 1325 | | - struct mpp_task_msgs task_msgs; |
|---|
| 1326 | | - struct mpp_session *session = |
|---|
| 1327 | | - (struct mpp_session *)filp->private_data; |
|---|
| 1452 | + struct mpp_task_msgs *msgs = NULL; |
|---|
| 1453 | + int last = 1; |
|---|
| 1454 | + int ret; |
|---|
| 1455 | + |
|---|
| 1456 | + if (cmd != MPP_IOC_CFG_V1) { |
|---|
| 1457 | + mpp_err("unknown ioctl cmd %x\n", cmd); |
|---|
| 1458 | + return -EINVAL; |
|---|
| 1459 | + } |
|---|
| 1460 | + |
|---|
| 1461 | +next: |
|---|
| 1462 | + /* first, parse to fixed struct */ |
|---|
| 1463 | + if (copy_from_user(&msg_v1, msg, sizeof(msg_v1))) |
|---|
| 1464 | + return -EFAULT; |
|---|
| 1465 | + |
|---|
| 1466 | + msg += sizeof(msg_v1); |
|---|
| 1467 | + |
|---|
| 1468 | + mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n", |
|---|
| 1469 | + msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset); |
|---|
| 1470 | + |
|---|
| 1471 | + if (mpp_check_cmd_v1(msg_v1.cmd)) { |
|---|
| 1472 | + mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd); |
|---|
| 1473 | + return -EFAULT; |
|---|
| 1474 | + } |
|---|
| 1475 | + |
|---|
| 1476 | + if (msg_v1.flags & MPP_FLAGS_MULTI_MSG) |
|---|
| 1477 | + last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0; |
|---|
| 1478 | + else |
|---|
| 1479 | + last = 1; |
|---|
| 1480 | + |
|---|
| 1481 | + /* check cmd for change msgs session */ |
|---|
| 1482 | + if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) { |
|---|
| 1483 | + struct mpp_bat_msg bat_msg; |
|---|
| 1484 | + struct mpp_bat_msg __user *usr_cmd; |
|---|
| 1485 | + struct fd f; |
|---|
| 1486 | + |
|---|
| 1487 | + /* try session switch here */ |
|---|
| 1488 | + usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr; |
|---|
| 1489 | + |
|---|
| 1490 | + if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg))) |
|---|
| 1491 | + return -EFAULT; |
|---|
| 1492 | + |
|---|
| 1493 | + /* skip finished message */ |
|---|
| 1494 | + if (bat_msg.flag & MPP_BAT_MSG_DONE) |
|---|
| 1495 | + goto session_switch_done; |
|---|
| 1496 | + |
|---|
| 1497 | + f = fdget(bat_msg.fd); |
|---|
| 1498 | + if (!f.file) { |
|---|
| 1499 | + int ret = -EBADF; |
|---|
| 1500 | + |
|---|
| 1501 | + mpp_err("fd %d get session failed\n", bat_msg.fd); |
|---|
| 1502 | + |
|---|
| 1503 | + if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret))) |
|---|
| 1504 | + mpp_err("copy_to_user failed.\n"); |
|---|
| 1505 | + goto session_switch_done; |
|---|
| 1506 | + } |
|---|
| 1507 | + |
|---|
| 1508 | + /* NOTE: keep the previous msgs if it carries requests, recycle it if empty */ |
|---|
| 1509 | + if (msgs) { |
|---|
| 1510 | + if (msgs->req_cnt) |
|---|
| 1511 | + task_msgs_add(msgs, head); |
|---|
| 1512 | + else |
|---|
| 1513 | + put_task_msgs(msgs); |
|---|
| 1514 | + |
|---|
| 1515 | + msgs = NULL; |
|---|
| 1516 | + } |
|---|
| 1517 | + |
|---|
| 1518 | + /* switch session */ |
|---|
| 1519 | + session = f.file->private_data; |
|---|
| 1520 | + msgs = get_task_msgs(session); |
|---|
| 1521 | + |
|---|
| 1522 | + if (f.file->private_data == session) |
|---|
| 1523 | + msgs->ext_fd = bat_msg.fd; |
|---|
| 1524 | + |
|---|
| 1525 | + msgs->f = f; |
|---|
| 1526 | + |
|---|
| 1527 | + mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n", |
|---|
| 1528 | + bat_msg.fd, session->index, session->msgs_cnt); |
|---|
| 1529 | + |
|---|
| 1530 | +session_switch_done: |
|---|
| 1531 | + /* session id should NOT be the last message */ |
|---|
| 1532 | + if (last) |
|---|
| 1533 | + return 0; |
|---|
| 1534 | + |
|---|
| 1535 | + goto next; |
|---|
| 1536 | + } |
|---|
| 1537 | + |
|---|
| 1538 | + if (!msgs) |
|---|
| 1539 | + msgs = get_task_msgs(session); |
|---|
| 1540 | + |
|---|
| 1541 | + if (!msgs) { |
|---|
| 1542 | + pr_err("session %d:%d failed to get task msgs", |
|---|
| 1543 | + session->pid, session->index); |
|---|
| 1544 | + return -EINVAL; |
|---|
| 1545 | + } |
|---|
| 1546 | + |
|---|
| 1547 | + if (msgs->req_cnt >= MPP_MAX_MSG_NUM) { |
|---|
| 1548 | + mpp_err("session %d message count %d more than %d.\n", |
|---|
| 1549 | + session->index, msgs->req_cnt, MPP_MAX_MSG_NUM); |
|---|
| 1550 | + return -EINVAL; |
|---|
| 1551 | + } |
|---|
| 1552 | + |
|---|
| 1553 | + req = &msgs->reqs[msgs->req_cnt++]; |
|---|
| 1554 | + req->cmd = msg_v1.cmd; |
|---|
| 1555 | + req->flags = msg_v1.flags; |
|---|
| 1556 | + req->size = msg_v1.size; |
|---|
| 1557 | + req->offset = msg_v1.offset; |
|---|
| 1558 | + req->data = (void __user *)(unsigned long)msg_v1.data_ptr; |
|---|
| 1559 | + |
|---|
| 1560 | + ret = mpp_process_request(session, session->srv, req, msgs); |
|---|
| 1561 | + if (ret) { |
|---|
| 1562 | + mpp_err("session %d process cmd %x ret %d\n", |
|---|
| 1563 | + session->index, req->cmd, ret); |
|---|
| 1564 | + return ret; |
|---|
| 1565 | + } |
|---|
| 1566 | + |
|---|
| 1567 | + if (!last) |
|---|
| 1568 | + goto next; |
|---|
| 1569 | + |
|---|
| 1570 | + task_msgs_add(msgs, head); |
|---|
| 1571 | + msgs = NULL; |
|---|
| 1572 | + |
|---|
| 1573 | + return 0; |
|---|
| 1574 | +} |
|---|
| 1575 | + |
|---|
| 1576 | +static void mpp_msgs_trigger(struct list_head *msgs_list) |
|---|
| 1577 | +{ |
|---|
| 1578 | + struct mpp_task_msgs *msgs, *n; |
|---|
| 1579 | + struct mpp_dev *mpp_prev = NULL; |
|---|
| 1580 | + struct mpp_taskqueue *queue_prev = NULL; |
|---|
| 1581 | + |
|---|
| 1582 | + /* push task to queue */ |
|---|
| 1583 | + list_for_each_entry_safe(msgs, n, msgs_list, list) { |
|---|
| 1584 | + struct mpp_dev *mpp; |
|---|
| 1585 | + struct mpp_task *task; |
|---|
| 1586 | + struct mpp_taskqueue *queue; |
|---|
| 1587 | + |
|---|
| 1588 | + if (!msgs->set_cnt || !msgs->queue) |
|---|
| 1589 | + continue; |
|---|
| 1590 | + |
|---|
| 1591 | + mpp = msgs->mpp; |
|---|
| 1592 | + task = msgs->task; |
|---|
| 1593 | + queue = msgs->queue; |
|---|
| 1594 | + |
|---|
| 1595 | + if (queue_prev != queue) { |
|---|
| 1596 | + if (queue_prev && mpp_prev) { |
|---|
| 1597 | + mutex_unlock(&queue_prev->pending_lock); |
|---|
| 1598 | + mpp_taskqueue_trigger_work(mpp_prev); |
|---|
| 1599 | + } |
|---|
| 1600 | + |
|---|
| 1601 | + if (queue) |
|---|
| 1602 | + mutex_lock(&queue->pending_lock); |
|---|
| 1603 | + |
|---|
| 1604 | + mpp_prev = mpp; |
|---|
| 1605 | + queue_prev = queue; |
|---|
| 1606 | + } |
|---|
| 1607 | + |
|---|
| 1608 | + if (test_bit(TASK_STATE_ABORT, &task->state)) |
|---|
| 1609 | + pr_info("try to trigger abort task %d\n", task->task_id); |
|---|
| 1610 | + |
|---|
| 1611 | + set_bit(TASK_STATE_PENDING, &task->state); |
|---|
| 1612 | + list_add_tail(&task->queue_link, &queue->pending_list); |
|---|
| 1613 | + } |
|---|
| 1614 | + |
|---|
| 1615 | + if (mpp_prev && queue_prev) { |
|---|
| 1616 | + mutex_unlock(&queue_prev->pending_lock); |
|---|
| 1617 | + mpp_taskqueue_trigger_work(mpp_prev); |
|---|
| 1618 | + } |
|---|
| 1619 | +} |
|---|
| 1620 | + |
|---|
| 1621 | +static void mpp_msgs_wait(struct list_head *msgs_list) |
|---|
| 1622 | +{ |
|---|
| 1623 | + struct mpp_task_msgs *msgs, *n; |
|---|
| 1624 | + |
|---|
| 1625 | + /* poll and release each task */ |
|---|
| 1626 | + list_for_each_entry_safe(msgs, n, msgs_list, list) { |
|---|
| 1627 | + struct mpp_session *session = msgs->session; |
|---|
| 1628 | + |
|---|
| 1629 | + if (msgs->poll_cnt) { |
|---|
| 1630 | + int ret = mpp_wait_result(session, msgs); |
|---|
| 1631 | + |
|---|
| 1632 | + if (ret) { |
|---|
| 1633 | + mpp_err("session %d wait result ret %d\n", |
|---|
| 1634 | + session->index, ret); |
|---|
| 1635 | + } |
|---|
| 1636 | + } |
|---|
| 1637 | + |
|---|
| 1638 | + put_task_msgs(msgs); |
|---|
| 1639 | + |
|---|
| 1640 | + } |
|---|
| 1641 | +} |
|---|
| 1642 | + |
|---|
| 1643 | +static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
|---|
| 1644 | +{ |
|---|
| 1645 | + struct mpp_service *srv; |
|---|
| 1646 | + struct mpp_session *session = (struct mpp_session *)filp->private_data; |
|---|
| 1647 | + struct list_head msgs_list; |
|---|
| 1648 | + int ret = 0; |
|---|
| 1328 | 1649 | |
|---|
| 1329 | 1650 | mpp_debug_enter(); |
|---|
| 1330 | 1651 | |
|---|
| .. | .. |
|---|
| 1332 | 1653 | mpp_err("session %p\n", session); |
|---|
| 1333 | 1654 | return -EINVAL; |
|---|
| 1334 | 1655 | } |
|---|
| 1656 | + |
|---|
| 1335 | 1657 | srv = session->srv; |
|---|
| 1658 | + |
|---|
| 1336 | 1659 | if (atomic_read(&session->release_request) > 0) { |
|---|
| 1337 | 1660 | mpp_debug(DEBUG_IOCTL, "release session had request\n"); |
|---|
| 1338 | 1661 | return -EBUSY; |
|---|
| .. | .. |
|---|
| 1342 | 1665 | return -EBUSY; |
|---|
| 1343 | 1666 | } |
|---|
| 1344 | 1667 | |
|---|
| 1345 | | - msg = (void __user *)arg; |
|---|
| 1346 | | - memset(&task_msgs, 0, sizeof(task_msgs)); |
|---|
| 1347 | | - do { |
|---|
| 1348 | | - req = &task_msgs.reqs[task_msgs.req_cnt]; |
|---|
| 1349 | | - /* first, parse to fixed struct */ |
|---|
| 1350 | | - switch (cmd) { |
|---|
| 1351 | | - case MPP_IOC_CFG_V1: { |
|---|
| 1352 | | - struct mpp_msg_v1 msg_v1; |
|---|
| 1668 | + INIT_LIST_HEAD(&msgs_list); |
|---|
| 1353 | 1669 | |
|---|
| 1354 | | - memset(&msg_v1, 0, sizeof(msg_v1)); |
|---|
| 1355 | | - if (copy_from_user(&msg_v1, msg, sizeof(msg_v1))) |
|---|
| 1356 | | - return -EFAULT; |
|---|
| 1357 | | - ret = mpp_parse_msg_v1(&msg_v1, req); |
|---|
| 1358 | | - if (ret) |
|---|
| 1359 | | - return -EFAULT; |
|---|
| 1670 | + ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg); |
|---|
| 1671 | + if (ret) |
|---|
| 1672 | + mpp_err("collect msgs failed %d\n", ret); |
|---|
| 1360 | 1673 | |
|---|
| 1361 | | - msg += sizeof(msg_v1); |
|---|
| 1362 | | - } break; |
|---|
| 1363 | | - default: |
|---|
| 1364 | | - mpp_err("unknown ioctl cmd %x\n", cmd); |
|---|
| 1365 | | - return -EINVAL; |
|---|
| 1366 | | - } |
|---|
| 1367 | | - task_msgs.req_cnt++; |
|---|
| 1368 | | - /* check loop times */ |
|---|
| 1369 | | - if (task_msgs.req_cnt > MPP_MAX_MSG_NUM) { |
|---|
| 1370 | | - mpp_err("fail, message count %d more than %d.\n", |
|---|
| 1371 | | - task_msgs.req_cnt, MPP_MAX_MSG_NUM); |
|---|
| 1372 | | - return -EINVAL; |
|---|
| 1373 | | - } |
|---|
| 1374 | | - /* second, process request */ |
|---|
| 1375 | | - ret = mpp_process_request(session, srv, req, &task_msgs); |
|---|
| 1376 | | - if (ret) |
|---|
| 1377 | | - return -EFAULT; |
|---|
| 1378 | | - /* last, process task message */ |
|---|
| 1379 | | - if (mpp_msg_is_last(req)) { |
|---|
| 1380 | | - session->msg_flags = task_msgs.flags; |
|---|
| 1381 | | - if (task_msgs.set_cnt > 0) { |
|---|
| 1382 | | - ret = mpp_process_task(session, &task_msgs); |
|---|
| 1383 | | - if (ret) |
|---|
| 1384 | | - return ret; |
|---|
| 1385 | | - } |
|---|
| 1386 | | - if (task_msgs.poll_cnt > 0) { |
|---|
| 1387 | | - ret = mpp_wait_result(session, &task_msgs); |
|---|
| 1388 | | - if (ret) |
|---|
| 1389 | | - return ret; |
|---|
| 1390 | | - } |
|---|
| 1391 | | - } |
|---|
| 1392 | | - } while (!mpp_msg_is_last(req)); |
|---|
| 1674 | + mpp_msgs_trigger(&msgs_list); |
|---|
| 1675 | + |
|---|
| 1676 | + mpp_msgs_wait(&msgs_list); |
|---|
| 1393 | 1677 | |
|---|
| 1394 | 1678 | mpp_debug_leave(); |
|---|
| 1395 | 1679 | |
|---|
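With the rework above, `mpp_dev_ioctl()` is reduced to three phases: `mpp_collect_msgs()` parses the user message chain into per-session `mpp_task_msgs`, `mpp_msgs_trigger()` pushes the prepared tasks onto their queues, and `mpp_msgs_wait()` polls results and recycles the containers. A hypothetical userspace submission exercising the multi-message path could look like the sketch below; it assumes the driver's UAPI header provides `struct mpp_msg_v1`, `struct mpp_bat_msg`, `MPP_IOC_CFG_V1`, the `MPP_FLAGS_*` bits, `MPP_CMD_SET_SESSION_FD` and `MPP_MAX_MSG_NUM`, and that `cnt >= 1` with `cnt + 1 <= MPP_MAX_MSG_NUM`.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical sketch: prepend a session-switch message to a prepared
 * chain and terminate it with MPP_FLAGS_LAST_MSG so the kernel's
 * collect loop stops after the final entry.
 */
static int mpp_submit_batch(int service_fd, int session_fd,
			    const struct mpp_msg_v1 *msgs, unsigned int cnt)
{
	struct mpp_bat_msg bat = { .flag = 0, .fd = (uint32_t)session_fd, .ret = 0 };
	struct mpp_msg_v1 chain[MPP_MAX_MSG_NUM];
	unsigned int i;

	/* message 0 selects the session the rest of the chain targets */
	memset(&chain[0], 0, sizeof(chain[0]));
	chain[0].cmd = MPP_CMD_SET_SESSION_FD;
	chain[0].flags = MPP_FLAGS_MULTI_MSG;
	chain[0].size = sizeof(bat);
	chain[0].data_ptr = (uintptr_t)&bat;

	for (i = 0; i < cnt; i++) {
		chain[i + 1] = msgs[i];
		chain[i + 1].flags |= MPP_FLAGS_MULTI_MSG;
	}
	/* the last message ends the collect loop in mpp_collect_msgs() */
	chain[cnt].flags |= MPP_FLAGS_LAST_MSG;

	return ioctl(service_fd, MPP_IOC_CFG_V1, chain);
}
```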
| .. | .. |
|---|
| 1493 | 1777 | mpp_iommu_down_read(mpp->iommu_info); |
|---|
| 1494 | 1778 | buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd); |
|---|
| 1495 | 1779 | mpp_iommu_up_read(mpp->iommu_info); |
|---|
| 1496 | | - if (IS_ERR_OR_NULL(buffer)) { |
|---|
| 1780 | + if (IS_ERR(buffer)) { |
|---|
| 1497 | 1781 | mpp_err("can't import dma-buf %d\n", fd); |
|---|
| 1498 | | - return ERR_PTR(-ENOMEM); |
|---|
| 1782 | + return ERR_CAST(buffer); |
|---|
| 1499 | 1783 | } |
|---|
| 1500 | 1784 | |
|---|
| 1501 | 1785 | mem_region->hdl = buffer; |
|---|
| .. | .. |
|---|
| 1525 | 1809 | cnt = session->trans_count; |
|---|
| 1526 | 1810 | tbl = session->trans_table; |
|---|
| 1527 | 1811 | } else { |
|---|
| 1528 | | - struct mpp_dev *mpp = session->mpp; |
|---|
| 1812 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
|---|
| 1529 | 1813 | struct mpp_trans_info *trans_info = mpp->var->trans_info; |
|---|
| 1530 | 1814 | |
|---|
| 1531 | 1815 | cnt = trans_info[fmt].count; |
|---|
| .. | .. |
|---|
| 1661 | 1945 | return 0; |
|---|
| 1662 | 1946 | } |
|---|
| 1663 | 1947 | |
|---|
| 1664 | | -int mpp_task_init(struct mpp_session *session, |
|---|
| 1665 | | - struct mpp_task *task) |
|---|
| 1948 | +int mpp_task_init(struct mpp_session *session, struct mpp_task *task) |
|---|
| 1666 | 1949 | { |
|---|
| 1667 | 1950 | INIT_LIST_HEAD(&task->pending_link); |
|---|
| 1668 | 1951 | INIT_LIST_HEAD(&task->queue_link); |
|---|
| .. | .. |
|---|
| 1677 | 1960 | int mpp_task_finish(struct mpp_session *session, |
|---|
| 1678 | 1961 | struct mpp_task *task) |
|---|
| 1679 | 1962 | { |
|---|
| 1680 | | - struct mpp_dev *mpp = session->mpp; |
|---|
| 1963 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
|---|
| 1681 | 1964 | |
|---|
| 1682 | 1965 | if (mpp->dev_ops->finish) |
|---|
| 1683 | 1966 | mpp->dev_ops->finish(mpp, task); |
|---|
| .. | .. |
|---|
| 1713 | 1996 | struct mpp_task *task) |
|---|
| 1714 | 1997 | { |
|---|
| 1715 | 1998 | struct mpp_mem_region *mem_region = NULL, *n; |
|---|
| 1716 | | - struct mpp_dev *mpp = session->mpp; |
|---|
| 1999 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, session); |
|---|
| 1717 | 2000 | |
|---|
| 1718 | 2001 | /* release memory region attach to this registers table. */ |
|---|
| 1719 | 2002 | list_for_each_entry_safe(mem_region, n, |
|---|
| .. | .. |
|---|
| 1738 | 2021 | if (!task) |
|---|
| 1739 | 2022 | return -EIO; |
|---|
| 1740 | 2023 | |
|---|
| 1741 | | - mpp_err("--- dump mem region ---\n"); |
|---|
| 2024 | + mpp_err("--- dump task %d mem region ---\n", task->task_index); |
|---|
| 1742 | 2025 | if (!list_empty(&task->mem_region_list)) { |
|---|
| 1743 | 2026 | list_for_each_entry_safe(mem, n, |
|---|
| 1744 | 2027 | &task->mem_region_list, |
|---|
| .. | .. |
|---|
| 1778 | 2061 | return 0; |
|---|
| 1779 | 2062 | } |
|---|
| 1780 | 2063 | |
|---|
| 1781 | | -int mpp_task_dump_hw_reg(struct mpp_dev *mpp, struct mpp_task *task) |
|---|
| 2064 | +int mpp_task_dump_hw_reg(struct mpp_dev *mpp) |
|---|
| 1782 | 2065 | { |
|---|
| 1783 | | - if (!task) |
|---|
| 1784 | | - return -EIO; |
|---|
| 2066 | + u32 i; |
|---|
| 2067 | + u32 s = mpp->var->hw_info->reg_start; |
|---|
| 2068 | + u32 e = mpp->var->hw_info->reg_end; |
|---|
| 1785 | 2069 | |
|---|
| 1786 | | - if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) { |
|---|
| 1787 | | - u32 i; |
|---|
| 1788 | | - u32 s = task->hw_info->reg_start; |
|---|
| 1789 | | - u32 e = task->hw_info->reg_end; |
|---|
| 2070 | + mpp_err("--- dump hardware register ---\n"); |
|---|
| 2071 | + for (i = s; i <= e; i++) { |
|---|
| 2072 | + u32 reg = i * sizeof(u32); |
|---|
| 1790 | 2073 | |
|---|
| 1791 | | - mpp_err("--- dump hardware register ---\n"); |
|---|
| 1792 | | - for (i = s; i <= e; i++) { |
|---|
| 1793 | | - u32 reg = i * sizeof(u32); |
|---|
| 1794 | | - |
|---|
| 1795 | | - mpp_err("reg[%03d]: %04x: 0x%08x\n", |
|---|
| 2074 | + mpp_err("reg[%03d]: %04x: 0x%08x\n", |
|---|
| 1796 | 2075 | i, reg, readl_relaxed(mpp->reg_base + reg)); |
|---|
| 1797 | | - } |
|---|
| 1798 | 2076 | } |
|---|
| 1799 | 2077 | |
|---|
| 1800 | 2078 | return 0; |
|---|
| 1801 | 2079 | } |
|---|
| 1802 | 2080 | |
|---|
| 1803 | | -static int mpp_iommu_handle(struct iommu_domain *iommu, |
|---|
| 1804 | | - struct device *iommu_dev, |
|---|
| 1805 | | - unsigned long iova, |
|---|
| 1806 | | - int status, void *arg) |
|---|
| 2081 | +void mpp_reg_show(struct mpp_dev *mpp, u32 offset) |
|---|
| 1807 | 2082 | { |
|---|
| 1808 | | - struct mpp_dev *mpp = (struct mpp_dev *)arg; |
|---|
| 1809 | | - struct mpp_taskqueue *queue = mpp->queue; |
|---|
| 1810 | | - struct mpp_task *task = mpp_taskqueue_get_running_task(queue); |
|---|
| 2083 | + if (!mpp) |
|---|
| 2084 | + return; |
|---|
| 1811 | 2085 | |
|---|
| 1812 | | - /* |
|---|
| 1813 | | - * NOTE: In link mode, this task may not be the task of the current |
|---|
| 1814 | | - * hardware processing error |
|---|
| 1815 | | - */ |
|---|
| 1816 | | - if (!task || !task->session) |
|---|
| 1817 | | - return -EIO; |
|---|
| 1818 | | - /* get mpp from cur task */ |
|---|
| 1819 | | - mpp = task->session->mpp; |
|---|
| 1820 | | - dev_err(mpp->dev, "fault addr 0x%08lx status %x\n", iova, status); |
|---|
| 2086 | + dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n", |
|---|
| 2087 | + offset >> 2, offset, mpp_read_relaxed(mpp, offset)); |
|---|
| 2088 | +} |
|---|
| 1821 | 2089 | |
|---|
| 1822 | | - mpp_task_dump_mem_region(mpp, task); |
|---|
| 1823 | | - mpp_task_dump_hw_reg(mpp, task); |
|---|
| 2090 | +void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end) |
|---|
| 2091 | +{ |
|---|
| 2092 | + u32 offset; |
|---|
| 1824 | 2093 | |
|---|
| 1825 | | - if (mpp->iommu_info->hdl) |
|---|
| 1826 | | - mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, mpp); |
|---|
| 2094 | + if (!mpp) |
|---|
| 2095 | + return; |
|---|
| 1827 | 2096 | |
|---|
| 1828 | | - return 0; |
|---|
| 2097 | + for (offset = start; offset < end; offset += sizeof(u32)) |
|---|
| 2098 | + mpp_reg_show(mpp, offset); |
|---|
| 1829 | 2099 | } |
|---|
| 1830 | 2100 | |
|---|
| 1831 | 2101 | /* The device will do more probing work after this */ |
|---|
| .. | .. |
|---|
| 1843 | 2113 | /* read flag for pum idle request */ |
|---|
| 1844 | 2114 | mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request"); |
|---|
| 1845 | 2115 | |
|---|
| 2116 | + /* read link table capacity */ |
|---|
| 2117 | + ret = of_property_read_u32(np, "rockchip,task-capacity", |
|---|
| 2118 | + &mpp->task_capacity); |
|---|
| 2119 | + if (ret) |
|---|
| 2120 | + mpp->task_capacity = 1; |
|---|
| 2121 | + |
|---|
| 2122 | + mpp->dev = dev; |
|---|
| 2123 | + mpp->hw_ops = mpp->var->hw_ops; |
|---|
| 2124 | + mpp->dev_ops = mpp->var->dev_ops; |
|---|
| 2125 | + |
|---|
| 1846 | 2126 | /* Get and attach to service */ |
|---|
| 1847 | 2127 | ret = mpp_attach_service(mpp, dev); |
|---|
| 1848 | 2128 | if (ret) { |
|---|
| .. | .. |
|---|
| 1850 | 2130 | return -ENODEV; |
|---|
| 1851 | 2131 | } |
|---|
| 1852 | 2132 | |
|---|
| 1853 | | - mpp->dev = dev; |
|---|
| 1854 | | - mpp->hw_ops = mpp->var->hw_ops; |
|---|
| 1855 | | - mpp->dev_ops = mpp->var->dev_ops; |
|---|
| 1856 | | - |
|---|
| 1857 | | - /* read link table capacity */ |
|---|
| 1858 | | - ret = of_property_read_u32(np, "rockchip,task-capacity", |
|---|
| 1859 | | - &mpp->task_capacity); |
|---|
| 1860 | | - if (ret) { |
|---|
| 1861 | | - mpp->task_capacity = 1; |
|---|
| 1862 | | - |
|---|
| 1863 | | - /* power domain autosuspend delay 2s */ |
|---|
| 1864 | | - pm_runtime_set_autosuspend_delay(dev, 2000); |
|---|
| 1865 | | - pm_runtime_use_autosuspend(dev); |
|---|
| 1866 | | - } else { |
|---|
| 1867 | | - dev_info(dev, "%d task capacity link mode detected\n", |
|---|
| 1868 | | - mpp->task_capacity); |
|---|
| 1869 | | - /* do not setup autosuspend on multi task device */ |
|---|
| 1870 | | - } |
|---|
| 2133 | + /* power domain autosuspend delay 2s */ |
|---|
| 2134 | + pm_runtime_set_autosuspend_delay(dev, 2000); |
|---|
| 2135 | + pm_runtime_use_autosuspend(dev); |
|---|
| 1871 | 2136 | |
|---|
| 1872 | 2137 | kthread_init_work(&mpp->work, mpp_task_worker_default); |
|---|
| 1873 | 2138 | |
|---|
| .. | .. |
|---|
| 1878 | 2143 | |
|---|
| 1879 | 2144 | device_init_wakeup(dev, true); |
|---|
| 1880 | 2145 | pm_runtime_enable(dev); |
|---|
| 1881 | | - |
|---|
| 1882 | 2146 | mpp->irq = platform_get_irq(pdev, 0); |
|---|
| 1883 | 2147 | if (mpp->irq < 0) { |
|---|
| 1884 | 2148 | dev_err(dev, "No interrupt resource found\n"); |
|---|
| .. | .. |
|---|
| 1905 | 2169 | ret = -ENOMEM; |
|---|
| 1906 | 2170 | goto failed; |
|---|
| 1907 | 2171 | } |
|---|
| 2172 | + mpp->io_base = res->start; |
|---|
| 1908 | 2173 | |
|---|
| 1909 | | - pm_runtime_get_sync(dev); |
|---|
| 1910 | 2174 | /* |
|---|
| 1911 | 2175 | * TODO: here or at the device itself, some device does not |
|---|
| 1912 | 2176 | * have the iommu, maybe in the device is better. |
|---|
| 1913 | 2177 | */ |
|---|
| 1914 | 2178 | mpp->iommu_info = mpp_iommu_probe(dev); |
|---|
| 1915 | 2179 | if (IS_ERR(mpp->iommu_info)) { |
|---|
| 1916 | | - dev_err(dev, "failed to attach iommu: %ld\n", |
|---|
| 1917 | | - PTR_ERR(mpp->iommu_info)); |
|---|
| 2180 | + dev_err(dev, "failed to attach iommu\n"); |
|---|
| 2181 | + mpp->iommu_info = NULL; |
|---|
| 1918 | 2182 | } |
|---|
| 1919 | 2183 | if (mpp->hw_ops->init) { |
|---|
| 1920 | 2184 | ret = mpp->hw_ops->init(mpp); |
|---|
| 1921 | 2185 | if (ret) |
|---|
| 1922 | | - goto failed_init; |
|---|
| 2186 | + goto failed; |
|---|
| 1923 | 2187 | } |
|---|
| 1924 | | - /* set iommu fault handler */ |
|---|
| 1925 | | - if (!IS_ERR(mpp->iommu_info)) |
|---|
| 1926 | | - iommu_set_fault_handler(mpp->iommu_info->domain, |
|---|
| 1927 | | - mpp_iommu_handle, mpp); |
|---|
| 1928 | 2188 | |
|---|
| 1929 | 2189 | /* read hardware id */ |
|---|
| 1930 | 2190 | if (hw_info->reg_id >= 0) { |
|---|
| 2191 | + pm_runtime_get_sync(dev); |
|---|
| 1931 | 2192 | if (mpp->hw_ops->clk_on) |
|---|
| 1932 | 2193 | mpp->hw_ops->clk_on(mpp); |
|---|
| 1933 | 2194 | |
|---|
| 1934 | 2195 | hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32)); |
|---|
| 1935 | 2196 | if (mpp->hw_ops->clk_off) |
|---|
| 1936 | 2197 | mpp->hw_ops->clk_off(mpp); |
|---|
| 2198 | + pm_runtime_put_sync(dev); |
|---|
| 1937 | 2199 | } |
|---|
| 1938 | 2200 | |
|---|
| 1939 | | - pm_runtime_put_sync(dev); |
|---|
| 1940 | | - |
|---|
| 1941 | 2201 | return ret; |
|---|
| 1942 | | -failed_init: |
|---|
| 1943 | | - pm_runtime_put_sync(dev); |
|---|
| 1944 | 2202 | failed: |
|---|
| 1945 | 2203 | mpp_detach_workqueue(mpp); |
|---|
| 1946 | 2204 | device_init_wakeup(dev, false); |
|---|
| .. | .. |
|---|
| 1955 | 2213 | mpp->hw_ops->exit(mpp); |
|---|
| 1956 | 2214 | |
|---|
| 1957 | 2215 | mpp_iommu_remove(mpp->iommu_info); |
|---|
| 1958 | | - platform_device_put(mpp->pdev_srv); |
|---|
| 1959 | 2216 | mpp_detach_workqueue(mpp); |
|---|
| 1960 | 2217 | device_init_wakeup(mpp->dev, false); |
|---|
| 1961 | 2218 | pm_runtime_disable(mpp->dev); |
|---|
| 1962 | 2219 | |
|---|
| 1963 | 2220 | return 0; |
|---|
| 2221 | +} |
|---|
| 2222 | + |
|---|
| 2223 | +void mpp_dev_shutdown(struct platform_device *pdev) |
|---|
| 2224 | +{ |
|---|
| 2225 | + int ret; |
|---|
| 2226 | + int val; |
|---|
| 2227 | + struct device *dev = &pdev->dev; |
|---|
| 2228 | + struct mpp_dev *mpp = dev_get_drvdata(dev); |
|---|
| 2229 | + |
|---|
| 2230 | + dev_info(dev, "shutdown device\n"); |
|---|
| 2231 | + |
|---|
| 2232 | + atomic_inc(&mpp->srv->shutdown_request); |
|---|
| 2233 | + ret = readx_poll_timeout(atomic_read, |
|---|
| 2234 | + &mpp->task_count, |
|---|
| 2235 | + val, val == 0, 20000, 200000); |
|---|
| 2236 | + if (ret == -ETIMEDOUT) |
|---|
| 2237 | + dev_err(dev, "wait total %d running time out\n", |
|---|
| 2238 | + atomic_read(&mpp->task_count)); |
|---|
| 2239 | + else |
|---|
| 2240 | + dev_info(dev, "shutdown success\n"); |
|---|
| 1964 | 2241 | } |
|---|
| 1965 | 2242 | |
|---|
| 1966 | 2243 | int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv) |
|---|
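The new `mpp_dev_shutdown()` raises `shutdown_request` and then uses `readx_poll_timeout()` to wait for the device's `task_count` to drain to zero, polling every 20 ms and giving up after 200 ms. The generic idiom, isolated below with illustrative names:

```c
#include <linux/atomic.h>
#include <linux/iopoll.h>

/* readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) keeps
 * evaluating val = op(addr) until cond is true or timeout_us elapses,
 * sleeping sleep_us between reads; it returns -ETIMEDOUT on timeout.
 */
static int mpp_wait_tasks_drained(atomic_t *task_count)
{
	int val;

	return readx_poll_timeout(atomic_read, task_count, val,
				  val == 0, 20000, 200000);
}
```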
| .. | .. |
|---|
| 2007 | 2284 | /* normal condition, set state and wake up isr thread */ |
|---|
| 2008 | 2285 | set_bit(TASK_STATE_IRQ, &task->state); |
|---|
| 2009 | 2286 | } |
|---|
| 2287 | + |
|---|
| 2288 | + if (irq_ret == IRQ_WAKE_THREAD) |
|---|
| 2289 | + mpp_iommu_dev_deactivate(mpp->iommu_info, mpp); |
|---|
| 2010 | 2290 | } else { |
|---|
| 2011 | 2291 | mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n"); |
|---|
| 2012 | 2292 | } |
|---|
| .. | .. |
|---|
| 2083 | 2363 | |
|---|
| 2084 | 2364 | int mpp_time_part_diff(struct mpp_task *task) |
|---|
| 2085 | 2365 | { |
|---|
| 2086 | | - ktime_t end; |
|---|
| 2087 | | - struct mpp_dev *mpp = task->session->mpp; |
|---|
| 2366 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
|---|
| 2367 | + ktime_t end; |
|---|
| 2368 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
|---|
| 2088 | 2369 | |
|---|
| 2089 | | - end = ktime_get(); |
|---|
| 2090 | | - mpp_debug(DEBUG_PART_TIMING, "%s: session %d:%d part time: %lld us\n", |
|---|
| 2091 | | - dev_name(mpp->dev), task->session->pid, task->session->index, |
|---|
| 2092 | | - ktime_us_delta(end, task->part)); |
|---|
| 2093 | | - task->part = end; |
|---|
| 2370 | + end = ktime_get(); |
|---|
| 2371 | + mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n", |
|---|
| 2372 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
|---|
| 2373 | + task->session->index, ktime_us_delta(end, task->part)); |
|---|
| 2374 | + task->part = end; |
|---|
| 2375 | + } |
|---|
| 2094 | 2376 | |
|---|
| 2095 | 2377 | return 0; |
|---|
| 2096 | 2378 | } |
|---|
| 2097 | 2379 | |
|---|
| 2098 | 2380 | int mpp_time_diff(struct mpp_task *task) |
|---|
| 2099 | 2381 | { |
|---|
| 2100 | | - ktime_t end; |
|---|
| 2101 | | - struct mpp_dev *mpp = task->session->mpp; |
|---|
| 2382 | + if (mpp_debug_unlikely(DEBUG_TIMING)) { |
|---|
| 2383 | + ktime_t end; |
|---|
| 2384 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
|---|
| 2102 | 2385 | |
|---|
| 2103 | | - end = ktime_get(); |
|---|
| 2104 | | - mpp_debug(DEBUG_TIMING, "%s: session %d:%d task time: %lld us\n", |
|---|
| 2105 | | - dev_name(mpp->dev), task->session->pid, task->session->index, |
|---|
| 2106 | | - ktime_us_delta(end, task->start)); |
|---|
| 2386 | + end = ktime_get(); |
|---|
| 2387 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n", |
|---|
| 2388 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
|---|
| 2389 | + task->session->index, ktime_us_delta(end, task->start)); |
|---|
| 2390 | + } |
|---|
| 2107 | 2391 | |
|---|
| 2108 | 2392 | return 0; |
|---|
| 2109 | 2393 | } |
|---|
| .. | .. |
|---|
| 2112 | 2396 | { |
|---|
| 2113 | 2397 | if (mpp_debug_unlikely(DEBUG_TIMING)) { |
|---|
| 2114 | 2398 | ktime_t end; |
|---|
| 2115 | | - struct mpp_dev *mpp = task->session->mpp; |
|---|
| 2399 | + struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session); |
|---|
| 2116 | 2400 | |
|---|
| 2117 | 2401 | end = ktime_get(); |
|---|
| 2118 | 2402 | |
|---|
| 2119 | 2403 | if (clk_hz) |
|---|
| 2120 | | - mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us hw %d us\n", |
|---|
| 2121 | | - dev_name(mpp->dev), task->session->index, |
|---|
| 2122 | | - ktime_us_delta(end, task->start), |
|---|
| 2404 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n", |
|---|
| 2405 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
|---|
| 2406 | + task->session->index, ktime_us_delta(end, task->start), |
|---|
| 2123 | 2407 | task->hw_cycles / (clk_hz / 1000000)); |
|---|
| 2124 | 2408 | else |
|---|
| 2125 | | - mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us\n", |
|---|
| 2126 | | - dev_name(mpp->dev), task->session->index, |
|---|
| 2127 | | - ktime_us_delta(end, task->start)); |
|---|
| 2409 | + mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n", |
|---|
| 2410 | + dev_name(mpp->dev), task->core_id, task->session->pid, |
|---|
| 2411 | + task->session->index, ktime_us_delta(end, task->start)); |
|---|
| 2128 | 2412 | } |
|---|
| 2129 | 2413 | |
|---|
| 2130 | 2414 | return 0; |
|---|
| .. | .. |
|---|
| 2143 | 2427 | ktime_t s = task->on_create; |
|---|
| 2144 | 2428 | unsigned long state = task->state; |
|---|
| 2145 | 2429 | |
|---|
| 2146 | | - pr_info("task %d dump timing at %lld us:", task->task_index, time_diff); |
|---|
| 2430 | + pr_info("task %d dump timing at %lld us:", task->task_id, time_diff); |
|---|
| 2147 | 2431 | |
|---|
| 2148 | 2432 | pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s)); |
|---|
| 2149 | 2433 | LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s); |
|---|
| .. | .. |
|---|
| 2325 | 2609 | return count; |
|---|
| 2326 | 2610 | } |
|---|
| 2327 | 2611 | |
|---|
| 2328 | | -static const struct file_operations procfs_fops_u32 = { |
|---|
| 2329 | | - .open = fops_open_u32, |
|---|
| 2330 | | - .read = seq_read, |
|---|
| 2331 | | - .release = single_release, |
|---|
| 2332 | | - .write = fops_write_u32, |
|---|
| 2612 | +static const struct proc_ops procfs_fops_u32 = { |
|---|
| 2613 | + .proc_open = fops_open_u32, |
|---|
| 2614 | + .proc_read = seq_read, |
|---|
| 2615 | + .proc_release = single_release, |
|---|
| 2616 | + .proc_write = fops_write_u32, |
|---|
| 2333 | 2617 | }; |
|---|
| 2334 | 2618 | |
|---|
| 2335 | 2619 | struct proc_dir_entry * |
|---|
| .. | .. |
|---|
| 2341 | 2625 | |
|---|
| 2342 | 2626 | void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp) |
|---|
| 2343 | 2627 | { |
|---|
| 2628 | + mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable); |
|---|
| 2344 | 2629 | mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check); |
|---|
| 2345 | 2630 | } |
|---|
| 2346 | 2631 | #endif |
|---|