 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
...
 #define _KBASE_CSF_SCHEDULER_H_
 
 #include "mali_kbase_csf.h"
+#include "mali_kbase_csf_event.h"
 
 /**
  * kbase_csf_scheduler_queue_start() - Enable the running of GPU command queue
...
  * If the CSG is already scheduled and resident, the CSI will be started
  * right away, otherwise once the group is made resident.
  *
- * Return: 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure. -EBUSY is returned to
+ * indicate to the caller that the queue could not be enabled due to the
+ * Scheduler's state; the caller can retry enabling the queue after some time.
  */
 int kbase_csf_scheduler_queue_start(struct kbase_queue *queue);
 
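The new -EBUSY contract matters for callers of kbase_csf_scheduler_queue_start(). A minimal caller-side sketch of the documented "retry after some time" behaviour; the wrapper name, retry count and delay are illustrative and not part of this patch:

/* Illustrative retry wrapper, not driver code. Requires <linux/delay.h>
 * for msleep(). -EBUSY means the Scheduler's current state prevented the
 * queue from being enabled, so back off briefly and try again.
 */
static int example_start_queue_with_retry(struct kbase_queue *queue)
{
        unsigned int attempts = 5;
        int err;

        do {
                err = kbase_csf_scheduler_queue_start(queue);
                if (err != -EBUSY)
                        break;
                /* Scheduler state did not allow enabling; back off briefly. */
                msleep(1);
        } while (--attempts);

        return err;
}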
...
  * kbase_csf_scheduler_group_copy_suspend_buf - Suspend a queue
  *                                              group and copy suspend buffer.
  *
- * This function is called to suspend a queue group and copy the suspend_buffer
- * contents to the input buffer provided.
- *
  * @group:   Pointer to the queue group to be suspended.
  * @sus_buf: Pointer to the structure which contains details of the
  *           user buffer and its kernel pinned pages to which we need to copy
  *           the group suspend buffer.
+ *
+ * This function is called to suspend a queue group and copy the suspend_buffer
+ * contents to the input buffer provided.
  *
  * Return: 0 on success, or negative on failure.
  */
...
  * kbase_csf_scheduler_pm_active - Perform scheduler power active operation
  *
  * Note: This function will increase the scheduler's internal pm_active_count
- * value, ensuring that both GPU and MCU are powered for access.
+ * value, ensuring that both GPU and MCU are powered for access. The MCU may
+ * not have actually become active when this function returns, so the caller
+ * needs to call kbase_csf_scheduler_wait_mcu_active() for that.
+ *
+ * This function should not be called with the global Scheduler lock held.
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  */
...
  * kbase_csf_scheduler_pm_idle - Perform the scheduler power idle operation
  *
  * Note: This function will decrease the scheduler's internal pm_active_count
- * value. On reaching 0, the MCU and GPU could be powered off.
+ * value. On reaching 0, the MCU and GPU could be powered off. This function
+ * should not be called with the global Scheduler lock held.
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  */
 void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_wait_mcu_active - Wait for the MCU to actually become active
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will wait for the MCU to actually become active. It is supposed
+ * to be called after calling kbase_csf_scheduler_pm_active(). It is needed as
+ * kbase_csf_scheduler_pm_active() may not make the MCU active right away.
+ *
+ * Return: 0 if the MCU was successfully activated, otherwise an error code.
+ */
+int kbase_csf_scheduler_wait_mcu_active(struct kbase_device *kbdev);
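Taken together, kbase_csf_scheduler_pm_active(), kbase_csf_scheduler_wait_mcu_active() and kbase_csf_scheduler_pm_idle() form a reference-counted bracket around MCU access. A hedged caller-side sketch (the wrapper function and the error handling are illustrative only):

/* Illustrative sketch: take a PM reference, wait for the MCU to come up,
 * perform the firmware access, then drop the reference. Must not be
 * called with the global Scheduler lock held.
 */
static int example_access_mcu(struct kbase_device *kbdev)
{
        int err;

        kbase_csf_scheduler_pm_active(kbdev);

        err = kbase_csf_scheduler_wait_mcu_active(kbdev);
        if (err) {
                kbase_csf_scheduler_pm_idle(kbdev);
                return err;
        }

        /* ... access the MCU/firmware interface here ... */

        kbase_csf_scheduler_pm_idle(kbdev);
        return 0;
}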
+
+/**
+ * kbase_csf_scheduler_pm_resume_no_lock - Reactivate the scheduler on system resume
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will make the scheduler resume the scheduling of queue groups
+ * and take the power management reference, if there are any runnable groups.
+ * The caller must have acquired the global Scheduler lock.
+ */
+void kbase_csf_scheduler_pm_resume_no_lock(struct kbase_device *kbdev);
 
 /**
  * kbase_csf_scheduler_pm_resume - Reactivate the scheduler on system resume
...
 void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev);
 
 /**
+ * kbase_csf_scheduler_pm_suspend_no_lock - Idle the scheduler on system suspend
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will make the scheduler suspend all the running queue groups
+ * and drop its power management reference.
+ * The caller must have acquired the global Scheduler lock.
+ *
+ * Return: 0 on success.
+ */
+int kbase_csf_scheduler_pm_suspend_no_lock(struct kbase_device *kbdev);
+
+/**
  * kbase_csf_scheduler_pm_suspend - Idle the scheduler on system suspend
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  *
  * This function will make the scheduler suspend all the running queue groups
  * and drop its power management reference.
+ *
+ * Return: 0 on success.
  */
-void kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
+int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
 
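Because kbase_csf_scheduler_pm_suspend() now returns an error code, callers in the system-suspend path are expected to propagate it rather than ignore it. A hedged sketch of such a caller (the surrounding function is illustrative, not the driver's actual PM callback):

/* Illustrative system-suspend path: suspension of the CSGs can now fail,
 * so the error is reported and propagated instead of being discarded.
 */
static int example_device_suspend(struct kbase_device *kbdev)
{
        int err = kbase_csf_scheduler_pm_suspend(kbdev);

        if (err)
                dev_warn(kbdev->dev, "CSF scheduler suspend failed: %d", err);

        return err;
}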
 /**
  * kbase_csf_scheduler_all_csgs_idle() - Check if the scheduler internal
...
 }
 
 /**
- * kbase_csf_scheduler_advance_tick_nolock() - Advance the scheduling tick
+ * kbase_csf_scheduler_tick_advance_nolock() - Advance the scheduling tick
  *
  * @kbdev: Pointer to the device
  *
...
  * The caller must hold the interrupt lock.
  */
 static inline void
-kbase_csf_scheduler_advance_tick_nolock(struct kbase_device *kbdev)
+kbase_csf_scheduler_tick_advance_nolock(struct kbase_device *kbdev)
 {
         struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
 
         lockdep_assert_held(&scheduler->interrupt_lock);
 
         if (scheduler->tick_timer_active) {
-                KBASE_KTRACE_ADD(kbdev, SCHEDULER_ADVANCE_TICK, NULL, 0u);
+                KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_ADVANCE, NULL, 0u);
                 scheduler->tick_timer_active = false;
                 queue_work(scheduler->wq, &scheduler->tick_work);
         } else {
-                KBASE_KTRACE_ADD(kbdev, SCHEDULER_NOADVANCE_TICK, NULL, 0u);
+                KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_NOADVANCE, NULL, 0u);
         }
 }
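The _nolock variant assumes the caller already holds scheduler->interrupt_lock, typically because other interrupt-protected state is touched in the same critical section. A hedged sketch of such a caller (the wrapper function and the state update it performs are illustrative only):

/* Illustrative caller: advance the tick from inside an existing
 * interrupt_lock critical section. The helper name is hypothetical.
 */
static void example_kick_scheduler(struct kbase_device *kbdev)
{
        struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
        unsigned long flags;

        spin_lock_irqsave(&scheduler->interrupt_lock, flags);
        /* ... update other state protected by interrupt_lock ... */
        kbase_csf_scheduler_tick_advance_nolock(kbdev);
        spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
}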
 
 /**
- * kbase_csf_scheduler_advance_tick() - Advance the scheduling tick
+ * kbase_csf_scheduler_tick_advance() - Advance the scheduling tick
  *
  * @kbdev: Pointer to the device
  *
...
  * immediate execution, but only if the tick hrtimer is active. If the timer
  * is inactive then the tick work item is already in flight.
  */
-static inline void kbase_csf_scheduler_advance_tick(struct kbase_device *kbdev)
+static inline void kbase_csf_scheduler_tick_advance(struct kbase_device *kbdev)
 {
         struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
         unsigned long flags;
 
         spin_lock_irqsave(&scheduler->interrupt_lock, flags);
-        kbase_csf_scheduler_advance_tick_nolock(kbdev);
+        kbase_csf_scheduler_tick_advance_nolock(kbdev);
         spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
+}
| 520 | + |
---|
| 521 | +/** |
---|
| 522 | + * kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick |
---|
| 523 | + * |
---|
| 524 | + * @kbdev: Pointer to the device |
---|
| 525 | + * |
---|
| 526 | + * This function will queue the scheduling tick work item for immediate |
---|
| 527 | + * execution if tick timer is not active. This can be called from interrupt |
---|
| 528 | + * context to resume the scheduling after GPU was put to sleep. |
---|
| 529 | + */ |
---|
| 530 | +static inline void kbase_csf_scheduler_invoke_tick(struct kbase_device *kbdev) |
---|
| 531 | +{ |
---|
| 532 | + struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler; |
---|
| 533 | + unsigned long flags; |
---|
| 534 | + |
---|
| 535 | + KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_INVOKE, NULL, 0u); |
---|
| 536 | + spin_lock_irqsave(&scheduler->interrupt_lock, flags); |
---|
| 537 | + if (!scheduler->tick_timer_active) |
---|
| 538 | + queue_work(scheduler->wq, &scheduler->tick_work); |
---|
| 539 | + spin_unlock_irqrestore(&scheduler->interrupt_lock, flags); |
---|
| 540 | +} |
---|
+
+/**
+ * kbase_csf_scheduler_invoke_tock() - Invoke the scheduling tock
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function will queue the scheduling tock work item for immediate
+ * execution.
+ */
+static inline void kbase_csf_scheduler_invoke_tock(struct kbase_device *kbdev)
+{
+        struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
+
+        KBASE_KTRACE_ADD(kbdev, SCHEDULER_TOCK_INVOKE, NULL, 0u);
+        if (atomic_cmpxchg(&scheduler->pending_tock_work, false, true) == false)
+                mod_delayed_work(scheduler->wq, &scheduler->tock_work, 0);
 }
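The two new helpers cover different needs: invoke_tick queues the full tick worker unless the hrtimer will run it anyway, while invoke_tock coalesces repeated requests through the pending_tock_work flag. A hedged sketch of how a notification path might choose between them (the function and the triggering condition are illustrative, not driver code):

/* Illustrative notification handler: a full tick is requested when
 * scheduling must resume after GPU sleep, whereas a tock is enough for
 * a lighter re-evaluation of the on-slot groups. Both helpers are safe
 * to call from interrupt context.
 */
static void example_handle_event(struct kbase_device *kbdev, bool resumed_from_sleep)
{
        if (resumed_from_sleep)
                kbase_csf_scheduler_invoke_tick(kbdev);
        else
                kbase_csf_scheduler_invoke_tock(kbdev);
}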
 
 /**
...
         return (queue->trace_buffer_size && queue->trace_buffer_base);
 }
 
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_csf_scheduler_reval_idleness_post_sleep() - Check GPU's idleness after
+ *                                                   putting MCU to sleep state
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function re-evaluates the idleness of on-slot queue groups after the
+ * MCU was put to the sleep state and invokes the scheduling tick if any of
+ * the on-slot queue groups became non-idle.
+ * The CSG_OUTPUT_BLOCK.CSG_STATUS_STATE.IDLE bit, which is updated by the MCU
+ * firmware on handling of the sleep request, is checked to determine the
+ * idleness.
+ *
+ * This function is needed to detect if more work was flushed in the window
+ * between the GPU idle notification and the enabling of the Doorbell mirror
+ * interrupt (from the MCU state machine). Once the Doorbell mirror interrupt
+ * is enabled, the Host can receive the notification on User doorbell rings.
+ */
+void kbase_csf_scheduler_reval_idleness_post_sleep(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_handle_runtime_suspend() - Handle runtime suspend by
+ *                                                suspending CSGs.
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called from the runtime suspend callback function for
+ * suspending all the on-slot queue groups. If any of the groups is found to
+ * be non-idle after the completion of the CSG suspend operation, or the CSG
+ * suspend operation times out, then the scheduling tick is invoked and an
+ * error is returned so that the GPU power down can be aborted.
+ *
+ * Return: 0 if all the CSGs were suspended, otherwise an error code.
+ */
+int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
+#endif
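A hedged sketch of the intended runtime-suspend usage; the callback name and surrounding PM glue are illustrative, only kbase_csf_scheduler_handle_runtime_suspend() comes from this patch, and it is only available when KBASE_PM_RUNTIME is defined:

#ifdef KBASE_PM_RUNTIME
/* Illustrative runtime-suspend path: if any CSG turns out to be non-idle,
 * or its suspension times out, the helper returns an error and the power
 * down is aborted by propagating that error to the PM core.
 */
static int example_runtime_suspend(struct kbase_device *kbdev)
{
        int err = kbase_csf_scheduler_handle_runtime_suspend(kbdev);

        if (err) {
                dev_dbg(kbdev->dev, "Aborting runtime suspend: %d", err);
                return err;
        }

        /* ... proceed with powering down the GPU ... */
        return 0;
}
#endif /* KBASE_PM_RUNTIME */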
+
+/**
+ * kbase_csf_scheduler_process_gpu_idle_event() - Process GPU idle IRQ
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called when a GPU idle IRQ has been raised.
+ */
+void kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);
+
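A hedged sketch of the intended call site, an interrupt-handling path that has already decoded a GPU idle event; the handler shown here is illustrative, not the driver's actual IRQ code:

/* Illustrative interrupt-side usage: once the global interrupt status has
 * been decoded and a GPU idle event identified, hand it to the scheduler.
 */
static void example_on_glb_irq(struct kbase_device *kbdev, bool gpu_idle_event)
{
        if (gpu_idle_event)
                kbase_csf_scheduler_process_gpu_idle_event(kbdev);
}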
+/**
+ * kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function calculates the number of CSG slots that have a queue group
+ * resident on them.
+ *
+ * Note: This function should not be used if the interrupt_lock is held. Use
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() instead.
+ *
+ * Return: number of active CSGs.
+ */
+u32 kbase_csf_scheduler_get_nr_active_csgs(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() - Get the number of active
+ *                                                   CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function calculates the number of CSG slots that have a queue group
+ * resident on them.
+ *
+ * Note: This function should be called with interrupt_lock held.
+ *
+ * Return: number of active CSGs.
+ */
+u32 kbase_csf_scheduler_get_nr_active_csgs_locked(struct kbase_device *kbdev);
+
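The two variants exist only to satisfy the locking rule around interrupt_lock. A brief hedged sketch of choosing between them (the surrounding wrapper functions are illustrative):

/* Illustrative usage: outside the interrupt_lock critical section the plain
 * variant is used; inside it, the _locked variant must be used instead.
 */
static u32 example_count_csgs(struct kbase_device *kbdev)
{
        return kbase_csf_scheduler_get_nr_active_csgs(kbdev);
}

static u32 example_count_csgs_in_critical_section(struct kbase_device *kbdev)
{
        struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
        unsigned long flags;
        u32 nr;

        spin_lock_irqsave(&scheduler->interrupt_lock, flags);
        nr = kbase_csf_scheduler_get_nr_active_csgs_locked(kbdev);
        spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);

        return nr;
}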
+/**
+ * kbase_csf_scheduler_force_wakeup() - Forcefully resume the scheduling of CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called to forcefully resume the scheduling of CSGs, even
+ * when there wasn't any work submitted for them.
+ * This function is only used for testing purposes.
+ */
+void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_csf_scheduler_force_sleep() - Forcefully put the Scheduler to the
+ *                                     sleeping state.
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called to forcefully put the Scheduler to the sleeping
+ * state and trigger the sleep of the MCU. If the CSGs are not idle, then the
+ * Scheduler would get reactivated again immediately.
+ * This function is only used for testing purposes.
+ */
+void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
+#endif
+
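These two hooks are test-only. A hedged sketch of how a debug or test harness might exercise the sleep path with them; the harness function is illustrative, and force_sleep is only compiled in when KBASE_PM_RUNTIME is defined:

/* Illustrative test harness: force the Scheduler to sleep and then wake it
 * back up, e.g. to exercise the MCU sleep/wake transitions from a debugfs
 * hook. Not intended for production paths.
 */
static void example_cycle_scheduler_sleep(struct kbase_device *kbdev)
{
#ifdef KBASE_PM_RUNTIME
        kbase_csf_scheduler_force_sleep(kbdev);
#endif
        kbase_csf_scheduler_force_wakeup(kbdev);
}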
 #endif /* _KBASE_CSF_SCHEDULER_H_ */
---|