hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h
....@@ -1,7 +1,7 @@
11 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
22 /*
33 *
4
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
4
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
55 *
66 * This program is free software and is provided to you under the terms of the
77 * GNU General Public License version 2 as published by the Free Software
....@@ -23,6 +23,7 @@
2323 #define _KBASE_CSF_SCHEDULER_H_
2424
2525 #include "mali_kbase_csf.h"
26
+#include "mali_kbase_csf_event.h"
2627
2728 /**
2829 * kbase_csf_scheduler_queue_start() - Enable the running of GPU command queue
....@@ -35,7 +36,9 @@
3536 * If the CSG is already scheduled and resident, the CSI will be started
3637 * right away, otherwise once the group is made resident.
3738 *
38
- * Return: 0 on success, or negative on failure.
39
+ * Return: 0 on success, or negative on failure. -EBUSY is returned to
40
+ * indicate to the caller that queue could not be enabled due to Scheduler
41
+ * state and the caller can try to enable the queue after some time.
3942 */
4043 int kbase_csf_scheduler_queue_start(struct kbase_queue *queue);
4144
....@@ -250,13 +253,13 @@
250253 * kbase_csf_scheduler_group_copy_suspend_buf - Suspend a queue
251254 * group and copy suspend buffer.
252255 *
253
- * This function is called to suspend a queue group and copy the suspend_buffer
254
- * contents to the input buffer provided.
255
- *
256256 * @group: Pointer to the queue group to be suspended.
257257 * @sus_buf: Pointer to the structure which contains details of the
258258 * user buffer and its kernel pinned pages to which we need to copy
259259 * the group suspend buffer.
260
+ *
261
+ * This function is called to suspend a queue group and copy the suspend_buffer
262
+ * contents to the input buffer provided.
260263 *
261264 * Return: 0 on success, or negative on failure.
262265 */
....@@ -374,7 +377,11 @@
374377 * kbase_csf_scheduler_pm_active - Perform scheduler power active operation
375378 *
376379 * Note: This function will increase the scheduler's internal pm_active_count
377
- * value, ensuring that both GPU and MCU are powered for access.
380
+ * value, ensuring that both GPU and MCU are powered for access. The MCU may
381
+ * not have actually become active when this function returns, so need to
382
+ * call kbase_csf_scheduler_wait_mcu_active() for that.
383
+ *
384
+ * This function should not be called with global scheduler lock held.
378385 *
379386 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
380387 */
....@@ -384,11 +391,36 @@
384391 * kbase_csf_scheduler_pm_idle - Perform the scheduler power idle operation
385392 *
386393 * Note: This function will decrease the scheduler's internal pm_active_count
387
- * value. On reaching 0, the MCU and GPU could be powered off.
394
+ * value. On reaching 0, the MCU and GPU could be powered off. This function
395
+ * should not be called with global scheduler lock held.
388396 *
389397 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
390398 */
391399 void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev);
400
+
401
+/**
402
+ * kbase_csf_scheduler_wait_mcu_active - Wait for the MCU to actually become active
403
+ *
404
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
405
+ *
406
+ * This function will wait for the MCU to actually become active. It is supposed
407
+ * to be called after calling kbase_csf_scheduler_pm_active(). It is needed as
408
+ * kbase_csf_scheduler_pm_active() may not make the MCU active right away.
409
+ *
410
+ * Return: 0 if the MCU was successfully activated otherwise an error code.
411
+ */
412
+int kbase_csf_scheduler_wait_mcu_active(struct kbase_device *kbdev);
413
+
414
+/**
415
+ * kbase_csf_scheduler_pm_resume_no_lock - Reactivate the scheduler on system resume
416
+ *
417
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
418
+ *
419
+ * This function will make the scheduler resume the scheduling of queue groups
420
+ * and take the power management reference, if there are any runnable groups.
421
+ * The caller must have acquired the global Scheduler lock.
422
+ */
423
+void kbase_csf_scheduler_pm_resume_no_lock(struct kbase_device *kbdev);
392424
393425 /**
394426 * kbase_csf_scheduler_pm_resume - Reactivate the scheduler on system resume
....@@ -401,14 +433,29 @@
401433 void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev);
402434
403435 /**
436
+ * kbase_csf_scheduler_pm_suspend_no_lock - Idle the scheduler on system suspend
437
+ *
438
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
439
+ *
440
+ * This function will make the scheduler suspend all the running queue groups
441
+ * and drop its power management reference.
442
+ * The caller must have acquired the global Scheduler lock.
443
+ *
444
+ * Return: 0 on success.
445
+ */
446
+int kbase_csf_scheduler_pm_suspend_no_lock(struct kbase_device *kbdev);
447
+
448
+/**
404449 * kbase_csf_scheduler_pm_suspend - Idle the scheduler on system suspend
405450 *
406451 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
407452 *
408453 * This function will make the scheduler suspend all the running queue groups
409454 * and drop its power managemenet reference.
455
+ *
456
+ * Return: 0 on success.
410457 */
411
-void kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
458
+int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
412459
413460 /**
414461 * kbase_csf_scheduler_all_csgs_idle() - Check if the scheduler internal
....@@ -427,7 +474,7 @@
427474 }
428475
429476 /**
430
- * kbase_csf_scheduler_advance_tick_nolock() - Advance the scheduling tick
477
+ * kbase_csf_scheduler_tick_advance_nolock() - Advance the scheduling tick
431478 *
432479 * @kbdev: Pointer to the device
433480 *
....@@ -437,23 +484,23 @@
437484 * The caller must hold the interrupt lock.
438485 */
439486 static inline void
440
-kbase_csf_scheduler_advance_tick_nolock(struct kbase_device *kbdev)
487
+kbase_csf_scheduler_tick_advance_nolock(struct kbase_device *kbdev)
441488 {
442489 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
443490
444491 lockdep_assert_held(&scheduler->interrupt_lock);
445492
446493 if (scheduler->tick_timer_active) {
447
- KBASE_KTRACE_ADD(kbdev, SCHEDULER_ADVANCE_TICK, NULL, 0u);
494
+ KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_ADVANCE, NULL, 0u);
448495 scheduler->tick_timer_active = false;
449496 queue_work(scheduler->wq, &scheduler->tick_work);
450497 } else {
451
- KBASE_KTRACE_ADD(kbdev, SCHEDULER_NOADVANCE_TICK, NULL, 0u);
498
+ KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_NOADVANCE, NULL, 0u);
452499 }
453500 }
454501
455502 /**
456
- * kbase_csf_scheduler_advance_tick() - Advance the scheduling tick
503
+ * kbase_csf_scheduler_tick_advance() - Advance the scheduling tick
457504 *
458505 * @kbdev: Pointer to the device
459506 *
....@@ -461,14 +508,52 @@
461508 * immediate execution, but only if the tick hrtimer is active. If the timer
462509 * is inactive then the tick work item is already in flight.
463510 */
464
-static inline void kbase_csf_scheduler_advance_tick(struct kbase_device *kbdev)
511
+static inline void kbase_csf_scheduler_tick_advance(struct kbase_device *kbdev)
465512 {
466513 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
467514 unsigned long flags;
468515
469516 spin_lock_irqsave(&scheduler->interrupt_lock, flags);
470
- kbase_csf_scheduler_advance_tick_nolock(kbdev);
517
+ kbase_csf_scheduler_tick_advance_nolock(kbdev);
471518 spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
519
+}
520
+
521
+/**
522
+ * kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick
523
+ *
524
+ * @kbdev: Pointer to the device
525
+ *
526
+ * This function will queue the scheduling tick work item for immediate
527
+ * execution if tick timer is not active. This can be called from interrupt
528
+ * context to resume the scheduling after GPU was put to sleep.
529
+ */
530
+static inline void kbase_csf_scheduler_invoke_tick(struct kbase_device *kbdev)
531
+{
532
+ struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
533
+ unsigned long flags;
534
+
535
+ KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_INVOKE, NULL, 0u);
536
+ spin_lock_irqsave(&scheduler->interrupt_lock, flags);
537
+ if (!scheduler->tick_timer_active)
538
+ queue_work(scheduler->wq, &scheduler->tick_work);
539
+ spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
540
+}
541
+
542
+/**
543
+ * kbase_csf_scheduler_invoke_tock() - Invoke the scheduling tock
544
+ *
545
+ * @kbdev: Pointer to the device
546
+ *
547
+ * This function will queue the scheduling tock work item for immediate
548
+ * execution.
549
+ */
550
+static inline void kbase_csf_scheduler_invoke_tock(struct kbase_device *kbdev)
551
+{
552
+ struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
553
+
554
+ KBASE_KTRACE_ADD(kbdev, SCHEDULER_TOCK_INVOKE, NULL, 0u);
555
+ if (atomic_cmpxchg(&scheduler->pending_tock_work, false, true) == false)
556
+ mod_delayed_work(scheduler->wq, &scheduler->tock_work, 0);
472557 }
473558
474559 /**
....@@ -491,4 +576,106 @@
491576 return (queue->trace_buffer_size && queue->trace_buffer_base);
492577 }
493578
579
+#ifdef KBASE_PM_RUNTIME
580
+/**
581
+ * kbase_csf_scheduler_reval_idleness_post_sleep() - Check GPU's idleness after
582
+ * putting MCU to sleep state
583
+ *
584
+ * @kbdev: Pointer to the device
585
+ *
586
+ * This function re-evaluates the idleness of on-slot queue groups after MCU
587
+ * was put to the sleep state and invokes the scheduling tick if any of the
588
+ * on-slot queue group became non-idle.
589
+ * CSG_OUTPUT_BLOCK.CSG_STATUS_STATE.IDLE bit is checked to determine the
590
+ * idleness which is updated by MCU firmware on handling of the sleep request.
591
+ *
592
+ * This function is needed to detect if more work was flushed in the window
593
+ * between the GPU idle notification and the enabling of Doorbell mirror
594
+ * interrupt (from MCU state machine). Once Doorbell mirror interrupt is
595
+ * enabled, Host can receive the notification on User doorbell rings.
596
+ */
597
+void kbase_csf_scheduler_reval_idleness_post_sleep(struct kbase_device *kbdev);
598
+
599
+/**
600
+ * kbase_csf_scheduler_handle_runtime_suspend() - Handle runtime suspend by
601
+ * suspending CSGs.
602
+ *
603
+ * @kbdev: Pointer to the device
604
+ *
605
+ * This function is called from the runtime suspend callback function for
606
+ * suspending all the on-slot queue groups. If any of the group is found to
607
+ * be non-idle after the completion of CSG suspend operation or the CSG
608
+ * suspend operation times out, then the scheduling tick is invoked and an
609
+ * error is returned so that the GPU power down can be aborted.
610
+ *
611
+ * Return: 0 if all the CSGs were suspended, otherwise an error code.
612
+ */
613
+int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
614
+#endif
615
+
616
+/**
617
+ * kbase_csf_scheduler_process_gpu_idle_event() - Process GPU idle IRQ
618
+ *
619
+ * @kbdev: Pointer to the device
620
+ *
621
+ * This function is called when a GPU idle IRQ has been raised.
622
+ */
623
+void kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);
624
+
625
+/**
626
+ * kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
627
+ *
628
+ * @kbdev: Pointer to the device
629
+ *
630
+ * This function calculates the number of CSG slots that have a queue group
631
+ * resident on them.
632
+ *
633
+ * Note: This function should not be used if the interrupt_lock is held. Use
634
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() instead.
635
+ *
636
+ * Return: number of active CSGs.
637
+ */
638
+u32 kbase_csf_scheduler_get_nr_active_csgs(struct kbase_device *kbdev);
639
+
640
+/**
641
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() - Get the number of active
642
+ * CSGs
643
+ *
644
+ * @kbdev: Pointer to the device
645
+ *
646
+ * This function calculates the number of CSG slots that have a queue group
647
+ * resident on them.
648
+ *
649
+ * Note: This function should be called with interrupt_lock held.
650
+ *
651
+ * Return: number of active CSGs.
652
+ */
653
+u32 kbase_csf_scheduler_get_nr_active_csgs_locked(struct kbase_device *kbdev);
654
+
655
+/**
656
+ * kbase_csf_scheduler_force_wakeup() - Forcefully resume the scheduling of CSGs
657
+ *
658
+ * @kbdev: Pointer to the device
659
+ *
660
+ * This function is called to forcefully resume the scheduling of CSGs, even
661
+ * when there wasn't any work submitted for them.
662
+ * This function is only used for testing purpose.
663
+ */
664
+void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);
665
+
666
+#ifdef KBASE_PM_RUNTIME
667
+/**
668
+ * kbase_csf_scheduler_force_sleep() - Forcefully put the Scheduler to sleeping
669
+ * state.
670
+ *
671
+ * @kbdev: Pointer to the device
672
+ *
673
+ * This function is called to forcefully put the Scheduler to sleeping state
674
+ * and trigger the sleep of MCU. If the CSGs are not idle, then the Scheduler
675
+ * would get reactivated again immediately.
676
+ * This function is only used for testing purpose.
677
+ */
678
+void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
679
+#endif
680
+
494681 #endif /* _KBASE_CSF_SCHEDULER_H_ */