2024-05-16 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb
--- a/kernel/drivers/gpu/arm/bifrost/platform/devicetree/mali_kbase_runtime_pm.c
+++ b/kernel/drivers/gpu/arm/bifrost/platform/devicetree/mali_kbase_runtime_pm.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -21,6 +21,7 @@
 
 #include <mali_kbase.h>
 #include <mali_kbase_defs.h>
+#include <device/mali_kbase_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -71,18 +72,32 @@
 		WARN_ON(regulator_disable(kbdev->regulators[i]));
 	}
 #endif
+
 }
 
 static int pm_callback_power_on(struct kbase_device *kbdev)
 {
 	int ret = 1; /* Assume GPU has been powered off */
 	int error;
+	unsigned long flags;
 
-	dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
-		(void *)kbdev->dev->pm_domain);
+	dev_dbg(kbdev->dev, "%s %pK\n", __func__, (void *)kbdev->dev->pm_domain);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	WARN_ON(kbdev->pm.backend.gpu_powered);
+#if MALI_USE_CSF
+	if (likely(kbdev->csf.firmware_inited)) {
+		WARN_ON(!kbdev->pm.active_count);
+		WARN_ON(kbdev->pm.runtime_active);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 
 	enable_gpu_power_control(kbdev);
+	CSTD_UNUSED(error);
+#else
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 
+#ifdef KBASE_PM_RUNTIME
 	error = pm_runtime_get_sync(kbdev->dev);
 	if (error == 1) {
 		/*
@@ -91,30 +106,106 @@
 		 */
 		ret = 0;
 	}
-
 	dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
+#else
+	enable_gpu_power_control(kbdev);
+#endif /* KBASE_PM_RUNTIME */
+
+#endif /* MALI_USE_CSF */
 
 	return ret;
 }
 
 static void pm_callback_power_off(struct kbase_device *kbdev)
 {
-	dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+	unsigned long flags;
+
+	dev_dbg(kbdev->dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	WARN_ON(kbdev->pm.backend.gpu_powered);
+#if MALI_USE_CSF
+	if (likely(kbdev->csf.firmware_inited)) {
+#ifdef CONFIG_MALI_BIFROST_DEBUG
+		WARN_ON(kbase_csf_scheduler_get_nr_active_csgs(kbdev));
+#endif
+		WARN_ON(kbdev->pm.backend.mcu_state != KBASE_MCU_OFF);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Power down the GPU immediately */
+	disable_gpu_power_control(kbdev);
+#else /* MALI_USE_CSF */
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+#ifdef KBASE_PM_RUNTIME
+	pm_runtime_mark_last_busy(kbdev->dev);
+	pm_runtime_put_autosuspend(kbdev->dev);
+#else
+	/* Power down the GPU immediately as runtime PM is disabled */
+	disable_gpu_power_control(kbdev);
+#endif
+#endif /* MALI_USE_CSF */
+}
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+static void pm_callback_runtime_gpu_active(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	int error;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+	WARN_ON(!kbdev->pm.active_count);
+	WARN_ON(kbdev->pm.runtime_active);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (pm_runtime_status_suspended(kbdev->dev)) {
+		error = pm_runtime_get_sync(kbdev->dev);
+		dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d", error);
+	} else {
+		/* Call the async version here, otherwise there could be
+		 * a deadlock if the runtime suspend operation is ongoing.
+		 * Caller would have taken the kbdev->pm.lock and/or the
+		 * scheduler lock, and the runtime suspend callback function
+		 * will also try to acquire the same lock(s).
+		 */
+		error = pm_runtime_get(kbdev->dev);
+		dev_dbg(kbdev->dev, "pm_runtime_get returned %d", error);
+	}
+
+	kbdev->pm.runtime_active = true;
+}
+
+static void pm_callback_runtime_gpu_idle(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	dev_dbg(kbdev->dev, "%s", __func__);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+	WARN_ON(kbdev->pm.backend.l2_state != KBASE_L2_OFF);
+	WARN_ON(kbdev->pm.active_count);
+	WARN_ON(!kbdev->pm.runtime_active);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 
 	pm_runtime_mark_last_busy(kbdev->dev);
 	pm_runtime_put_autosuspend(kbdev->dev);
-
-#ifndef KBASE_PM_RUNTIME
-	disable_gpu_power_control(kbdev);
-#endif
+	kbdev->pm.runtime_active = false;
 }
+#endif
 
 #ifdef KBASE_PM_RUNTIME
 static int kbase_device_runtime_init(struct kbase_device *kbdev)
 {
 	int ret = 0;
 
-	dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+	dev_dbg(kbdev->dev, "%s\n", __func__);
 
 	pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
 	pm_runtime_use_autosuspend(kbdev->dev);
@@ -124,7 +215,12 @@
 
 	if (!pm_runtime_enabled(kbdev->dev)) {
 		dev_warn(kbdev->dev, "pm_runtime not enabled");
-		ret = -ENOSYS;
+		ret = -EINVAL;
+	} else if (atomic_read(&kbdev->dev->power.usage_count)) {
+		dev_warn(kbdev->dev,
+			"%s: Device runtime usage count unexpectedly non zero %d",
+			__func__, atomic_read(&kbdev->dev->power.usage_count));
+		ret = -EINVAL;
 	}
 
 	return ret;
@@ -132,24 +228,34 @@
 
 static void kbase_device_runtime_disable(struct kbase_device *kbdev)
 {
-	dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+	dev_dbg(kbdev->dev, "%s\n", __func__);
+
+	if (atomic_read(&kbdev->dev->power.usage_count))
+		dev_warn(kbdev->dev,
+			"%s: Device runtime usage count unexpectedly non zero %d",
+			__func__, atomic_read(&kbdev->dev->power.usage_count));
+
 	pm_runtime_disable(kbdev->dev);
 }
-#endif
+#endif /* KBASE_PM_RUNTIME */
 
 static int pm_callback_runtime_on(struct kbase_device *kbdev)
 {
-	dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+	dev_dbg(kbdev->dev, "%s\n", __func__);
 
+#if !MALI_USE_CSF
 	enable_gpu_power_control(kbdev);
+#endif
 	return 0;
 }
 
 static void pm_callback_runtime_off(struct kbase_device *kbdev)
 {
-	dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+	dev_dbg(kbdev->dev, "%s\n", __func__);
 
+#if !MALI_USE_CSF
 	disable_gpu_power_control(kbdev);
+#endif
 }
 
 static void pm_callback_resume(struct kbase_device *kbdev)
@@ -163,6 +269,7 @@
 {
 	pm_callback_runtime_off(kbdev);
 }
+
 
 struct kbase_pm_callback_conf pm_callbacks = {
 	.power_on_callback = pm_callback_power_on,
@@ -180,6 +287,12 @@
 	.power_runtime_on_callback = NULL,
 	.power_runtime_off_callback = NULL,
 #endif /* KBASE_PM_RUNTIME */
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	.power_runtime_gpu_idle_callback = pm_callback_runtime_gpu_idle,
+	.power_runtime_gpu_active_callback = pm_callback_runtime_gpu_active,
+#else
+	.power_runtime_gpu_idle_callback = NULL,
+	.power_runtime_gpu_active_callback = NULL,
+#endif
 };
-
-