commit 071106ecf68c401173c58808b1cf5f68cc50d390 (2024-01-05)
--- a/kernel/drivers/hwspinlock/hwspinlock_core.c
+++ b/kernel/drivers/hwspinlock/hwspinlock_core.c
@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
@@ -22,6 +23,9 @@
 #include <linux/of.h>
 
 #include "hwspinlock_internal.h"
+
+/* retry delay used in atomic context */
+#define HWSPINLOCK_RETRY_DELAY_US	100
 
 /* radix tree tags */
 #define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */
@@ -68,11 +72,11 @@
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption (and possibly interrupts) is disabled, so the caller must not
- * sleep, and is advised to release the hwspinlock as soon as possible. This is
- * required in order to minimize remote cores polling on the hardware
- * interconnect.
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
  *
  * The user decides whether local interrupts are disabled or not, and if yes,
  * whether he wants their previous state to be saved. It is up to the user
@@ -88,8 +92,8 @@
 {
 	int ret;
 
-	BUG_ON(!hwlock);
-	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+		return -EINVAL;
 
 	/*
 	 * This spin_lock{_irq, _irqsave} serves three purposes:
@@ -112,6 +116,7 @@
 		ret = spin_trylock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		ret = 1;
 		break;
 	default:
@@ -136,6 +141,7 @@
 		spin_unlock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		/* Nothing to do */
 		break;
 	default:
@@ -179,11 +185,14 @@
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption is disabled (and possibly local interrupts, too), so the caller
- * must not sleep, and is advised to release the hwspinlock as soon as possible.
- * This is required in order to minimize remote cores polling on the
- * hardware interconnect.
+ * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context), the timeout
+ * is handled with busy-waiting delays and hence shall not exceed a few msecs.
+ *
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
@@ -198,7 +207,7 @@
 			  int mode, unsigned long *flags)
 {
 	int ret;
-	unsigned long expire;
+	unsigned long expire, atomic_delay = 0;
 
 	expire = msecs_to_jiffies(to) + jiffies;
 
@@ -212,8 +221,15 @@
 		 * The lock is already taken, let's check if the user wants
 		 * us to try again
 		 */
-		if (time_is_before_eq_jiffies(expire))
-			return -ETIMEDOUT;
+		if (mode == HWLOCK_IN_ATOMIC) {
+			udelay(HWSPINLOCK_RETRY_DELAY_US);
+			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
+			if (atomic_delay > to * 1000)
+				return -ETIMEDOUT;
+		} else {
+			if (time_is_before_eq_jiffies(expire))
+				return -ETIMEDOUT;
+		}
 
 		/*
 		 * Allow platform-specific relax handlers to prevent
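
A note on the arithmetic in the atomic branch above: the timeout parameter `to` is in milliseconds (on the non-atomic path it feeds msecs_to_jiffies()), while `atomic_delay` accumulates microseconds, hence the `to * 1000` comparison. A minimal sketch of that accounting, as a hypothetical standalone helper rather than anything in the patch:

/*
 * Hypothetical helper mirroring the HWLOCK_IN_ATOMIC timeout check:
 * each failed trylock burns HWSPINLOCK_RETRY_DELAY_US (100 us) in
 * udelay(), and the loop gives up once the accumulated busy-wait
 * exceeds the caller's millisecond budget.
 */
static bool hwspin_atomic_budget_exhausted(unsigned long atomic_delay_us,
					   unsigned int to_ms)
{
	return atomic_delay_us > (unsigned long)to_ms * 1000;
}
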
@@ -248,8 +264,8 @@
  */
 void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 {
-	BUG_ON(!hwlock);
-	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+		return;
 
 	/*
 	 * We must make sure that memory operations (both reads and writes),
@@ -276,6 +292,7 @@
 		spin_unlock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		/* Nothing to do */
 		break;
 	default:
@@ -332,6 +349,11 @@
 					       &args);
 	if (ret)
 		return ret;
+
+	if (!of_device_is_available(args.np)) {
+		ret = -ENOENT;
+		goto out;
+	}
 
 	/* Find the hwspinlock device: we need its base_id */
 	ret = -EPROBE_DEFER;
@@ -635,12 +657,14 @@
 
 	/* notify PM core that power is now needed */
 	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
+	if (ret < 0 && ret != -EACCES) {
 		dev_err(dev, "%s: can't power on device\n", __func__);
 		pm_runtime_put_noidle(dev);
 		module_put(dev->driver->owner);
 		return ret;
 	}
+
+	ret = 0;
 
 	/* mark hwspinlock as used, should not fail */
 	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
@@ -798,9 +822,7 @@
 	}
 
 	/* notify the underlying device that power is not needed */
-	ret = pm_runtime_put(dev);
-	if (ret < 0)
-		goto out;
+	pm_runtime_put(dev);
 
 	/* mark this hwspinlock as available */
 	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
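
Taken together, the patch lets callers that already run in atomic context (preemption and/or interrupts disabled) take a hardware spinlock with a bounded busy-wait instead of the jiffies-based timeout. A minimal caller-side sketch using the low-level entry points shown above; the helper name, the 5 ms budget, and how the hwspinlock was obtained (e.g. via hwspin_lock_request_specific()) are illustrative assumptions, not part of the patch:

#include <linux/hwspinlock.h>

/* Hypothetical caller: serialize a register update with a remote core. */
static int update_shared_reg(struct hwspinlock *hwlock)
{
	int ret;

	/*
	 * Safe in atomic context: the core retries with udelay()
	 * internally and fails with -ETIMEDOUT once ~5 ms of
	 * busy-waiting accumulate. flags may be NULL since the
	 * mode is not HWLOCK_IRQSTATE.
	 */
	ret = __hwspin_lock_timeout(hwlock, 5, HWLOCK_IN_ATOMIC, NULL);
	if (ret)
		return ret;

	/* ... short critical section shared with the remote core ... */

	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
	return 0;
}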