@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
@@ -22,6 +23,9 @@
 #include <linux/of.h>
 
 #include "hwspinlock_internal.h"
+
+/* retry delay used in atomic context */
+#define HWSPINLOCK_RETRY_DELAY_US	100
 
 /* radix tree tags */
 #define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */
@@ -68,11 +72,11 @@
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption (and possibly interrupts) is disabled, so the caller must not
- * sleep, and is advised to release the hwspinlock as soon as possible. This is
- * required in order to minimize remote cores polling on the hardware
- * interconnect.
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
@@ -88,8 +92,8 @@
 {
 	int ret;
 
-	BUG_ON(!hwlock);
-	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+		return -EINVAL;
 
 	/*
 	 * This spin_lock{_irq, _irqsave} serves three purposes:
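
Replacing the two BUG_ON() calls is more than style: BUG_ON() panics the machine on what is only an API misuse, while WARN_ON() logs a backtrace and, because it evaluates to the condition it tests, lets the caller bail out gracefully. The shape of the idiom, for reference:

	if (WARN_ON(bad_args))	/* WARN_ON() returns the tested condition */
		return -EINVAL;	/* backtrace in the log, but keep running */
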
@@ -112,6 +116,7 @@
 		ret = spin_trylock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		ret = 1;
 		break;
 	default:
@@ -136,6 +141,7 @@
 		spin_unlock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		/* Nothing to do */
 		break;
 	default:
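
With HWLOCK_IN_ATOMIC now accepted by both the trylock and unlock paths, a caller that already runs with preemption or interrupts disabled can take the hardware lock without the core touching the local CPU state again. A minimal sketch, assuming the hwspin_trylock_in_atomic()/hwspin_unlock_in_atomic() inline wrappers that accompany this mode in include/linux/hwspinlock.h:

	/* "lock" is a struct hwspinlock * obtained earlier via request */
	ret = hwspin_trylock_in_atomic(lock);	/* 0 on success, -EBUSY if taken */
	if (ret)
		return ret;
	/* short critical section; no sleeping, keep it brief */
	hwspin_unlock_in_atomic(lock);
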
@@ -179,11 +185,14 @@
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption is disabled (and possibly local interrupts, too), so the caller
- * must not sleep, and is advised to release the hwspinlock as soon as possible.
- * This is required in order to minimize remote cores polling on the
- * hardware interconnect.
+ * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
+ * is handled with busy-waiting delays, hence shall not exceed few msecs.
+ *
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
@@ -198,7 +207,7 @@
 					int mode, unsigned long *flags)
 {
 	int ret;
-	unsigned long expire;
+	unsigned long expire, atomic_delay = 0;
 
 	expire = msecs_to_jiffies(to) + jiffies;
 
@@ -212,8 +221,15 @@
 		 * The lock is already taken, let's check if the user wants
 		 * us to try again
 		 */
-		if (time_is_before_eq_jiffies(expire))
-			return -ETIMEDOUT;
+		if (mode == HWLOCK_IN_ATOMIC) {
+			udelay(HWSPINLOCK_RETRY_DELAY_US);
+			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
+			if (atomic_delay > to * 1000)
+				return -ETIMEDOUT;
+		} else {
+			if (time_is_before_eq_jiffies(expire))
+				return -ETIMEDOUT;
+		}
 
 		/*
 		 * Allow platform-specific relax handlers to prevent
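
The unit handling deserves a note: to is in milliseconds while HWSPINLOCK_RETRY_DELAY_US is in microseconds, so to * 1000 converts the budget before comparing it against the accumulated atomic_delay; a 2 ms timeout therefore allows roughly twenty 100 us retries. A hedged usage sketch, assuming the hwspin_lock_timeout_in_atomic() wrapper that passes HWLOCK_IN_ATOMIC on the caller's behalf:

	/* e.g. from an interrupt handler: busy-wait for at most 2 ms */
	ret = hwspin_lock_timeout_in_atomic(lock, 2);
	if (ret)	/* -ETIMEDOUT once the 2000 us budget is spent */
		return ret;
	/* ... access the shared resource ... */
	hwspin_unlock_in_atomic(lock);
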
@@ -248,8 +264,8 @@
  */
 void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 {
-	BUG_ON(!hwlock);
-	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+		return;
 
 	/*
 	 * We must make sure that memory operations (both reads and writes),
@@ -276,6 +292,7 @@
 		spin_unlock_irq(&hwlock->lock);
 		break;
 	case HWLOCK_RAW:
+	case HWLOCK_IN_ATOMIC:
 		/* Nothing to do */
 		break;
 	default:
@@ -332,6 +349,11 @@
 						 &args);
 	if (ret)
 		return ret;
+
+	if (!of_device_is_available(args.np)) {
+		ret = -ENOENT;
+		goto out;
+	}
 
 	/* Find the hwspinlock device: we need its base_id */
 	ret = -EPROBE_DEFER;
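
The availability check turns a lookup against a provider whose node is marked status = "disabled" into a clean -ENOENT instead of resolving a lock that can never work. On the consumer side nothing changes but the error path; a sketch of the usual flow (variable names illustrative):

	int id;
	struct hwspinlock *lock;

	id = of_hwspin_lock_get_id(dev->of_node, 0);
	if (id < 0)	/* now -ENOENT when the provider is disabled */
		return id;

	lock = hwspin_lock_request_specific(id);
	if (!lock)
		return -EBUSY;
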
@@ -635,12 +657,14 @@
 
 	/* notify PM core that power is now needed */
 	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
+	if (ret < 0 && ret != -EACCES) {
 		dev_err(dev, "%s: can't power on device\n", __func__);
 		pm_runtime_put_noidle(dev);
 		module_put(dev->driver->owner);
 		return ret;
 	}
+
+	ret = 0;
 
 	/* mark hwspinlock as used, should not fail */
 	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
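
pm_runtime_get_sync() returns -EACCES when runtime PM is disabled for the underlying device; that is not a power failure, so it is now treated as success on the assumption that such a device is always powered. The explicit ret = 0 afterwards matters too: pm_runtime_get_sync() returns 1 when the device was already active, and this function must return 0 on success. The pattern, reduced to its shape:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {	/* real failure, not "RPM disabled" */
		pm_runtime_put_noidle(dev);	/* drop the reference taken above */
		return ret;
	}
	ret = 0;	/* 1 ("already active") and -EACCES both mean usable */
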
@@ -798,9 +822,7 @@
 	}
 
 	/* notify the underlying device that power is not needed */
-	ret = pm_runtime_put(dev);
-	if (ret < 0)
-		goto out;
+	pm_runtime_put(dev);
 
 	/* mark this hwspinlock as available */
 	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),