```diff
@@ -198,6 +198,7 @@
 struct timer_base {
	raw_spinlock_t lock;
	struct timer_list *running_timer;
+	spinlock_t expiry_lock;
	unsigned long clk;
	unsigned long next_expiry;
	unsigned int cpu;
@@ -214,8 +215,7 @@
 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
 static DEFINE_MUTEX(timer_keys_mutex);
 
-static void timer_update_keys(struct work_struct *work);
-static DECLARE_WORK(timer_update_work, timer_update_keys);
+static struct swork_event timer_update_swork;
 
 #ifdef CONFIG_SMP
 unsigned int sysctl_timer_migration = 1;
@@ -233,7 +233,7 @@
 static inline void timers_update_migration(void) { }
 #endif /* !CONFIG_SMP */
 
-static void timer_update_keys(struct work_struct *work)
+static void timer_update_keys(struct swork_event *event)
 {
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
@@ -243,8 +243,16 @@
 
 void timers_update_nohz(void)
 {
-	schedule_work(&timer_update_work);
+	swork_queue(&timer_update_swork);
 }
+
+static __init int hrtimer_init_thread(void)
+{
+	WARN_ON(swork_get());
+	INIT_SWORK(&timer_update_swork, timer_update_keys);
+	return 0;
+}
+early_initcall(hrtimer_init_thread);
 
 int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
```
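The hunks above switch the deferred static-key/migration update from the system workqueue (schedule_work()) to the simple-work (swork) infrastructure carried in older PREEMPT_RT trees: timers_update_nohz() now only queues an event, and the mutex-protected timer_update_keys() always runs in swork's dedicated worker thread, which is set up once at early_initcall time via swork_get(). Below is a rough userspace analogue of that deferral pattern, written with pthreads; the names (worker, queue_update, update_keys) are invented for illustration and this is not the kernel swork API.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;
static void (*work_fn)(void);

static void *worker(void *arg)
{
	(void)arg;
	/* The kernel worker loops forever; this demo handles one item. */
	pthread_mutex_lock(&work_lock);
	while (!work_pending)
		pthread_cond_wait(&work_cond, &work_lock);
	work_pending = false;
	pthread_mutex_unlock(&work_lock);
	work_fn();			/* runs in the worker, not in the caller */
	return NULL;
}

/* Analogue of swork_queue(): cheap, callable from any thread. */
static void queue_update(void)
{
	pthread_mutex_lock(&work_lock);
	work_pending = true;
	pthread_cond_signal(&work_cond);
	pthread_mutex_unlock(&work_lock);
}

static void update_keys(void)		/* stand-in for timer_update_keys() */
{
	puts("updating timer keys in worker context");
}

int main(void)
{
	pthread_t tid;

	work_fn = update_keys;				/* like INIT_SWORK() */
	pthread_create(&tid, NULL, worker, NULL);	/* like swork_get() */
	queue_update();					/* like timers_update_nohz() */
	pthread_join(tid, NULL);
	return 0;
}
```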
```diff
@@ -1219,6 +1227,25 @@
 }
 EXPORT_SYMBOL(del_timer);
 
+static int __try_to_del_timer_sync(struct timer_list *timer,
+				   struct timer_base **basep)
+{
+	struct timer_base *base;
+	unsigned long flags;
+	int ret = -1;
+
+	debug_assert_init(timer);
+
+	*basep = base = lock_timer_base(timer, &flags);
+
+	if (base->running_timer != timer)
+		ret = detach_if_pending(timer, base, true);
+
+	raw_spin_unlock_irqrestore(&base->lock, flags);
+
+	return ret;
+}
+
 /**
  * try_to_del_timer_sync - Try to deactivate a timer
  * @timer: timer to delete
```
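The helper added above factors the locked "is it running / detach if pending" step out of try_to_del_timer_sync() and, through the extra basep argument, reports which per-CPU timer_base the timer lives on. The next hunk relies on exactly that: __del_timer_sync() needs the base so it can wait on that base's expiry_lock when the callback is still running. A minimal sketch of this "status code plus out-parameter" split follows; the names (try_cancel, cancel_sync) are invented and none of the real locking is shown.

```c
#include <stdbool.h>
#include <stdio.h>

struct base { int id; };			/* stand-in for struct timer_base */
struct item { struct base *home; bool running; bool pending; };

/* Do the check once and tell the caller which base was involved. */
static int try_cancel(struct item *it, struct base **basep)
{
	*basep = it->home;
	if (it->running)
		return -1;	/* still executing: caller must wait on *basep */
	if (!it->pending)
		return 0;	/* was not queued at all */
	it->pending = false;
	return 1;		/* removed while still pending */
}

static void cancel_sync(struct item *it)
{
	struct base *b;

	while (try_cancel(it, &b) < 0) {
		/* Here the kernel blocks on b->expiry_lock (see the next hunk). */
		printf("waiting on base %d\n", b->id);
		it->running = false;	/* demo only: pretend the callback ended */
	}
}

int main(void)
{
	struct base b = { .id = 0 };
	struct item it = { .home = &b, .running = true, .pending = true };

	cancel_sync(&it);
	puts("cancelled");
	return 0;
}
```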
```diff
@@ -1229,23 +1256,31 @@
 int try_to_del_timer_sync(struct timer_list *timer)
 {
	struct timer_base *base;
-	unsigned long flags;
-	int ret = -1;
 
-	debug_assert_init(timer);
-
-	base = lock_timer_base(timer, &flags);
-
-	if (base->running_timer != timer)
-		ret = detach_if_pending(timer, base, true);
-
-	raw_spin_unlock_irqrestore(&base->lock, flags);
-
-	return ret;
+	return __try_to_del_timer_sync(timer, &base);
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+static int __del_timer_sync(struct timer_list *timer)
+{
+	struct timer_base *base;
+	int ret;
+
+	for (;;) {
+		ret = __try_to_del_timer_sync(timer, &base);
+		if (ret >= 0)
+			return ret;
+
+		/*
+		 * When accessing the lock, timers of base are no longer expired
+		 * and so timer is no longer running.
+		 */
+		spin_lock(&base->expiry_lock);
+		spin_unlock(&base->expiry_lock);
+	}
+}
+
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
```
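__del_timer_sync() above is the waiting side of the new scheme: instead of del_timer_sync() spinning with cpu_relax() (that loop is removed in the next hunk), a deleter that finds its timer executing blocks on base->expiry_lock, which the expiry path (later hunks) holds while callbacks run and briefly drops and re-takes after each one. On PREEMPT_RT_FULL, spinlock_t is a sleeping, priority-inheriting lock, so a high-priority deleter no longer busy-waits against a preempted softirq thread but boosts it instead. The userspace sketch below mimics that handshake with pthread mutexes; all names are illustrative and none of this is kernel code.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t base_lock   = PTHREAD_MUTEX_INITIALIZER;
static bool running_timer;		/* "a callback is currently executing" */

static void timer_callback(void)
{
	puts("callback running");
	usleep(100 * 1000);		/* pretend the handler takes a while */
}

/* Expiry side: loosely mirrors __run_timers()/expire_timers() in the patch. */
static void *expiry_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&expiry_lock);	/* held across the expiry pass */

	pthread_mutex_lock(&base_lock);
	running_timer = true;
	pthread_mutex_unlock(&base_lock);	/* callback runs without base_lock */

	timer_callback();

	pthread_mutex_lock(&base_lock);
	running_timer = false;			/* cleared before the handover */
	pthread_mutex_unlock(&base_lock);

	/* Handover point: let a waiting deleter in, then continue the pass. */
	pthread_mutex_unlock(&expiry_lock);
	pthread_mutex_lock(&expiry_lock);

	pthread_mutex_unlock(&expiry_lock);	/* end of the expiry pass */
	return NULL;
}

/* Deleter side: loosely mirrors __del_timer_sync() in the hunk above. */
static void del_timer_sync_sketch(void)
{
	for (;;) {
		bool busy;

		pthread_mutex_lock(&base_lock);
		busy = running_timer;
		pthread_mutex_unlock(&base_lock);

		if (!busy) {
			puts("timer deactivated");
			return;
		}
		/* Block until the expiry side reaches its handover point. */
		pthread_mutex_lock(&expiry_lock);
		pthread_mutex_unlock(&expiry_lock);
	}
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, expiry_thread, NULL);
	usleep(10 * 1000);		/* let the callback start first */
	del_timer_sync_sketch();
	pthread_join(tid, NULL);
	return 0;
}
```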
```diff
@@ -1301,12 +1336,8 @@
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
-	for (;;) {
-		int ret = try_to_del_timer_sync(timer);
-		if (ret >= 0)
-			return ret;
-		cpu_relax();
-	}
+
+	return __del_timer_sync(timer);
 }
 EXPORT_SYMBOL(del_timer_sync);
 #endif
@@ -1369,10 +1400,16 @@
		if (timer->flags & TIMER_IRQSAFE) {
			raw_spin_unlock(&base->lock);
			call_timer_fn(timer, fn);
+			base->running_timer = NULL;
+			spin_unlock(&base->expiry_lock);
+			spin_lock(&base->expiry_lock);
			raw_spin_lock(&base->lock);
		} else {
			raw_spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn);
+			base->running_timer = NULL;
+			spin_unlock(&base->expiry_lock);
+			spin_lock(&base->expiry_lock);
			raw_spin_lock_irq(&base->lock);
		}
	}
@@ -1669,6 +1706,7 @@
	if (!time_after_eq(jiffies, base->clk))
		return;
 
+	spin_lock(&base->expiry_lock);
	raw_spin_lock_irq(&base->lock);
 
	/*
@@ -1695,8 +1733,8 @@
		while (levels--)
			expire_timers(base, heads + levels);
	}
-	base->running_timer = NULL;
	raw_spin_unlock_irq(&base->lock);
+	spin_unlock(&base->expiry_lock);
 }
 
 /*
@@ -1705,6 +1743,8 @@
 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	irq_work_tick_soft();
 
	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
@@ -1941,6 +1981,7 @@
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
+		spin_lock_init(&base->expiry_lock);
	}
 }
 
```