2023-11-20 2e7bd41e4e8ab3d1efdabd9e263a2f7fe79bff8c
kernel/kernel/time/timer.c
@@ -198,6 +198,7 @@
 struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
+	spinlock_t		expiry_lock;
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
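Note: expiry_lock is deliberately a spinlock_t sitting next to the raw_spinlock_t base->lock. On PREEMPT_RT a spinlock_t becomes a priority-inheriting sleeping lock while a raw_spinlock_t stays a true busy-waiting spinlock, so a task blocked on expiry_lock sleeps and boosts the holder instead of burning CPU. The nesting the later hunks establish, as a sketch of my reading of the patch (expiry_run_sketch is a made-up name, not kernel code):

	/* Sketch only: expiry_lock nests outside base->lock. */
	static void expiry_run_sketch(struct timer_base *base)
	{
		spin_lock(&base->expiry_lock);		/* outer; may sleep on RT */
		raw_spin_lock_irq(&base->lock);		/* inner; always busy-waits */
		/* ... collect and expire pending timers ... */
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
	}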
@@ -214,8 +215,7 @@
 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
 static DEFINE_MUTEX(timer_keys_mutex);
 
-static void timer_update_keys(struct work_struct *work);
-static DECLARE_WORK(timer_update_work, timer_update_keys);
+static struct swork_event timer_update_swork;
 
 #ifdef CONFIG_SMP
 unsigned int sysctl_timer_migration = 1;
@@ -233,7 +233,7 @@
 static inline void timers_update_migration(void) { }
 #endif /* !CONFIG_SMP */
 
-static void timer_update_keys(struct work_struct *work)
+static void timer_update_keys(struct swork_event *event)
 {
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
@@ -243,8 +243,16 @@
 
 void timers_update_nohz(void)
 {
-	schedule_work(&timer_update_work);
+	swork_queue(&timer_update_swork);
 }
+
+static __init int hrtimer_init_thread(void)
+{
+	WARN_ON(swork_get());
+	INIT_SWORK(&timer_update_swork, timer_update_keys);
+	return 0;
+}
+early_initcall(hrtimer_init_thread);
 
 int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
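Note: swork is the RT patchset's "simple work queue", which runs callbacks from a dedicated kthread. As I understand the series, the conversion is needed because the regular workqueue enqueue path takes sleeping locks on RT, so schedule_work() is not safe from the atomic contexts that can call timers_update_nohz(), whereas swork_queue() is. A minimal usage sketch mirroring the pattern above (my_event, my_handler, my_swork_setup and my_trigger are hypothetical names; the header path is my assumption of the RT tree's layout):

	#include <linux/swork.h>	/* assumed RT-patchset header */

	static struct swork_event my_event;

	static void my_handler(struct swork_event *event)
	{
		/* runs later, in the preemptible swork kthread */
	}

	static __init int my_swork_setup(void)
	{
		WARN_ON(swork_get());	/* bring up the swork thread */
		INIT_SWORK(&my_event, my_handler);
		return 0;
	}
	early_initcall(my_swork_setup);

	static void my_trigger(void)
	{
		swork_queue(&my_event);	/* safe even from atomic context */
	}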
@@ -1219,6 +1227,25 @@
 }
 EXPORT_SYMBOL(del_timer);
 
+static int __try_to_del_timer_sync(struct timer_list *timer,
+				   struct timer_base **basep)
+{
+	struct timer_base *base;
+	unsigned long flags;
+	int ret = -1;
+
+	debug_assert_init(timer);
+
+	*basep = base = lock_timer_base(timer, &flags);
+
+	if (base->running_timer != timer)
+		ret = detach_if_pending(timer, base, true);
+
+	raw_spin_unlock_irqrestore(&base->lock, flags);
+
+	return ret;
+}
+
 /**
  * try_to_del_timer_sync - Try to deactivate a timer
  * @timer: timer to delete
@@ -1229,23 +1256,31 @@
 int try_to_del_timer_sync(struct timer_list *timer)
 {
	struct timer_base *base;
-	unsigned long flags;
-	int ret = -1;
 
-	debug_assert_init(timer);
-
-	base = lock_timer_base(timer, &flags);
-
-	if (base->running_timer != timer)
-		ret = detach_if_pending(timer, base, true);
-
-	raw_spin_unlock_irqrestore(&base->lock, flags);
-
-	return ret;
+	return __try_to_del_timer_sync(timer, &base);
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+static int __del_timer_sync(struct timer_list *timer)
+{
+	struct timer_base *base;
+	int ret;
+
+	for (;;) {
+		ret = __try_to_del_timer_sync(timer, &base);
+		if (ret >= 0)
+			return ret;
+
+		/*
+		 * Once we can acquire expiry_lock, the expiry run on this
+		 * base has finished, so the timer is no longer running.
+		 */
+		spin_lock(&base->expiry_lock);
+		spin_unlock(&base->expiry_lock);
+	}
+}
+
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1301,12 +1336,8 @@
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
-	for (;;) {
-		int ret = try_to_del_timer_sync(timer);
-		if (ret >= 0)
-			return ret;
-		cpu_relax();
-	}
+
+	return __del_timer_sync(timer);
 }
 EXPORT_SYMBOL(del_timer_sync);
 #endif
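Note: the guarantee of del_timer_sync() is unchanged by this hunk: on return, the callback is not running on any CPU. Only the way of waiting changed: blocking on expiry_lock instead of a cpu_relax() spin, which on RT avoids a livelock when the waiter outranks the preempted softirq thread that is running the callback. It therefore remains the right call before freeing a timer's enclosing object; a hypothetical teardown path (my_dev and my_dev_destroy are made-up names):

	#include <linux/timer.h>
	#include <linux/slab.h>

	struct my_dev {
		struct timer_list timer;
		/* ... */
	};

	static void my_dev_destroy(struct my_dev *d)
	{
		del_timer_sync(&d->timer);	/* waits out a running callback */
		kfree(d);			/* now safe: callback cannot fire */
	}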
@@ -1369,10 +1400,16 @@
	if (timer->flags & TIMER_IRQSAFE) {
		raw_spin_unlock(&base->lock);
		call_timer_fn(timer, fn);
+		base->running_timer = NULL;
+		spin_unlock(&base->expiry_lock);
+		spin_lock(&base->expiry_lock);
		raw_spin_lock(&base->lock);
	} else {
		raw_spin_unlock_irq(&base->lock);
		call_timer_fn(timer, fn);
+		base->running_timer = NULL;
+		spin_unlock(&base->expiry_lock);
+		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
 }
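Note: the unlock/lock pair after every callback is the heart of the scheme. expire_timers() clears running_timer and then briefly drops expiry_lock, so any task blocked in __del_timer_sync() gets the lock, re-checks, and returns; the wait is bounded by one callback rather than a whole expiry batch. A userspace model of the handshake, with pthread mutexes standing in for the kernel locks (illustrative only; all names are made up and the pending/detach logic is omitted):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool callback_running;	/* models base->running_timer */

	/* Expiry side: called with expiry_lock held, once per expired timer. */
	static void expire_one(void (*fn)(void))
	{
		pthread_mutex_lock(&base_lock);
		callback_running = true;
		pthread_mutex_unlock(&base_lock);

		fn();	/* the timer callback runs without base_lock held */

		pthread_mutex_lock(&base_lock);
		callback_running = false;
		pthread_mutex_unlock(&base_lock);

		/* open a window for waiters, like the unlock/lock pair above */
		pthread_mutex_unlock(&expiry_lock);
		pthread_mutex_lock(&expiry_lock);
	}

	/* Deleter side: returns only once the callback is not running. */
	static void delete_sync(void)
	{
		for (;;) {
			pthread_mutex_lock(&base_lock);
			bool running = callback_running;
			pthread_mutex_unlock(&base_lock);

			if (!running)
				return;

			/* blocks until expire_one() opens its window */
			pthread_mutex_lock(&expiry_lock);
			pthread_mutex_unlock(&expiry_lock);
		}
	}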
@@ -1669,6 +1706,7 @@
	if (!time_after_eq(jiffies, base->clk))
		return;
 
+	spin_lock(&base->expiry_lock);
	raw_spin_lock_irq(&base->lock);
 
	/*
@@ -1695,8 +1733,8 @@
		while (levels--)
			expire_timers(base, heads + levels);
	}
-	base->running_timer = NULL;
	raw_spin_unlock_irq(&base->lock);
+	spin_unlock(&base->expiry_lock);
 }
 
 /*
@@ -1705,6 +1743,8 @@
 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	irq_work_tick_soft();
 
	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
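Note: irq_work_tick_soft() is an RT-patchset helper; as I understand the series, irq_work items not flagged for hard interrupt context are deferred on RT and drained here, so their callbacks run in preemptible softirq-thread context. Queueing such an item uses the stock API; a sketch with hypothetical names:

	#include <linux/irq_work.h>

	static struct irq_work my_work;

	static void my_work_fn(struct irq_work *work)
	{
		/* on RT this may now run from the timer softirq */
	}

	static void my_setup_and_queue(void)
	{
		init_irq_work(&my_work, my_work_fn);
		irq_work_queue(&my_work);
	}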
@@ -1941,6 +1981,7 @@
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
+		spin_lock_init(&base->expiry_lock);
	}
 }
 