hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/jump_label.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * jump label support
34 *
....@@ -18,7 +19,7 @@
1819 #include <linux/cpu.h>
1920 #include <asm/sections.h>
2021
21
-/* mutex to protect coming/going of the the jump_label table */
22
+/* mutex to protect coming/going of the jump_label table */
2223 static DEFINE_MUTEX(jump_label_mutex);
2324
2425 void jump_label_lock(void)
....@@ -36,23 +37,57 @@
3637 const struct jump_entry *jea = a;
3738 const struct jump_entry *jeb = b;
3839
39
- if (jea->key < jeb->key)
40
+ /*
41
+ * Entries are sorted by key.
42
+ */
43
+ if (jump_entry_key(jea) < jump_entry_key(jeb))
4044 return -1;
4145
42
- if (jea->key > jeb->key)
46
+ if (jump_entry_key(jea) > jump_entry_key(jeb))
47
+ return 1;
48
+
49
+ /*
50
+ * In the batching mode, entries should also be sorted by the code
51
+ * inside the already sorted list of entries, enabling a bsearch in
52
+ * the vector.
53
+ */
54
+ if (jump_entry_code(jea) < jump_entry_code(jeb))
55
+ return -1;
56
+
57
+ if (jump_entry_code(jea) > jump_entry_code(jeb))
4358 return 1;
4459
4560 return 0;
61
+}
62
+
63
+static void jump_label_swap(void *a, void *b, int size)
64
+{
65
+ long delta = (unsigned long)a - (unsigned long)b;
66
+ struct jump_entry *jea = a;
67
+ struct jump_entry *jeb = b;
68
+ struct jump_entry tmp = *jea;
69
+
70
+ jea->code = jeb->code - delta;
71
+ jea->target = jeb->target - delta;
72
+ jea->key = jeb->key - delta;
73
+
74
+ jeb->code = tmp.code + delta;
75
+ jeb->target = tmp.target + delta;
76
+ jeb->key = tmp.key + delta;
4677 }
4778
4879 static void
4980 jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
5081 {
5182 unsigned long size;
83
+ void *swapfn = NULL;
84
+
85
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
86
+ swapfn = jump_label_swap;
5287
5388 size = (((unsigned long)stop - (unsigned long)start)
5489 / sizeof(struct jump_entry));
55
- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
90
+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
5691 }
5792
5893 static void jump_label_update(struct static_key *key);
....@@ -182,13 +217,13 @@
182217 }
183218 EXPORT_SYMBOL_GPL(static_key_disable);
184219
185
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
186
- unsigned long rate_limit,
187
- struct delayed_work *work)
220
+static bool static_key_slow_try_dec(struct static_key *key)
188221 {
189222 int val;
190223
191
- lockdep_assert_cpus_held();
224
+ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
225
+ if (val == 1)
226
+ return false;
192227
193228 /*
194229 * The negative count check is valid even when a negative
....@@ -197,66 +232,70 @@
197232 * returns is unbalanced, because all other static_key_slow_inc()
198233 * instances block while the update is in progress.
199234 */
200
- val = atomic_fetch_add_unless(&key->enabled, -1, 1);
201
- if (val != 1) {
202
- WARN(val < 0, "jump label: negative count!\n");
235
+ WARN(val < 0, "jump label: negative count!\n");
236
+ return true;
237
+}
238
+
239
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
240
+{
241
+ lockdep_assert_cpus_held();
242
+
243
+ if (static_key_slow_try_dec(key))
203244 return;
204
- }
205245
206246 jump_label_lock();
207
- if (atomic_dec_and_test(&key->enabled)) {
208
- if (rate_limit) {
209
- atomic_inc(&key->enabled);
210
- schedule_delayed_work(work, rate_limit);
211
- } else {
212
- jump_label_update(key);
213
- }
214
- }
247
+ if (atomic_dec_and_test(&key->enabled))
248
+ jump_label_update(key);
215249 jump_label_unlock();
216250 }
217251
218
-static void __static_key_slow_dec(struct static_key *key,
219
- unsigned long rate_limit,
220
- struct delayed_work *work)
252
+static void __static_key_slow_dec(struct static_key *key)
221253 {
222254 cpus_read_lock();
223
- __static_key_slow_dec_cpuslocked(key, rate_limit, work);
255
+ __static_key_slow_dec_cpuslocked(key);
224256 cpus_read_unlock();
225257 }
226258
227
-static void jump_label_update_timeout(struct work_struct *work)
259
+void jump_label_update_timeout(struct work_struct *work)
228260 {
229261 struct static_key_deferred *key =
230262 container_of(work, struct static_key_deferred, work.work);
231
- __static_key_slow_dec(&key->key, 0, NULL);
263
+ __static_key_slow_dec(&key->key);
232264 }
265
+EXPORT_SYMBOL_GPL(jump_label_update_timeout);
233266
234267 void static_key_slow_dec(struct static_key *key)
235268 {
236269 STATIC_KEY_CHECK_USE(key);
237
- __static_key_slow_dec(key, 0, NULL);
270
+ __static_key_slow_dec(key);
238271 }
239272 EXPORT_SYMBOL_GPL(static_key_slow_dec);
240273
241274 void static_key_slow_dec_cpuslocked(struct static_key *key)
242275 {
243276 STATIC_KEY_CHECK_USE(key);
244
- __static_key_slow_dec_cpuslocked(key, 0, NULL);
277
+ __static_key_slow_dec_cpuslocked(key);
245278 }
246279
247
-void static_key_slow_dec_deferred(struct static_key_deferred *key)
280
+void __static_key_slow_dec_deferred(struct static_key *key,
281
+ struct delayed_work *work,
282
+ unsigned long timeout)
248283 {
249284 STATIC_KEY_CHECK_USE(key);
250
- __static_key_slow_dec(&key->key, key->timeout, &key->work);
251
-}
252
-EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
253285
254
-void static_key_deferred_flush(struct static_key_deferred *key)
286
+ if (static_key_slow_try_dec(key))
287
+ return;
288
+
289
+ schedule_delayed_work(work, timeout);
290
+}
291
+EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
292
+
293
+void __static_key_deferred_flush(void *key, struct delayed_work *work)
255294 {
256295 STATIC_KEY_CHECK_USE(key);
257
- flush_delayed_work(&key->work);
296
+ flush_delayed_work(work);
258297 }
259
-EXPORT_SYMBOL_GPL(static_key_deferred_flush);
298
+EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
260299
261300 void jump_label_rate_limit(struct static_key_deferred *key,
262301 unsigned long rl)
....@@ -269,22 +308,24 @@
269308
270309 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
271310 {
272
- if (entry->code <= (unsigned long)end &&
273
- entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
311
+ if (jump_entry_code(entry) <= (unsigned long)end &&
312
+ jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
274313 return 1;
275314
276315 return 0;
277316 }
278317
279318 static int __jump_label_text_reserved(struct jump_entry *iter_start,
280
- struct jump_entry *iter_stop, void *start, void *end)
319
+ struct jump_entry *iter_stop, void *start, void *end, bool init)
281320 {
282321 struct jump_entry *iter;
283322
284323 iter = iter_start;
285324 while (iter < iter_stop) {
286
- if (addr_conflict(iter, start, end))
287
- return 1;
325
+ if (init || !jump_entry_is_init(iter)) {
326
+ if (addr_conflict(iter, start, end))
327
+ return 1;
328
+ }
288329 iter++;
289330 }
290331
....@@ -329,16 +370,6 @@
329370 key->type |= JUMP_TYPE_LINKED;
330371 }
331372
332
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
333
-{
334
- return (struct static_key *)((unsigned long)entry->key & ~1UL);
335
-}
336
-
337
-static bool jump_entry_branch(struct jump_entry *entry)
338
-{
339
- return (unsigned long)entry->key & 1UL;
340
-}
341
-
342373 /***
343374 * A 'struct static_key' uses a union such that it either points directly
344375 * to a table of 'struct jump_entry' or to a linked list of modules which in
....@@ -363,30 +394,71 @@
363394 {
364395 struct static_key *key = jump_entry_key(entry);
365396 bool enabled = static_key_enabled(key);
366
- bool branch = jump_entry_branch(entry);
397
+ bool branch = jump_entry_is_branch(entry);
367398
368399 /* See the comment in linux/jump_label.h */
369400 return enabled ^ branch;
370401 }
371402
403
+static bool jump_label_can_update(struct jump_entry *entry, bool init)
404
+{
405
+ /*
406
+ * Cannot update code that was in an init text area.
407
+ */
408
+ if (!init && jump_entry_is_init(entry))
409
+ return false;
410
+
411
+ if (!kernel_text_address(jump_entry_code(entry))) {
412
+ /*
413
+ * This skips patching built-in __exit, which
414
+ * is part of init_section_contains() but is
415
+ * not part of kernel_text_address().
416
+ *
417
+ * Skipping built-in __exit is fine since it
418
+ * will never be executed.
419
+ */
420
+ WARN_ONCE(!jump_entry_is_init(entry),
421
+ "can't patch jump_label at %pS",
422
+ (void *)jump_entry_code(entry));
423
+ return false;
424
+ }
425
+
426
+ return true;
427
+}
428
+
429
+#ifndef HAVE_JUMP_LABEL_BATCH
372430 static void __jump_label_update(struct static_key *key,
373431 struct jump_entry *entry,
374
- struct jump_entry *stop)
432
+ struct jump_entry *stop,
433
+ bool init)
375434 {
376435 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
377
- /*
378
- * An entry->code of 0 indicates an entry which has been
379
- * disabled because it was in an init text area.
380
- */
381
- if (entry->code) {
382
- if (kernel_text_address(entry->code))
383
- arch_jump_label_transform(entry, jump_label_type(entry));
384
- else
385
- WARN_ONCE(1, "can't patch jump_label at %pS",
386
- (void *)(unsigned long)entry->code);
387
- }
436
+ if (jump_label_can_update(entry, init))
437
+ arch_jump_label_transform(entry, jump_label_type(entry));
388438 }
389439 }
440
+#else
441
+static void __jump_label_update(struct static_key *key,
442
+ struct jump_entry *entry,
443
+ struct jump_entry *stop,
444
+ bool init)
445
+{
446
+ for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
447
+
448
+ if (!jump_label_can_update(entry, init))
449
+ continue;
450
+
451
+ if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
452
+ /*
453
+ * Queue is full: Apply the current queue and try again.
454
+ */
455
+ arch_jump_label_transform_apply();
456
+ BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
457
+ }
458
+ }
459
+ arch_jump_label_transform_apply();
460
+}
461
+#endif
390462
391463 void __init jump_label_init(void)
392464 {
....@@ -418,6 +490,9 @@
418490 if (jump_label_type(iter) == JUMP_LABEL_NOP)
419491 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
420492
493
+ if (init_section_contains((void *)jump_entry_code(iter), 1))
494
+ jump_entry_set_init(iter);
495
+
421496 iterk = jump_entry_key(iter);
422497 if (iterk == key)
423498 continue;
....@@ -430,26 +505,13 @@
430505 cpus_read_unlock();
431506 }
432507
433
-/* Disable any jump label entries in __init/__exit code */
434
-void __init jump_label_invalidate_initmem(void)
435
-{
436
- struct jump_entry *iter_start = __start___jump_table;
437
- struct jump_entry *iter_stop = __stop___jump_table;
438
- struct jump_entry *iter;
439
-
440
- for (iter = iter_start; iter < iter_stop; iter++) {
441
- if (init_section_contains((void *)(unsigned long)iter->code, 1))
442
- iter->code = 0;
443
- }
444
-}
445
-
446508 #ifdef CONFIG_MODULES
447509
448510 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
449511 {
450512 struct static_key *key = jump_entry_key(entry);
451513 bool type = static_key_type(key);
452
- bool branch = jump_entry_branch(entry);
514
+ bool branch = jump_entry_is_branch(entry);
453515
454516 /* See the comment in linux/jump_label.h */
455517 return type ^ branch;
....@@ -463,7 +525,7 @@
463525
464526 static inline struct static_key_mod *static_key_mod(struct static_key *key)
465527 {
466
- WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
528
+ WARN_ON_ONCE(!static_key_linked(key));
467529 return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
468530 }
469531
....@@ -487,19 +549,25 @@
487549 static int __jump_label_mod_text_reserved(void *start, void *end)
488550 {
489551 struct module *mod;
552
+ int ret;
490553
491554 preempt_disable();
492555 mod = __module_text_address((unsigned long)start);
493556 WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
557
+ if (!try_module_get(mod))
558
+ mod = NULL;
494559 preempt_enable();
495560
496561 if (!mod)
497562 return 0;
498563
499
-
500
- return __jump_label_text_reserved(mod->jump_entries,
564
+ ret = __jump_label_text_reserved(mod->jump_entries,
501565 mod->jump_entries + mod->num_jump_entries,
502
- start, end);
566
+ start, end, mod->state == MODULE_STATE_COMING);
567
+
568
+ module_put(mod);
569
+
570
+ return ret;
503571 }
504572
505573 static void __jump_label_mod_update(struct static_key *key)
....@@ -522,7 +590,8 @@
522590 stop = __stop___jump_table;
523591 else
524592 stop = m->jump_entries + m->num_jump_entries;
525
- __jump_label_update(key, mod->entries, stop);
593
+ __jump_label_update(key, mod->entries, stop,
594
+ m && m->state == MODULE_STATE_COMING);
526595 }
527596 }
528597
....@@ -568,12 +637,15 @@
568637 for (iter = iter_start; iter < iter_stop; iter++) {
569638 struct static_key *iterk;
570639
640
+ if (within_module_init(jump_entry_code(iter), mod))
641
+ jump_entry_set_init(iter);
642
+
571643 iterk = jump_entry_key(iter);
572644 if (iterk == key)
573645 continue;
574646
575647 key = iterk;
576
- if (within_module(iter->key, mod)) {
648
+ if (within_module((unsigned long)key, mod)) {
577649 static_key_set_entries(key, iter);
578650 continue;
579651 }
....@@ -603,7 +675,7 @@
603675
604676 /* Only update if we've changed from our initial state */
605677 if (jump_label_type(iter) != jump_label_init_type(iter))
606
- __jump_label_update(key, iter, iter_stop);
678
+ __jump_label_update(key, iter, iter_stop, true);
607679 }
608680
609681 return 0;
....@@ -623,7 +695,7 @@
623695
624696 key = jump_entry_key(iter);
625697
626
- if (within_module(iter->key, mod))
698
+ if (within_module((unsigned long)key, mod))
627699 continue;
628700
629701 /* No memory during module load */
....@@ -659,19 +731,6 @@
659731 }
660732 }
661733
662
-/* Disable any jump label entries in module init code */
663
-static void jump_label_invalidate_module_init(struct module *mod)
664
-{
665
- struct jump_entry *iter_start = mod->jump_entries;
666
- struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
667
- struct jump_entry *iter;
668
-
669
- for (iter = iter_start; iter < iter_stop; iter++) {
670
- if (within_module_init(iter->code, mod))
671
- iter->code = 0;
672
- }
673
-}
674
-
675734 static int
676735 jump_label_module_notify(struct notifier_block *self, unsigned long val,
677736 void *data)
....@@ -692,9 +751,6 @@
692751 break;
693752 case MODULE_STATE_GOING:
694753 jump_label_del_module(mod);
695
- break;
696
- case MODULE_STATE_LIVE:
697
- jump_label_invalidate_module_init(mod);
698754 break;
699755 }
700756
....@@ -732,8 +788,9 @@
732788 */
733789 int jump_label_text_reserved(void *start, void *end)
734790 {
791
+ bool init = system_state < SYSTEM_RUNNING;
735792 int ret = __jump_label_text_reserved(__start___jump_table,
736
- __stop___jump_table, start, end);
793
+ __stop___jump_table, start, end, init);
737794
738795 if (ret)
739796 return ret;
....@@ -765,7 +822,8 @@
765822 entry = static_key_entries(key);
766823 /* if there are no users, entry can be NULL */
767824 if (entry)
768
- __jump_label_update(key, entry, stop);
825
+ __jump_label_update(key, entry, stop,
826
+ system_state < SYSTEM_RUNNING);
769827 }
770828
771829 #ifdef CONFIG_STATIC_KEYS_SELFTEST