@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * jump label support
  *
@@ -18,7 +19,7 @@
 #include <linux/cpu.h>
 #include <asm/sections.h>
 
-/* mutex to protect coming/going of the the jump_label table */
+/* mutex to protect coming/going of the jump_label table */
 static DEFINE_MUTEX(jump_label_mutex);
 
 void jump_label_lock(void)
@@ -36,23 +37,57 @@
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;
 
-       if (jea->key < jeb->key)
+       /*
+        * Entries are sorted by key.
+        */
+       if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;
 
-       if (jea->key > jeb->key)
+       if (jump_entry_key(jea) > jump_entry_key(jeb))
+               return 1;
+
+       /*
+        * In batching mode, entries should also be sorted by the code
+        * inside the already sorted list of entries, enabling a bsearch in
+        * the vector.
+        */
+       if (jump_entry_code(jea) < jump_entry_code(jeb))
+               return -1;
+
+       if (jump_entry_code(jea) > jump_entry_code(jeb))
                return 1;
 
        return 0;
+}
+
+static void jump_label_swap(void *a, void *b, int size)
+{
+       long delta = (unsigned long)a - (unsigned long)b;
+       struct jump_entry *jea = a;
+       struct jump_entry *jeb = b;
+       struct jump_entry tmp = *jea;
+
+       jea->code = jeb->code - delta;
+       jea->target = jeb->target - delta;
+       jea->key = jeb->key - delta;
+
+       jeb->code = tmp.code + delta;
+       jeb->target = tmp.target + delta;
+       jeb->key = tmp.key + delta;
 }
 
 static void
 jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 {
        unsigned long size;
+       void *swapfn = NULL;
+
+       if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
+               swapfn = jump_label_swap;
 
        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
-       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
 }
 
 static void jump_label_update(struct static_key *key);
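A note on the hunk above: with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, each jump_entry field stores an offset relative to the field's own address rather than an absolute pointer, so sort() must be given the custom jump_label_swap(); a plain byte-wise swap would move the fields without rebasing their offsets. A minimal sketch of how such a relative accessor resolves, modeled on the jump_entry accessors in include/linux/jump_label.h:

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
        /* The stored value is an offset from &entry->code itself. */
        return (unsigned long)&entry->code + entry->code;
}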
@@ -182,13 +217,13 @@
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-                                             unsigned long rate_limit,
-                                             struct delayed_work *work)
+static bool static_key_slow_try_dec(struct static_key *key)
 {
        int val;
 
-       lockdep_assert_cpus_held();
+       val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+       if (val == 1)
+               return false;
 
        /*
         * The negative count check is valid even when a negative
@@ -197,66 +232,70 @@
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
-       val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-       if (val != 1) {
-               WARN(val < 0, "jump label: negative count!\n");
+       WARN(val < 0, "jump label: negative count!\n");
+       return true;
+}
+
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+       lockdep_assert_cpus_held();
+
+       if (static_key_slow_try_dec(key))
                return;
-       }
 
        jump_label_lock();
-       if (atomic_dec_and_test(&key->enabled)) {
-               if (rate_limit) {
-                       atomic_inc(&key->enabled);
-                       schedule_delayed_work(work, rate_limit);
-               } else {
-                       jump_label_update(key);
-               }
-       }
+       if (atomic_dec_and_test(&key->enabled))
+               jump_label_update(key);
        jump_label_unlock();
 }
 
-static void __static_key_slow_dec(struct static_key *key,
-                                  unsigned long rate_limit,
-                                  struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
 {
        cpus_read_lock();
-       __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+       __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
 }
 
-static void jump_label_update_timeout(struct work_struct *work)
+void jump_label_update_timeout(struct work_struct *work)
 {
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
-       __static_key_slow_dec(&key->key, 0, NULL);
+       __static_key_slow_dec(&key->key);
 }
+EXPORT_SYMBOL_GPL(jump_label_update_timeout);
 
 void static_key_slow_dec(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(key, 0, NULL);
+       __static_key_slow_dec(key);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_cpuslocked(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec_cpuslocked(key, 0, NULL);
+       __static_key_slow_dec_cpuslocked(key);
 }
 
-void static_key_slow_dec_deferred(struct static_key_deferred *key)
+void __static_key_slow_dec_deferred(struct static_key *key,
+                                    struct delayed_work *work,
+                                    unsigned long timeout)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(&key->key, key->timeout, &key->work);
-}
-EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
-void static_key_deferred_flush(struct static_key_deferred *key)
+       if (static_key_slow_try_dec(key))
+               return;
+
+       schedule_delayed_work(work, timeout);
+}
+EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
+
+void __static_key_deferred_flush(void *key, struct delayed_work *work)
 {
        STATIC_KEY_CHECK_USE(key);
-       flush_delayed_work(&key->work);
+       flush_delayed_work(work);
 }
-EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
 
 void jump_label_rate_limit(struct static_key_deferred *key,
                            unsigned long rl)
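The hunk above splits the rate-limit plumbing out of the slow-dec path: static_key_slow_try_dec() handles reference counts above 1 without taking jump_label_lock, and the deferred variant now schedules its own work item instead of threading rate_limit/work parameters through every caller. A sketch of the inline wrappers this export pair presumably backs (assumed to live in include/linux/jump_label_ratelimit.h; the wrapper bodies are an inference from the new signatures, not copied from the patch):

static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        /* Forward the embedded key, work item, and timeout to the core. */
        __static_key_slow_dec_deferred(&key->key, &key->work, key->timeout);
}

static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
        __static_key_deferred_flush(&key->key, &key->work);
}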
@@ -269,22 +308,24 @@
 
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
-       if (entry->code <= (unsigned long)end &&
-           entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+       if (jump_entry_code(entry) <= (unsigned long)end &&
+           jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;
 
        return 0;
 }
 
 static int __jump_label_text_reserved(struct jump_entry *iter_start,
-               struct jump_entry *iter_stop, void *start, void *end)
+               struct jump_entry *iter_stop, void *start, void *end, bool init)
 {
        struct jump_entry *iter;
 
        iter = iter_start;
        while (iter < iter_stop) {
-               if (addr_conflict(iter, start, end))
-                       return 1;
+               if (init || !jump_entry_is_init(iter)) {
+                       if (addr_conflict(iter, start, end))
+                               return 1;
+               }
                iter++;
        }
 
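With this hunk, reservation checks skip entries whose code lives in already-freed __init text unless the caller passes init=true. The init state is now a per-entry flag rather than the old convention of zeroing entry->code. A sketch of the predicate, assuming the common encoding in which the low bits of the key field carry per-entry flags (the exact bit assignment here is an assumption; see include/linux/jump_label.h for the authoritative accessors):

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
        /* Assumption: bit 0 of ->key encodes the branch default and
         * bit 1 marks an entry located in __init text. */
        return (unsigned long)entry->key & 2UL;
}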
@@ -329,16 +370,6 @@
        key->type |= JUMP_TYPE_LINKED;
 }
 
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
-{
-       return (struct static_key *)((unsigned long)entry->key & ~1UL);
-}
-
-static bool jump_entry_branch(struct jump_entry *entry)
-{
-       return (unsigned long)entry->key & 1UL;
-}
-
 /***
  * A 'struct static_key' uses a union such that it either points directly
  * to a table of 'struct jump_entry' or to a linked list of modules which in
@@ -363,30 +394,71 @@
 {
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
-       bool branch = jump_entry_branch(entry);
+       bool branch = jump_entry_is_branch(entry);
 
        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
 }
 
+static bool jump_label_can_update(struct jump_entry *entry, bool init)
+{
+       /*
+        * Cannot update code that was in an init text area.
+        */
+       if (!init && jump_entry_is_init(entry))
+               return false;
+
+       if (!kernel_text_address(jump_entry_code(entry))) {
+               /*
+                * This skips patching built-in __exit, which
+                * is part of init_section_contains() but is
+                * not part of kernel_text_address().
+                *
+                * Skipping built-in __exit is fine since it
+                * will never be executed.
+                */
+               WARN_ONCE(!jump_entry_is_init(entry),
+                         "can't patch jump_label at %pS",
+                         (void *)jump_entry_code(entry));
+               return false;
+       }
+
+       return true;
+}
+
+#ifndef HAVE_JUMP_LABEL_BATCH
 static void __jump_label_update(struct static_key *key,
                                 struct jump_entry *entry,
-                                struct jump_entry *stop)
+                                struct jump_entry *stop,
+                                bool init)
 {
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
-               /*
-                * An entry->code of 0 indicates an entry which has been
-                * disabled because it was in an init text area.
-                */
-               if (entry->code) {
-                       if (kernel_text_address(entry->code))
-                               arch_jump_label_transform(entry, jump_label_type(entry));
-                       else
-                               WARN_ONCE(1, "can't patch jump_label at %pS",
-                                         (void *)(unsigned long)entry->code);
-               }
+               if (jump_label_can_update(entry, init))
+                       arch_jump_label_transform(entry, jump_label_type(entry));
        }
 }
+#else
+static void __jump_label_update(struct static_key *key,
+                                struct jump_entry *entry,
+                                struct jump_entry *stop,
+                                bool init)
+{
+       for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
+
+               if (!jump_label_can_update(entry, init))
+                       continue;
+
+               if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
+                       /*
+                        * Queue is full: Apply the current queue and try again.
+                        */
+                       arch_jump_label_transform_apply();
+                       BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
+               }
+       }
+       arch_jump_label_transform_apply();
+}
+#endif
 
 void __init jump_label_init(void)
 {
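In the batching variant above, arch_jump_label_transform_queue() returns false when the architecture's patch queue is full; the caller then flushes with arch_jump_label_transform_apply() and requeues, and the BUG_ON() asserts that a freshly flushed queue always has room for one entry. An architecture that opts out of real batching could satisfy the same contract trivially; a hypothetical sketch (not the actual x86 implementation, which coalesces text-poke operations):

bool arch_jump_label_transform_queue(struct jump_entry *entry,
                                     enum jump_label_type type)
{
        /* Degenerate "queue": patch immediately and never report full. */
        arch_jump_label_transform(entry, type);
        return true;
}

void arch_jump_label_transform_apply(void)
{
        /* Nothing is buffered, so there is nothing to flush. */
}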
@@ -418,6 +490,9 @@
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 
+               if (init_section_contains((void *)jump_entry_code(iter), 1))
+                       jump_entry_set_init(iter);
+
                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;
@@ -430,26 +505,13 @@
        cpus_read_unlock();
 }
 
-/* Disable any jump label entries in __init/__exit code */
-void __init jump_label_invalidate_initmem(void)
-{
-       struct jump_entry *iter_start = __start___jump_table;
-       struct jump_entry *iter_stop = __stop___jump_table;
-       struct jump_entry *iter;
-
-       for (iter = iter_start; iter < iter_stop; iter++) {
-               if (init_section_contains((void *)(unsigned long)iter->code, 1))
-                       iter->code = 0;
-       }
-}
-
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
 {
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
-       bool branch = jump_entry_branch(entry);
+       bool branch = jump_entry_is_branch(entry);
 
        /* See the comment in linux/jump_label.h */
        return type ^ branch;
@@ -463,7 +525,7 @@
 
 static inline struct static_key_mod *static_key_mod(struct static_key *key)
 {
-       WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+       WARN_ON_ONCE(!static_key_linked(key));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
 }
 
@@ -487,19 +549,25 @@
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
        struct module *mod;
+       int ret;
 
        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+       if (!try_module_get(mod))
+               mod = NULL;
        preempt_enable();
 
        if (!mod)
                return 0;
 
-
-       return __jump_label_text_reserved(mod->jump_entries,
+       ret = __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
-                               start, end);
+                               start, end, mod->state == MODULE_STATE_COMING);
+
+       module_put(mod);
+
+       return ret;
 }
 
 static void __jump_label_mod_update(struct static_key *key)
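The try_module_get() added above closes a race: preempt_disable() only keeps the module-list walk in __module_text_address() safe, so without a reference the module could be unloaded once preempt_enable() runs, while __jump_label_text_reserved() is still walking its jump_entries. The same pattern in isolation, as a hypothetical helper (not part of the patch); note that try_module_get(NULL) succeeds, so a NULL result from __module_text_address() passes through unchanged:

static struct module *pin_module_by_text_addr(unsigned long addr)
{
        struct module *mod;

        preempt_disable();
        mod = __module_text_address(addr);
        if (!try_module_get(mod))
                mod = NULL;     /* module is being unloaded */
        preempt_enable();

        return mod;             /* caller drops with module_put() */
}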
@@ -522,7 +590,8 @@
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
-               __jump_label_update(key, mod->entries, stop);
+               __jump_label_update(key, mod->entries, stop,
+                                   m && m->state == MODULE_STATE_COMING);
        }
 }
 
@@ -568,12 +637,15 @@
        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
 
+               if (within_module_init(jump_entry_code(iter), mod))
+                       jump_entry_set_init(iter);
+
                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;
 
                key = iterk;
-               if (within_module(iter->key, mod)) {
+               if (within_module((unsigned long)key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }
@@ -603,7 +675,7 @@
 
                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
-                       __jump_label_update(key, iter, iter_stop);
+                       __jump_label_update(key, iter, iter_stop, true);
        }
 
        return 0;
@@ -623,7 +695,7 @@
 
                key = jump_entry_key(iter);
 
-               if (within_module(iter->key, mod))
+               if (within_module((unsigned long)key, mod))
                        continue;
 
                /* No memory during module load */
@@ -659,19 +731,6 @@
        }
 }
 
-/* Disable any jump label entries in module init code */
-static void jump_label_invalidate_module_init(struct module *mod)
-{
-       struct jump_entry *iter_start = mod->jump_entries;
-       struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-       struct jump_entry *iter;
-
-       for (iter = iter_start; iter < iter_stop; iter++) {
-               if (within_module_init(iter->code, mod))
-                       iter->code = 0;
-       }
-}
-
 static int
 jump_label_module_notify(struct notifier_block *self, unsigned long val,
                          void *data)
@@ -692,9 +751,6 @@
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
-               break;
-       case MODULE_STATE_LIVE:
-               jump_label_invalidate_module_init(mod);
                break;
        }
 
@@ -732,8 +788,9 @@
  */
 int jump_label_text_reserved(void *start, void *end)
 {
+       bool init = system_state < SYSTEM_RUNNING;
        int ret = __jump_label_text_reserved(__start___jump_table,
-                                       __stop___jump_table, start, end);
+                                       __stop___jump_table, start, end, init);
 
        if (ret)
                return ret;
@@ -765,7 +822,8 @@
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
-               __jump_label_update(key, entry, stop);
+               __jump_label_update(key, entry, stop,
+                                   system_state < SYSTEM_RUNNING);
 }
 
 #ifdef CONFIG_STATIC_KEYS_SELFTEST
---|