 .. |  .. |
    |   1 | +// SPDX-License-Identifier: GPL-2.0-or-later
  1 |   2 |  /*
  2 |   3 |   * transition.c - Kernel Live Patching transition functions
  3 |   4 |   *
  4 |   5 |   * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
  5 |     | - *
  6 |     | - * This program is free software; you can redistribute it and/or
  7 |     | - * modify it under the terms of the GNU General Public License
  8 |     | - * as published by the Free Software Foundation; either version 2
  9 |     | - * of the License, or (at your option) any later version.
 10 |     | - *
 11 |     | - * This program is distributed in the hope that it will be useful,
 12 |     | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 |     | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 14 |     | - * GNU General Public License for more details.
 15 |     | - *
 16 |     | - * You should have received a copy of the GNU General Public License
 17 |     | - * along with this program; if not, see <http://www.gnu.org/licenses/>.
 18 |   6 |   */
 19 |   7 |
 20 |   8 |  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 .. |  .. |
 29 |  17 |  #define MAX_STACK_ENTRIES 100
 30 |  18 |  #define STACK_ERR_BUF_SIZE 128
 31 |  19 |
    |  20 | +#define SIGNALS_TIMEOUT 15
    |  21 | +
 32 |  22 |  struct klp_patch *klp_transition_patch;
 33 |  23 |
 34 |  24 |  static int klp_target_state = KLP_UNDEFINED;
 35 |  25 |
 36 |     | -static bool klp_forced = false;
    |  26 | +static unsigned int klp_signals_cnt;
 37 |  27 |
 38 |  28 |  /*
 39 |  29 |   * This work can be performed periodically to finish patching or unpatching any
 .. |  .. |
 52 |  42 |
 53 |  43 |  /*
 54 |  44 |   * This function is just a stub to implement a hard force
 55 |     | - * of synchronize_sched(). This requires synchronizing
    |  45 | + * of synchronize_rcu(). This requires synchronizing
 56 |  46 |   * tasks even in userspace and idle.
 57 |  47 |   */
 58 |  48 |  static void klp_sync(struct work_struct *work)
 .. |  .. |
 86 |  76 |          pr_debug("'%s': completing %s transition\n",
 87 |  77 |                   klp_transition_patch->mod->name,
 88 |  78 |                   klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
    |  79 | +
    |  80 | +        if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
    |  81 | +                klp_unpatch_replaced_patches(klp_transition_patch);
    |  82 | +                klp_discard_nops(klp_transition_patch);
    |  83 | +        }
 89 |  84 |
 90 |  85 |          if (klp_target_state == KLP_UNPATCHED) {
 91 |  86 |                  /*
 .. |  .. |
136 | 131 |          pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
137 | 132 |                    klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
138 | 133 |
139 |     | -        /*
140 |     | -         * klp_forced set implies unbounded increase of module's ref count if
141 |     | -         * the module is disabled/enabled in a loop.
142 |     | -         */
143 |     | -        if (!klp_forced && klp_target_state == KLP_UNPATCHED)
144 |     | -                module_put(klp_transition_patch->mod);
145 |     | -
146 | 134 |          klp_target_state = KLP_UNDEFINED;
147 | 135 |          klp_transition_patch = NULL;
148 | 136 |  }
 .. |  .. |
175 | 163 |  void klp_update_patch_state(struct task_struct *task)
176 | 164 |  {
177 | 165 |          /*
178 |     | -         * A variant of synchronize_sched() is used to allow patching functions
    | 166 | +         * A variant of synchronize_rcu() is used to allow patching functions
179 | 167 |           * where RCU is not watching, see klp_synchronize_transition().
180 | 168 |           */
181 | 169 |          preempt_disable_notrace();
 .. |  .. |
202 | 190 |   * Determine whether the given stack trace includes any references to a
203 | 191 |   * to-be-patched or to-be-unpatched function.
204 | 192 |   */
205 |     | -static int klp_check_stack_func(struct klp_func *func,
206 |     | -                                struct stack_trace *trace)
    | 193 | +static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
    | 194 | +                                unsigned int nr_entries)
207 | 195 |  {
208 | 196 |          unsigned long func_addr, func_size, address;
209 | 197 |          struct klp_ops *ops;
210 | 198 |          int i;
211 | 199 |
212 |     | -        for (i = 0; i < trace->nr_entries; i++) {
213 |     | -                address = trace->entries[i];
    | 200 | +        for (i = 0; i < nr_entries; i++) {
    | 201 | +                address = entries[i];
214 | 202 |
215 | 203 |                  if (klp_target_state == KLP_UNPATCHED) {
216 | 204 |                          /*
 .. |  .. |
224 | 212 |                           * Check for the to-be-patched function
225 | 213 |                           * (the previous func).
226 | 214 |                           */
227 |     | -                        ops = klp_find_ops(func->old_addr);
    | 215 | +                        ops = klp_find_ops(func->old_func);
228 | 216 |
229 | 217 |                          if (list_is_singular(&ops->func_stack)) {
230 | 218 |                                  /* original function */
231 |     | -                                func_addr = func->old_addr;
    | 219 | +                                func_addr = (unsigned long)func->old_func;
232 | 220 |                                  func_size = func->old_size;
233 | 221 |                          } else {
234 | 222 |                                  /* previously patched function */
 .. |  .. |
254 | 242 |  static int klp_check_stack(struct task_struct *task, char *err_buf)
255 | 243 |  {
256 | 244 |          static unsigned long entries[MAX_STACK_ENTRIES];
257 |     | -        struct stack_trace trace;
258 | 245 |          struct klp_object *obj;
259 | 246 |          struct klp_func *func;
260 |     | -        int ret;
    | 247 | +        int ret, nr_entries;
261 | 248 |
262 |     | -        trace.skip = 0;
263 |     | -        trace.nr_entries = 0;
264 |     | -        trace.max_entries = MAX_STACK_ENTRIES;
265 |     | -        trace.entries = entries;
266 |     | -        ret = save_stack_trace_tsk_reliable(task, &trace);
267 |     | -        WARN_ON_ONCE(ret == -ENOSYS);
268 |     | -        if (ret) {
    | 249 | +        ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
    | 250 | +        if (ret < 0) {
269 | 251 |                  snprintf(err_buf, STACK_ERR_BUF_SIZE,
270 | 252 |                           "%s: %s:%d has an unreliable stack\n",
271 | 253 |                           __func__, task->comm, task->pid);
272 | 254 |                  return ret;
273 | 255 |          }
    | 256 | +        nr_entries = ret;
274 | 257 |
275 | 258 |          klp_for_each_object(klp_transition_patch, obj) {
276 | 259 |                  if (!obj->patched)
277 | 260 |                          continue;
278 | 261 |                  klp_for_each_func(obj, func) {
279 |     | -                        ret = klp_check_stack_func(func, &trace);
    | 262 | +                        ret = klp_check_stack_func(func, entries, nr_entries);
280 | 263 |                          if (ret) {
281 | 264 |                                  snprintf(err_buf, STACK_ERR_BUF_SIZE,
282 | 265 |                                           "%s: %s:%d is sleeping on function %s\n",
 .. |  .. |
297 | 280 |   */
298 | 281 |  static bool klp_try_switch_task(struct task_struct *task)
299 | 282 |  {
    | 283 | +        static char err_buf[STACK_ERR_BUF_SIZE];
300 | 284 |          struct rq *rq;
301 | 285 |          struct rq_flags flags;
302 | 286 |          int ret;
303 | 287 |          bool success = false;
304 |     | -        char err_buf[STACK_ERR_BUF_SIZE];
305 | 288 |
306 | 289 |          err_buf[0] = '\0';
307 | 290 |
308 | 291 |          /* check if this task has already switched over */
309 | 292 |          if (task->patch_state == klp_target_state)
310 | 293 |                  return true;
    | 294 | +
    | 295 | +        /*
    | 296 | +         * For arches which don't have reliable stack traces, we have to rely
    | 297 | +         * on other methods (e.g., switching tasks at kernel exit).
    | 298 | +         */
    | 299 | +        if (!klp_have_reliable_stack())
    | 300 | +                return false;
311 | 301 |
312 | 302 |          /*
313 | 303 |           * Now try to check the stack for any to-be-patched or to-be-unpatched
 .. |  .. |
344 | 334 |          pr_debug("%s", err_buf);
345 | 335 |
346 | 336 |          return success;
    | 337 | +}
347 | 338 |
    | 339 | +/*
    | 340 | + * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
    | 341 | + * Kthreads with TIF_PATCH_PENDING set are woken up.
    | 342 | + */
    | 343 | +static void klp_send_signals(void)
    | 344 | +{
    | 345 | +        struct task_struct *g, *task;
    | 346 | +
    | 347 | +        if (klp_signals_cnt == SIGNALS_TIMEOUT)
    | 348 | +                pr_notice("signaling remaining tasks\n");
    | 349 | +
    | 350 | +        read_lock(&tasklist_lock);
    | 351 | +        for_each_process_thread(g, task) {
    | 352 | +                if (!klp_patch_pending(task))
    | 353 | +                        continue;
    | 354 | +
    | 355 | +                /*
    | 356 | +                 * There is a small race here. We could see TIF_PATCH_PENDING
    | 357 | +                 * set and decide to wake up a kthread or send a fake signal.
    | 358 | +                 * Meanwhile the task could migrate itself and the action
    | 359 | +                 * would be meaningless. It is not serious though.
    | 360 | +                 */
    | 361 | +                if (task->flags & PF_KTHREAD) {
    | 362 | +                        /*
    | 363 | +                         * Wake up a kthread which sleeps interruptedly and
    | 364 | +                         * still has not been migrated.
    | 365 | +                         */
    | 366 | +                        wake_up_state(task, TASK_INTERRUPTIBLE);
    | 367 | +                } else {
    | 368 | +                        /*
    | 369 | +                         * Send fake signal to all non-kthread tasks which are
    | 370 | +                         * still not migrated.
    | 371 | +                         */
    | 372 | +                        spin_lock_irq(&task->sighand->siglock);
    | 373 | +                        signal_wake_up(task, 0);
    | 374 | +                        spin_unlock_irq(&task->sighand->siglock);
    | 375 | +                }
    | 376 | +        }
    | 377 | +        read_unlock(&tasklist_lock);
348 | 378 |  }
349 | 379 |
350 | 380 |  /*
 .. |  .. |
359 | 389 |  {
360 | 390 |          unsigned int cpu;
361 | 391 |          struct task_struct *g, *task;
    | 392 | +        struct klp_patch *patch;
362 | 393 |          bool complete = true;
363 | 394 |
364 | 395 |          WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
 .. |  .. |
396 | 427 |          put_online_cpus();
397 | 428 |
398 | 429 |          if (!complete) {
    | 430 | +                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
    | 431 | +                        klp_send_signals();
    | 432 | +                klp_signals_cnt++;
    | 433 | +
399 | 434 |                  /*
400 | 435 |                   * Some tasks weren't able to be switched over. Try again
401 | 436 |                   * later and/or wait for other methods like kernel exit
 .. |  .. |
407 | 442 |          }
408 | 443 |
409 | 444 |          /* we're done, now cleanup the data structures */
    | 445 | +        patch = klp_transition_patch;
410 | 446 |          klp_complete_transition();
    | 447 | +
    | 448 | +        /*
    | 449 | +         * It would make more sense to free the unused patches in
    | 450 | +         * klp_complete_transition() but it is called also
    | 451 | +         * from klp_cancel_transition().
    | 452 | +         */
    | 453 | +        if (!patch->enabled)
    | 454 | +                klp_free_patch_async(patch);
    | 455 | +        else if (patch->replace)
    | 456 | +                klp_free_replaced_patches_async(patch);
411 | 457 |  }
412 | 458 |
413 | 459 |  /*
 .. |  .. |
446 | 492 |                  if (task->patch_state != klp_target_state)
447 | 493 |                          set_tsk_thread_flag(task, TIF_PATCH_PENDING);
448 | 494 |          }
    | 495 | +
    | 496 | +        klp_signals_cnt = 0;
449 | 497 |  }
450 | 498 |
451 | 499 |  /*
 .. |  .. |
563 | 611 |  /* Called from copy_process() during fork */
564 | 612 |  void klp_copy_process(struct task_struct *child)
565 | 613 |  {
    | 614 | +
    | 615 | +        /*
    | 616 | +         * The parent process may have gone through a KLP transition since
    | 617 | +         * the thread flag was copied in setup_thread_stack earlier. Bring
    | 618 | +         * the task flag up to date with the parent here.
    | 619 | +         *
    | 620 | +         * The operation is serialized against all klp_*_transition()
    | 621 | +         * operations by the tasklist_lock. The only exception is
    | 622 | +         * klp_update_patch_state(current), but we cannot race with
    | 623 | +         * that because we are current.
    | 624 | +         */
    | 625 | +        if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
    | 626 | +                set_tsk_thread_flag(child, TIF_PATCH_PENDING);
    | 627 | +        else
    | 628 | +                clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
    | 629 | +
566 | 630 |          child->patch_state = current->patch_state;
567 |     | -
568 |     | -        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
569 |     | -}
570 |     | -
571 |     | -/*
572 |     | - * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
573 |     | - * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
574 |     | - * action currently.
575 |     | - */
576 |     | -void klp_send_signals(void)
577 |     | -{
578 |     | -        struct task_struct *g, *task;
579 |     | -
580 |     | -        pr_notice("signaling remaining tasks\n");
581 |     | -
582 |     | -        read_lock(&tasklist_lock);
583 |     | -        for_each_process_thread(g, task) {
584 |     | -                if (!klp_patch_pending(task))
585 |     | -                        continue;
586 |     | -
587 |     | -                /*
588 |     | -                 * There is a small race here. We could see TIF_PATCH_PENDING
589 |     | -                 * set and decide to wake up a kthread or send a fake signal.
590 |     | -                 * Meanwhile the task could migrate itself and the action
591 |     | -                 * would be meaningless. It is not serious though.
592 |     | -                 */
593 |     | -                if (task->flags & PF_KTHREAD) {
594 |     | -                        /*
595 |     | -                         * Wake up a kthread which sleeps interruptedly and
596 |     | -                         * still has not been migrated.
597 |     | -                         */
598 |     | -                        wake_up_state(task, TASK_INTERRUPTIBLE);
599 |     | -                } else {
600 |     | -                        /*
601 |     | -                         * Send fake signal to all non-kthread tasks which are
602 |     | -                         * still not migrated.
603 |     | -                         */
604 |     | -                        spin_lock_irq(&task->sighand->siglock);
605 |     | -                        signal_wake_up(task, 0);
606 |     | -                        spin_unlock_irq(&task->sighand->siglock);
607 |     | -                }
608 |     | -        }
609 |     | -        read_unlock(&tasklist_lock);
610 | 631 |  }
611 | 632 |
612 | 633 |  /*
 .. |  .. |
620 | 641 |   */
621 | 642 |  void klp_force_transition(void)
622 | 643 |  {
    | 644 | +        struct klp_patch *patch;
623 | 645 |          struct task_struct *g, *task;
624 | 646 |          unsigned int cpu;
625 | 647 |
 .. |  .. |
633 | 655 |          for_each_possible_cpu(cpu)
634 | 656 |                  klp_update_patch_state(idle_task(cpu));
635 | 657 |
636 |     | -        klp_forced = true;
    | 658 | +        klp_for_each_patch(patch)
    | 659 | +                patch->forced = true;
637 | 660 |  }