2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/livepatch/transition.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * transition.c - Kernel Live Patching transition functions
  *
  * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,11 +17,13 @@
 #define MAX_STACK_ENTRIES	100
 #define STACK_ERR_BUF_SIZE	128
 
+#define SIGNALS_TIMEOUT 15
+
 struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
-static bool klp_forced = false;
+static unsigned int klp_signals_cnt;
 
 /*
  * This work can be performed periodically to finish patching or unpatching any
@@ -52,7 +42,7 @@
 
 /*
  * This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
  * tasks even in userspace and idle.
  */
 static void klp_sync(struct work_struct *work)
@@ -86,6 +76,11 @@
 	pr_debug("'%s': completing %s transition\n",
 		 klp_transition_patch->mod->name,
 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
+	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
+		klp_unpatch_replaced_patches(klp_transition_patch);
+		klp_discard_nops(klp_transition_patch);
+	}
 
 	if (klp_target_state == KLP_UNPATCHED) {
 		/*
@@ -136,13 +131,6 @@
 	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-	/*
-	 * klp_forced set implies unbounded increase of module's ref count if
-	 * the module is disabled/enabled in a loop.
-	 */
-	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
-		module_put(klp_transition_patch->mod);
-
 	klp_target_state = KLP_UNDEFINED;
 	klp_transition_patch = NULL;
 }
@@ -175,7 +163,7 @@
 void klp_update_patch_state(struct task_struct *task)
 {
 	/*
-	 * A variant of synchronize_sched() is used to allow patching functions
+	 * A variant of synchronize_rcu() is used to allow patching functions
 	 * where RCU is not watching, see klp_synchronize_transition().
 	 */
 	preempt_disable_notrace();
@@ -202,15 +190,15 @@
  * Determine whether the given stack trace includes any references to a
  * to-be-patched or to-be-unpatched function.
  */
-static int klp_check_stack_func(struct klp_func *func,
-				struct stack_trace *trace)
+static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
+				unsigned int nr_entries)
 {
 	unsigned long func_addr, func_size, address;
 	struct klp_ops *ops;
 	int i;
 
-	for (i = 0; i < trace->nr_entries; i++) {
-		address = trace->entries[i];
+	for (i = 0; i < nr_entries; i++) {
+		address = entries[i];
 
 		if (klp_target_state == KLP_UNPATCHED) {
 			/*
@@ -224,11 +212,11 @@
 			 * Check for the to-be-patched function
 			 * (the previous func).
 			 */
-			ops = klp_find_ops(func->old_addr);
+			ops = klp_find_ops(func->old_func);
 
 			if (list_is_singular(&ops->func_stack)) {
 				/* original function */
-				func_addr = func->old_addr;
+				func_addr = (unsigned long)func->old_func;
 				func_size = func->old_size;
 			} else {
 				/* previously patched function */
@@ -254,29 +242,24 @@
 static int klp_check_stack(struct task_struct *task, char *err_buf)
 {
 	static unsigned long entries[MAX_STACK_ENTRIES];
-	struct stack_trace trace;
 	struct klp_object *obj;
 	struct klp_func *func;
-	int ret;
+	int ret, nr_entries;
 
-	trace.skip = 0;
-	trace.nr_entries = 0;
-	trace.max_entries = MAX_STACK_ENTRIES;
-	trace.entries = entries;
-	ret = save_stack_trace_tsk_reliable(task, &trace);
-	WARN_ON_ONCE(ret == -ENOSYS);
-	if (ret) {
+	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
+	if (ret < 0) {
 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
 			 "%s: %s:%d has an unreliable stack\n",
 			 __func__, task->comm, task->pid);
 		return ret;
 	}
+	nr_entries = ret;
 
 	klp_for_each_object(klp_transition_patch, obj) {
 		if (!obj->patched)
 			continue;
 		klp_for_each_func(obj, func) {
-			ret = klp_check_stack_func(func, &trace);
+			ret = klp_check_stack_func(func, entries, nr_entries);
 			if (ret) {
 				snprintf(err_buf, STACK_ERR_BUF_SIZE,
 					 "%s: %s:%d is sleeping on function %s\n",
@@ -297,17 +280,24 @@
  */
 static bool klp_try_switch_task(struct task_struct *task)
 {
+	static char err_buf[STACK_ERR_BUF_SIZE];
 	struct rq *rq;
 	struct rq_flags flags;
 	int ret;
 	bool success = false;
-	char err_buf[STACK_ERR_BUF_SIZE];
 
 	err_buf[0] = '\0';
 
 	/* check if this task has already switched over */
 	if (task->patch_state == klp_target_state)
 		return true;
+
+	/*
+	 * For arches which don't have reliable stack traces, we have to rely
+	 * on other methods (e.g., switching tasks at kernel exit).
+	 */
+	if (!klp_have_reliable_stack())
+		return false;
 
 	/*
 	 * Now try to check the stack for any to-be-patched or to-be-unpatched
@@ -344,7 +334,47 @@
 	pr_debug("%s", err_buf);
 
 	return success;
+}
 }
+
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up.
+ */
+static void klp_send_signals(void)
+{
+	struct task_struct *g, *task;
+
+	if (klp_signals_cnt == SIGNALS_TIMEOUT)
+		pr_notice("signaling remaining tasks\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task) {
+		if (!klp_patch_pending(task))
+			continue;
+
+		/*
+		 * There is a small race here. We could see TIF_PATCH_PENDING
+		 * set and decide to wake up a kthread or send a fake signal.
+		 * Meanwhile the task could migrate itself and the action
+		 * would be meaningless. It is not serious though.
+		 */
+		if (task->flags & PF_KTHREAD) {
+			/*
+			 * Wake up a kthread which sleeps interruptedly and
+			 * still has not been migrated.
+			 */
+			wake_up_state(task, TASK_INTERRUPTIBLE);
+		} else {
+			/*
+			 * Send fake signal to all non-kthread tasks which are
+			 * still not migrated.
+			 */
+			spin_lock_irq(&task->sighand->siglock);
+			signal_wake_up(task, 0);
+			spin_unlock_irq(&task->sighand->siglock);
+		}
+	}
+	read_unlock(&tasklist_lock);
 }
 
 /*
@@ -359,6 +389,7 @@
 {
 	unsigned int cpu;
 	struct task_struct *g, *task;
+	struct klp_patch *patch;
 	bool complete = true;
 
 	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
@@ -396,6 +427,10 @@
 	put_online_cpus();
 
 	if (!complete) {
+		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
+			klp_send_signals();
+		klp_signals_cnt++;
+
 		/*
 		 * Some tasks weren't able to be switched over. Try again
 		 * later and/or wait for other methods like kernel exit
@@ -407,7 +442,18 @@
 	}
 
 	/* we're done, now cleanup the data structures */
+	patch = klp_transition_patch;
 	klp_complete_transition();
+
+	/*
+	 * It would make more sense to free the unused patches in
+	 * klp_complete_transition() but it is called also
+	 * from klp_cancel_transition().
+	 */
+	if (!patch->enabled)
+		klp_free_patch_async(patch);
+	else if (patch->replace)
+		klp_free_replaced_patches_async(patch);
 }
 
 /*
@@ -446,6 +492,8 @@
 		if (task->patch_state != klp_target_state)
 			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
 	}
+
+	klp_signals_cnt = 0;
 }
 
 /*
@@ -563,50 +611,23 @@
 /* Called from copy_process() during fork */
 void klp_copy_process(struct task_struct *child)
 {
+
+	/*
+	 * The parent process may have gone through a KLP transition since
+	 * the thread flag was copied in setup_thread_stack earlier. Bring
+	 * the task flag up to date with the parent here.
+	 *
+	 * The operation is serialized against all klp_*_transition()
+	 * operations by the tasklist_lock. The only exception is
+	 * klp_update_patch_state(current), but we cannot race with
+	 * that because we are current.
+	 */
+	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
+		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
+	else
+		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
+
 	child->patch_state = current->patch_state;
-
-	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
-}
-
-/*
- * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
- * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
- * action currently.
- */
-void klp_send_signals(void)
-{
-	struct task_struct *g, *task;
-
-	pr_notice("signaling remaining tasks\n");
-
-	read_lock(&tasklist_lock);
-	for_each_process_thread(g, task) {
-		if (!klp_patch_pending(task))
-			continue;
-
-		/*
-		 * There is a small race here. We could see TIF_PATCH_PENDING
-		 * set and decide to wake up a kthread or send a fake signal.
-		 * Meanwhile the task could migrate itself and the action
-		 * would be meaningless. It is not serious though.
-		 */
-		if (task->flags & PF_KTHREAD) {
-			/*
-			 * Wake up a kthread which sleeps interruptedly and
-			 * still has not been migrated.
-			 */
-			wake_up_state(task, TASK_INTERRUPTIBLE);
-		} else {
-			/*
-			 * Send fake signal to all non-kthread tasks which are
-			 * still not migrated.
-			 */
-			spin_lock_irq(&task->sighand->siglock);
-			signal_wake_up(task, 0);
-			spin_unlock_irq(&task->sighand->siglock);
-		}
-	}
-	read_unlock(&tasklist_lock);
 }
 
 /*
@@ -620,6 +641,7 @@
  */
 void klp_force_transition(void)
 {
+	struct klp_patch *patch;
 	struct task_struct *g, *task;
 	unsigned int cpu;
 
@@ -633,5 +655,6 @@
 	for_each_possible_cpu(cpu)
 		klp_update_patch_state(idle_task(cpu));
 
-	klp_forced = true;
+	klp_for_each_patch(patch)
+		patch->forced = true;
 }