.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
1 | 2 | /* Kernel thread helper functions. |
2 | 3 | * Copyright (C) 2004 IBM Corporation, Rusty Russell. |
| 4 | + * Copyright (C) 2009 Red Hat, Inc. |
3 | 5 | * |
4 | 6 | * Creation is done via kthreadd, so that we get a clean environment |
5 | 7 | * even if we're invoked from userspace (think modprobe, hotplug cpu, |
6 | 8 | * etc.). |
7 | 9 | */ |
8 | 10 | #include <uapi/linux/sched/types.h> |
| 11 | +#include <linux/mm.h> |
| 12 | +#include <linux/mmu_context.h> |
9 | 13 | #include <linux/sched.h> |
| 14 | +#include <linux/sched/mm.h> |
10 | 15 | #include <linux/sched/task.h> |
11 | 16 | #include <linux/kthread.h> |
12 | 17 | #include <linux/completion.h> |
.. | .. |
21 | 26 | #include <linux/freezer.h> |
22 | 27 | #include <linux/ptrace.h> |
23 | 28 | #include <linux/uaccess.h> |
| 29 | +#include <linux/numa.h> |
| 30 | +#include <linux/sched/isolation.h> |
24 | 31 | #include <trace/events/sched.h> |
| 32 | + |
25 | 33 | |
26 | 34 | static DEFINE_SPINLOCK(kthread_create_lock); |
27 | 35 | static LIST_HEAD(kthread_create_list); |
.. | .. |
44 | 52 | struct kthread { |
45 | 53 | unsigned long flags; |
46 | 54 | unsigned int cpu; |
| 55 | + int (*threadfn)(void *); |
47 | 56 | void *data; |
| 57 | + mm_segment_t oldfs; |
48 | 58 | struct completion parked; |
49 | 59 | struct completion exited; |
50 | 60 | #ifdef CONFIG_BLK_CGROUP |
.. | .. |
72 | 82 | { |
73 | 83 | WARN_ON(!(k->flags & PF_KTHREAD)); |
74 | 84 | return (__force void *)k->set_child_tid; |
| 85 | +} |
| 86 | + |
| 87 | +/* |
| 88 | + * Variant of to_kthread() that doesn't assume @p is a kthread. |
| 89 | + * |
| 90 | + * Per construction; when: |
| 91 | + * |
| 92 | + * (p->flags & PF_KTHREAD) && p->set_child_tid |
| 93 | + * |
| 94 | + * the task is both a kthread and struct kthread is persistent. However |
| 95 | + * PF_KTHREAD on its own is not; kernel_thread() can exec() (see umh.c and |
| 96 | + * begin_new_exec()). |
| 97 | + */ |
| 98 | +static inline struct kthread *__to_kthread(struct task_struct *p) |
| 99 | +{ |
| 100 | + void *kthread = (__force void *)p->set_child_tid; |
| 101 | + if (kthread && !(p->flags & PF_KTHREAD)) |
| 102 | + kthread = NULL; |
| 103 | + return kthread; |
75 | 104 | } |
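
The comment above is the invariant the rest of this patch builds on: only when both PF_KTHREAD is set and set_child_tid points at a struct kthread is the task a kthread with persistent per-kthread data. A minimal, hedged sketch of the guarded pattern this enables; example_report_thread() is hypothetical and not part of the patch:

```c
/* Hedged sketch: probe a task that may or may not be a persistent kthread. */
static int example_report_thread(struct task_struct *p)
{
	struct kthread *kt = __to_kthread(p);	/* NULL for user tasks and kthreads that exec()ed */

	if (!kt)
		return -EINVAL;
	pr_info("%s: threadfn=%ps data=%p\n", p->comm, kt->threadfn, kt->data);
	return 0;
}
```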
76 | 105 | |
77 | 106 | void free_kthread_struct(struct task_struct *k) |
.. | .. |
102 | 131 | } |
103 | 132 | EXPORT_SYMBOL(kthread_should_stop); |
104 | 133 | |
| 134 | +bool __kthread_should_park(struct task_struct *k) |
| 135 | +{ |
| 136 | + return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags); |
| 137 | +} |
| 138 | +EXPORT_SYMBOL_GPL(__kthread_should_park); |
| 139 | + |
105 | 140 | /** |
106 | 141 | * kthread_should_park - should this kthread park now? |
107 | 142 | * |
.. | .. |
115 | 150 | */ |
116 | 151 | bool kthread_should_park(void) |
117 | 152 | { |
118 | | - return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); |
| 153 | + return __kthread_should_park(current); |
119 | 154 | } |
120 | 155 | EXPORT_SYMBOL_GPL(kthread_should_park); |
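
__kthread_should_park() exposes the park-bit test for an arbitrary task, so callers that manage other threads can check it without sleeping or taking locks, while kthread_should_park() keeps its old meaning for the current task. A hedged sketch of the kind of caller this enables (example_kick() is hypothetical):

```c
/* Hedged sketch: only wake a managed thread if it has not been asked to park. */
static void example_kick(struct task_struct *tsk)
{
	if (!__kthread_should_park(tsk))
		wake_up_process(tsk);
}
```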
121 | 156 | |
.. | .. |
145 | 180 | EXPORT_SYMBOL_GPL(kthread_freezable_should_stop); |
146 | 181 | |
147 | 182 | /** |
| 183 | + * kthread_func - return the function specified on kthread creation |
| 184 | + * @task: kthread task in question |
| 185 | + * |
| 186 | + * Returns NULL if the task is not a kthread. |
| 187 | + */ |
| 188 | +void *kthread_func(struct task_struct *task) |
| 189 | +{ |
| 190 | + struct kthread *kthread = __to_kthread(task); |
| 191 | + if (kthread) |
| 192 | + return kthread->threadfn; |
| 193 | + return NULL; |
| 194 | +} |
| 195 | +EXPORT_SYMBOL_GPL(kthread_func); |
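
Together with the new threadfn field in struct kthread, kthread_func() lets other subsystems identify a kthread by its entry point. A hedged usage sketch, assuming a hypothetical worker entry point example_worker_fn():

```c
/* Hedged sketch: recognize "our" kthreads by their main function. */
static bool example_is_my_worker(struct task_struct *task)
{
	return kthread_func(task) == (void *)example_worker_fn;
}
```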
| 196 | + |
| 197 | +/** |
148 | 198 | * kthread_data - return data value specified on kthread creation |
149 | 199 | * @task: kthread task in question |
150 | 200 | * |
.. | .. |
156 | 206 | { |
157 | 207 | return to_kthread(task)->data; |
158 | 208 | } |
| 209 | +EXPORT_SYMBOL_GPL(kthread_data); |
159 | 210 | |
160 | 211 | /** |
161 | 212 | * kthread_probe_data - speculative version of kthread_data() |
.. | .. |
168 | 219 | */ |
169 | 220 | void *kthread_probe_data(struct task_struct *task) |
170 | 221 | { |
171 | | - struct kthread *kthread = to_kthread(task); |
| 222 | + struct kthread *kthread = __to_kthread(task); |
172 | 223 | void *data = NULL; |
173 | 224 | |
174 | | - probe_kernel_read(&data, &kthread->data, sizeof(data)); |
| 225 | + if (kthread) |
| 226 | + copy_from_kernel_nofault(&data, &kthread->data, sizeof(data)); |
175 | 227 | return data; |
176 | 228 | } |
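
With __to_kthread() and copy_from_kernel_nofault(), kthread_probe_data() now returns NULL for tasks that are not (or no longer) kthreads instead of dereferencing a bogus pointer. A hedged sketch of the non-faulting debug use it is intended for (example_dump() is hypothetical):

```c
/* Hedged sketch: fetch a worker's private pointer from a context that must not fault. */
static void example_dump(struct task_struct *task)
{
	void *data = kthread_probe_data(task);

	pr_info("%s: kthread data %p\n", task->comm, data);
}
```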
177 | 229 | |
.. | .. |
236 | 288 | do_exit(-ENOMEM); |
237 | 289 | } |
238 | 290 | |
| 291 | + self->threadfn = threadfn; |
239 | 292 | self->data = data; |
240 | 293 | init_completion(&self->exited); |
241 | 294 | init_completion(&self->parked); |
.. | .. |
352 | 405 | * The kernel thread should not inherit these properties. |
353 | 406 | */ |
354 | 407 | sched_setscheduler_nocheck(task, SCHED_NORMAL, &param); |
355 | | - set_cpus_allowed_ptr(task, cpu_all_mask); |
| 408 | + set_cpus_allowed_ptr(task, |
| 409 | + housekeeping_cpumask(HK_FLAG_KTHREAD)); |
356 | 410 | } |
357 | 411 | kfree(create); |
358 | 412 | return task; |
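
This hunk, together with the matching kthreadd change later in the patch, confines newly created kthreads to the housekeeping CPUs instead of cpu_all_mask, so CPUs isolated for latency-sensitive work are left alone by default. A hedged sketch of applying the same policy to a custom thread (example_fn() is hypothetical):

```c
/* Hedged sketch: keep a private kthread on housekeeping CPUs, as done above. */
static void example_spawn(void)
{
	struct task_struct *tsk = kthread_run(example_fn, NULL, "example");

	if (!IS_ERR(tsk))
		set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
}
```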
.. | .. |
422 | 476 | { |
423 | 477 | __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE); |
424 | 478 | } |
| 479 | +EXPORT_SYMBOL_GPL(kthread_bind_mask); |
425 | 480 | |
426 | 481 | /** |
427 | 482 | * kthread_bind - bind a just-created kthread to a cpu. |
.. | .. |
447 | 502 | * to "name.*%u". Code fills in cpu number. |
448 | 503 | * |
449 | 504 | * Description: This helper function creates and names a kernel thread |
450 | | - * The thread will be woken and put into park mode. |
451 | 505 | */ |
452 | 506 | struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), |
453 | 507 | void *data, unsigned int cpu, |
.. | .. |
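
The kerneldoc above keeps the naming contract of kthread_create_on_cpu(): @namefmt is expected to contain a "%u" that the helper fills in with the CPU number, while the stale sentence about waking and parking the new thread is dropped. A hedged usage sketch (example_fn() is hypothetical):

```c
/* Hedged sketch: create a CPU-bound thread; "%u" is expanded to @cpu by the helper. */
static void example_create(unsigned int cpu)
{
	struct task_struct *tsk = kthread_create_on_cpu(example_fn, NULL, cpu, "example/%u");

	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* assumption: waking (or parking) is left to the caller */
}
```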
482 | 536 | set_bit(KTHREAD_IS_PER_CPU, &kthread->flags); |
483 | 537 | } |
484 | 538 | |
485 | | -bool kthread_is_per_cpu(struct task_struct *k) |
| 539 | +bool kthread_is_per_cpu(struct task_struct *p) |
486 | 540 | { |
487 | | - struct kthread *kthread = to_kthread(k); |
| 541 | + struct kthread *kthread = __to_kthread(p); |
488 | 542 | if (!kthread) |
489 | 543 | return false; |
490 | 544 | |
.. | .. |
602 | 656 | /* Setup a clean context for our children to inherit. */ |
603 | 657 | set_task_comm(tsk, "kthreadd"); |
604 | 658 | ignore_signals(tsk); |
605 | | - set_cpus_allowed_ptr(tsk, cpu_all_mask); |
| 659 | + set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); |
606 | 660 | set_mems_allowed(node_states[N_MEMORY]); |
607 | 661 | |
608 | 662 | current->flags |= PF_NOFREEZE; |
.. | .. |
638 | 692 | struct lock_class_key *key) |
639 | 693 | { |
640 | 694 | memset(worker, 0, sizeof(struct kthread_worker)); |
641 | | - spin_lock_init(&worker->lock); |
| 695 | + raw_spin_lock_init(&worker->lock); |
642 | 696 | lockdep_set_class_and_name(&worker->lock, key, name); |
643 | 697 | INIT_LIST_HEAD(&worker->work_list); |
644 | 698 | INIT_LIST_HEAD(&worker->delayed_work_list); |
.. | .. |
680 | 734 | |
681 | 735 | if (kthread_should_stop()) { |
682 | 736 | __set_current_state(TASK_RUNNING); |
683 | | - spin_lock_irq(&worker->lock); |
| 737 | + raw_spin_lock_irq(&worker->lock); |
684 | 738 | worker->task = NULL; |
685 | | - spin_unlock_irq(&worker->lock); |
| 739 | + raw_spin_unlock_irq(&worker->lock); |
686 | 740 | return 0; |
687 | 741 | } |
688 | 742 | |
689 | 743 | work = NULL; |
690 | | - spin_lock_irq(&worker->lock); |
| 744 | + raw_spin_lock_irq(&worker->lock); |
691 | 745 | if (!list_empty(&worker->work_list)) { |
692 | 746 | work = list_first_entry(&worker->work_list, |
693 | 747 | struct kthread_work, node); |
694 | 748 | list_del_init(&work->node); |
695 | 749 | } |
696 | 750 | worker->current_work = work; |
697 | | - spin_unlock_irq(&worker->lock); |
| 751 | + raw_spin_unlock_irq(&worker->lock); |
698 | 752 | |
699 | 753 | if (work) { |
700 | 754 | __set_current_state(TASK_RUNNING); |
.. | .. |
714 | 768 | { |
715 | 769 | struct kthread_worker *worker; |
716 | 770 | struct task_struct *task; |
717 | | - int node = -1; |
| 771 | + int node = NUMA_NO_NODE; |
718 | 772 | |
719 | 773 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); |
720 | 774 | if (!worker) |
.. | .. |
768 | 822 | |
769 | 823 | /** |
770 | 824 | * kthread_create_worker_on_cpu - create a kthread worker and bind it |
771 | | - * it to a given CPU and the associated NUMA node. |
| 825 | + * to a given CPU and the associated NUMA node. |
772 | 826 | * @cpu: CPU number |
773 | 827 | * @flags: flags modifying the default behavior of the worker |
774 | 828 | * @namefmt: printf-style name for the kthread worker (task). |
.. | .. |
851 | 905 | bool ret = false; |
852 | 906 | unsigned long flags; |
853 | 907 | |
854 | | - spin_lock_irqsave(&worker->lock, flags); |
| 908 | + raw_spin_lock_irqsave(&worker->lock, flags); |
855 | 909 | if (!queuing_blocked(worker, work)) { |
856 | 910 | kthread_insert_work(worker, work, &worker->work_list); |
857 | 911 | ret = true; |
858 | 912 | } |
859 | | - spin_unlock_irqrestore(&worker->lock, flags); |
| 913 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
860 | 914 | return ret; |
861 | 915 | } |
862 | 916 | EXPORT_SYMBOL_GPL(kthread_queue_work); |
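
A hedged sketch tying together the kthread_worker pieces touched by this patch: create a dedicated worker, then queue a work item through kthread_queue_work(), which now serializes on the worker's raw spinlock. example_work_fn() is hypothetical:

```c
/* Hedged sketch: one dedicated worker thread plus one work item. */
static struct kthread_worker *example_worker;
static struct kthread_work example_work;

static int example_setup(void)
{
	example_worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_work, example_work_fn);
	kthread_queue_work(example_worker, &example_work);
	return 0;
}
```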
.. | .. |
874 | 928 | struct kthread_delayed_work *dwork = from_timer(dwork, t, timer); |
875 | 929 | struct kthread_work *work = &dwork->work; |
876 | 930 | struct kthread_worker *worker = work->worker; |
| 931 | + unsigned long flags; |
877 | 932 | |
878 | 933 | /* |
879 | 934 | * This might happen when a pending work is reinitialized. |
.. | .. |
882 | 937 | if (WARN_ON_ONCE(!worker)) |
883 | 938 | return; |
884 | 939 | |
885 | | - spin_lock(&worker->lock); |
| 940 | + raw_spin_lock_irqsave(&worker->lock, flags); |
886 | 941 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ |
887 | 942 | WARN_ON_ONCE(work->worker != worker); |
888 | 943 | |
.. | .. |
892 | 947 | if (!work->canceling) |
893 | 948 | kthread_insert_work(worker, work, &worker->work_list); |
894 | 949 | |
895 | | - spin_unlock(&worker->lock); |
| 950 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
896 | 951 | } |
897 | 952 | EXPORT_SYMBOL(kthread_delayed_work_timer_fn); |
898 | 953 | |
899 | | -void __kthread_queue_delayed_work(struct kthread_worker *worker, |
900 | | - struct kthread_delayed_work *dwork, |
901 | | - unsigned long delay) |
| 954 | +static void __kthread_queue_delayed_work(struct kthread_worker *worker, |
| 955 | + struct kthread_delayed_work *dwork, |
| 956 | + unsigned long delay) |
902 | 957 | { |
903 | 958 | struct timer_list *timer = &dwork->timer; |
904 | 959 | struct kthread_work *work = &dwork->work; |
905 | 960 | |
906 | | - WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn); |
| 961 | + /* |
| 962 | + * With CFI, timer->function can point to a jump table entry in a module, |
| 963 | + * which fails the comparison. Disable the warning if CFI and modules are |
| 964 | + * both enabled. |
| 965 | + */ |
| 966 | + if (!IS_ENABLED(CONFIG_CFI_CLANG) || !IS_ENABLED(CONFIG_MODULES)) |
| 967 | + WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn); |
907 | 968 | |
908 | 969 | /* |
909 | 970 | * If @delay is 0, queue @dwork->work immediately. This is for |
.. | .. |
948 | 1009 | unsigned long flags; |
949 | 1010 | bool ret = false; |
950 | 1011 | |
951 | | - spin_lock_irqsave(&worker->lock, flags); |
| 1012 | + raw_spin_lock_irqsave(&worker->lock, flags); |
952 | 1013 | |
953 | 1014 | if (!queuing_blocked(worker, work)) { |
954 | 1015 | __kthread_queue_delayed_work(worker, dwork, delay); |
955 | 1016 | ret = true; |
956 | 1017 | } |
957 | 1018 | |
958 | | - spin_unlock_irqrestore(&worker->lock, flags); |
| 1019 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
959 | 1020 | return ret; |
960 | 1021 | } |
961 | 1022 | EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); |
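
The delayed-work path above expects the timer to have been set up by kthread_init_delayed_work(), which is exactly what the (now CFI-aware) WARN_ON_ONCE() in __kthread_queue_delayed_work() checks; a @delay of 0 queues the work immediately. A hedged sketch (example_work_fn() is hypothetical):

```c
/* Hedged sketch: arm a delayed work; a delay of 0 would queue it immediately. */
static struct kthread_delayed_work example_dwork;

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_work_fn);
	kthread_queue_delayed_work(worker, &example_dwork, HZ);	/* roughly one second */
}
```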
.. | .. |
991 | 1052 | if (!worker) |
992 | 1053 | return; |
993 | 1054 | |
994 | | - spin_lock_irq(&worker->lock); |
| 1055 | + raw_spin_lock_irq(&worker->lock); |
995 | 1056 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ |
996 | 1057 | WARN_ON_ONCE(work->worker != worker); |
997 | 1058 | |
.. | .. |
1003 | 1064 | else |
1004 | 1065 | noop = true; |
1005 | 1066 | |
1006 | | - spin_unlock_irq(&worker->lock); |
| 1067 | + raw_spin_unlock_irq(&worker->lock); |
1007 | 1068 | |
1008 | 1069 | if (!noop) |
1009 | 1070 | wait_for_completion(&fwork.done); |
.. | .. |
1031 | 1092 | * any queuing is blocked by setting the canceling counter. |
1032 | 1093 | */ |
1033 | 1094 | work->canceling++; |
1034 | | - spin_unlock_irqrestore(&worker->lock, *flags); |
| 1095 | + raw_spin_unlock_irqrestore(&worker->lock, *flags); |
1035 | 1096 | del_timer_sync(&dwork->timer); |
1036 | | - spin_lock_irqsave(&worker->lock, *flags); |
| 1097 | + raw_spin_lock_irqsave(&worker->lock, *flags); |
1037 | 1098 | work->canceling--; |
1038 | 1099 | } |
1039 | 1100 | |
.. | .. |
1074 | 1135 | * modify @dwork's timer so that it expires after @delay. If @delay is zero, |
1075 | 1136 | * @work is guaranteed to be queued immediately. |
1076 | 1137 | * |
1077 | | - * Return: %true if @dwork was pending and its timer was modified, |
1078 | | - * %false otherwise. |
| 1138 | + * Return: %false if @dwork was idle and queued, %true otherwise. |
1079 | 1139 | * |
1080 | 1140 | * A special case is when the work is being canceled in parallel. |
1081 | 1141 | * It might be caused either by the real kthread_cancel_delayed_work_sync() |
1082 | 1142 | * or yet another kthread_mod_delayed_work() call. We let the other command |
1083 | | - * win and return %false here. The caller is supposed to synchronize these |
1084 | | - * operations a reasonable way. |
| 1143 | + * win and return %true here. The return value can be used for reference |
| 1144 | + * counting and the number of queued works stays the same. Anyway, the caller |
| 1145 | + * is supposed to synchronize these operations in a reasonable way. |
1085 | 1146 | * |
1086 | 1147 | * This function is safe to call from any context including IRQ handler. |
1087 | 1148 | * See __kthread_cancel_work() and kthread_delayed_work_timer_fn() |
.. | .. |
1093 | 1154 | { |
1094 | 1155 | struct kthread_work *work = &dwork->work; |
1095 | 1156 | unsigned long flags; |
1096 | | - int ret = false; |
| 1157 | + int ret; |
1097 | 1158 | |
1098 | | - spin_lock_irqsave(&worker->lock, flags); |
| 1159 | + raw_spin_lock_irqsave(&worker->lock, flags); |
1099 | 1160 | |
1100 | 1161 | /* Do not bother with canceling when never queued. */ |
1101 | | - if (!work->worker) |
| 1162 | + if (!work->worker) { |
| 1163 | + ret = false; |
1102 | 1164 | goto fast_queue; |
| 1165 | + } |
1103 | 1166 | |
1104 | 1167 | /* Work must not be used with >1 worker, see kthread_queue_work() */ |
1105 | 1168 | WARN_ON_ONCE(work->worker != worker); |
.. | .. |
1117 | 1180 | * be used for reference counting. |
1118 | 1181 | */ |
1119 | 1182 | kthread_cancel_delayed_work_timer(work, &flags); |
1120 | | - if (work->canceling) |
| 1183 | + if (work->canceling) { |
| 1184 | + /* The number of works in the queue does not change. */ |
| 1185 | + ret = true; |
1121 | 1186 | goto out; |
| 1187 | + } |
1122 | 1188 | ret = __kthread_cancel_work(work); |
| 1189 | + |
| 1190 | + /* |
| 1191 | + * Canceling could run in parallel from kthread_cancel_delayed_work_sync() |
| 1192 | + * and change work's canceling count as the spinlock is released and |
| 1193 | + * regained in __kthread_cancel_work(), so we need to check the count |
| 1194 | + * again. Otherwise, we might incorrectly queue the dwork and make the |
| 1195 | + * kthread_cancel_delayed_work_sync() caller wait for the flush endlessly. |
| 1196 | + */ |
| 1197 | + if (work->canceling) { |
| 1198 | + ret = false; |
| 1199 | + goto out; |
| 1200 | + } |
1123 | 1201 | |
1124 | 1202 | fast_queue: |
1125 | 1203 | __kthread_queue_delayed_work(worker, dwork, delay); |
1126 | 1204 | out: |
1127 | | - spin_unlock_irqrestore(&worker->lock, flags); |
| 1205 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
1128 | 1206 | return ret; |
1129 | 1207 | } |
1130 | 1208 | EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); |
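
The reworked return value distinguishes "was idle and is now queued" (%false) from "was already accounted for" (%true), which is what makes the reference-counting pattern mentioned in the kerneldoc workable. A hedged sketch; struct example_obj and its embedded worker/dwork are hypothetical:

```c
/* Hedged sketch: hold one reference per queued instance of the delayed work. */
struct example_obj {
	struct kthread_worker *worker;
	struct kthread_delayed_work dwork;
	refcount_t ref;
};

static void example_rearm(struct example_obj *obj, unsigned long delay)
{
	/* %false: the work was idle and is newly queued, so pin the object. */
	if (!kthread_mod_delayed_work(obj->worker, &obj->dwork, delay))
		refcount_inc(&obj->ref);
}
```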
.. | .. |
1138 | 1216 | if (!worker) |
1139 | 1217 | goto out; |
1140 | 1218 | |
1141 | | - spin_lock_irqsave(&worker->lock, flags); |
| 1219 | + raw_spin_lock_irqsave(&worker->lock, flags); |
1142 | 1220 | /* Work must not be used with >1 worker, see kthread_queue_work(). */ |
1143 | 1221 | WARN_ON_ONCE(work->worker != worker); |
1144 | 1222 | |
.. | .. |
1155 | 1233 | * In the meantime, block any queuing by setting the canceling counter. |
1156 | 1234 | */ |
1157 | 1235 | work->canceling++; |
1158 | | - spin_unlock_irqrestore(&worker->lock, flags); |
| 1236 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
1159 | 1237 | kthread_flush_work(work); |
1160 | | - spin_lock_irqsave(&worker->lock, flags); |
| 1238 | + raw_spin_lock_irqsave(&worker->lock, flags); |
1161 | 1239 | work->canceling--; |
1162 | 1240 | |
1163 | 1241 | out_fast: |
1164 | | - spin_unlock_irqrestore(&worker->lock, flags); |
| 1242 | + raw_spin_unlock_irqrestore(&worker->lock, flags); |
1165 | 1243 | out: |
1166 | 1244 | return ret; |
1167 | 1245 | } |
.. | .. |
1245 | 1323 | } |
1246 | 1324 | EXPORT_SYMBOL(kthread_destroy_worker); |
1247 | 1325 | |
| 1326 | +/** |
| 1327 | + * kthread_use_mm - make the calling kthread operate on an address space |
| 1328 | + * @mm: address space to operate on |
| 1329 | + */ |
| 1330 | +void kthread_use_mm(struct mm_struct *mm) |
| 1331 | +{ |
| 1332 | + struct mm_struct *active_mm; |
| 1333 | + struct task_struct *tsk = current; |
| 1334 | + |
| 1335 | + WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); |
| 1336 | + WARN_ON_ONCE(tsk->mm); |
| 1337 | + |
| 1338 | + task_lock(tsk); |
| 1339 | + /* Hold off tlb flush IPIs while switching mm's */ |
| 1340 | + local_irq_disable(); |
| 1341 | + active_mm = tsk->active_mm; |
| 1342 | + if (active_mm != mm) { |
| 1343 | + mmgrab(mm); |
| 1344 | + tsk->active_mm = mm; |
| 1345 | + } |
| 1346 | + tsk->mm = mm; |
| 1347 | + switch_mm_irqs_off(active_mm, mm, tsk); |
| 1348 | + local_irq_enable(); |
| 1349 | + task_unlock(tsk); |
| 1350 | +#ifdef finish_arch_post_lock_switch |
| 1351 | + finish_arch_post_lock_switch(); |
| 1352 | +#endif |
| 1353 | + |
| 1354 | + if (active_mm != mm) |
| 1355 | + mmdrop(active_mm); |
| 1356 | + |
| 1357 | + to_kthread(tsk)->oldfs = force_uaccess_begin(); |
| 1358 | +} |
| 1359 | +EXPORT_SYMBOL_GPL(kthread_use_mm); |
| 1360 | + |
| 1361 | +/** |
| 1362 | + * kthread_unuse_mm - reverse the effect of kthread_use_mm() |
| 1363 | + * @mm: address space to operate on |
| 1364 | + */ |
| 1365 | +void kthread_unuse_mm(struct mm_struct *mm) |
| 1366 | +{ |
| 1367 | + struct task_struct *tsk = current; |
| 1368 | + |
| 1369 | + WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); |
| 1370 | + WARN_ON_ONCE(!tsk->mm); |
| 1371 | + |
| 1372 | + force_uaccess_end(to_kthread(tsk)->oldfs); |
| 1373 | + |
| 1374 | + task_lock(tsk); |
| 1375 | + sync_mm_rss(mm); |
| 1376 | + local_irq_disable(); |
| 1377 | + tsk->mm = NULL; |
| 1378 | + /* active_mm is still 'mm' */ |
| 1379 | + enter_lazy_tlb(mm, tsk); |
| 1380 | + local_irq_enable(); |
| 1381 | + task_unlock(tsk); |
| 1382 | +} |
| 1383 | +EXPORT_SYMBOL_GPL(kthread_unuse_mm); |
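
kthread_use_mm() and kthread_unuse_mm() give a kthread a scoped way to operate on a user address space: adopt the mm, do the uaccess work, then drop it again. A hedged sketch of the usual pattern, assuming the caller was handed an mm that is kept alive by a reference; example_fill_user_buffer() is hypothetical:

```c
/* Hedged sketch: a kthread copying data into a user buffer of a borrowed mm. */
static int example_fill_user_buffer(struct mm_struct *mm, void __user *ubuf,
				    const void *src, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);		/* @mm must be pinned by the caller */
	if (copy_to_user(ubuf, src, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	return ret;
}
```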
| 1384 | + |
1248 | 1385 | #ifdef CONFIG_BLK_CGROUP |
1249 | 1386 | /** |
1250 | 1387 | * kthread_associate_blkcg - associate blkcg to current kthread |
---|