From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] kthread: backport upstream kthread helper, worker, and mm-use changes
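
Backport assorted upstream kthread changes: record the thread function
in struct kthread and expose kthread_func(); add __to_kthread() and
__kthread_should_park(); probe kthread data with
copy_from_kernel_nofault(); confine default kthread affinity to the
housekeeping cpumask; make kthread_delayed_work_timer_fn() IRQ-safe;
fix the kthread_mod_delayed_work() vs
kthread_cancel_delayed_work_sync() race; silence the timer->function
CFI warning; and add kthread_use_mm()/kthread_unuse_mm().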
---
kernel/kernel/kthread.c | 183 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 160 insertions(+), 23 deletions(-)
diff --git a/kernel/kernel/kthread.c b/kernel/kernel/kthread.c
index f09fe68..9d736f5 100644
--- a/kernel/kernel/kthread.c
+++ b/kernel/kernel/kthread.c
@@ -1,12 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
* Copyright (C) 2004 IBM Corporation, Rusty Russell.
+ * Copyright (C) 2009 Red Hat, Inc.
*
* Creation is done via kthreadd, so that we get a clean environment
* even if we're invoked from userspace (think modprobe, hotplug cpu,
* etc.).
*/
#include <uapi/linux/sched/types.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -21,7 +26,10 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/numa.h>
+#include <linux/sched/isolation.h>
#include <trace/events/sched.h>
+
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
@@ -44,7 +52,9 @@
struct kthread {
unsigned long flags;
unsigned int cpu;
+ int (*threadfn)(void *);
void *data;
+ mm_segment_t oldfs;
struct completion parked;
struct completion exited;
#ifdef CONFIG_BLK_CGROUP
@@ -72,6 +82,25 @@
{
WARN_ON(!(k->flags & PF_KTHREAD));
return (__force void *)k->set_child_tid;
+}
+
+/*
+ * Variant of to_kthread() that doesn't assume @p is a kthread.
+ *
+ * Per construction, when:
+ *
+ * (p->flags & PF_KTHREAD) && p->set_child_tid
+ *
+ * the task is both a kthread and struct kthread is persistent. However,
+ * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
+ * begin_new_exec()).
+ */
+static inline struct kthread *__to_kthread(struct task_struct *p)
+{
+ void *kthread = (__force void *)p->set_child_tid;
+ if (kthread && !(p->flags & PF_KTHREAD))
+ kthread = NULL;
+ return kthread;
}
void free_kthread_struct(struct task_struct *k)
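For illustration, a hedged sketch (the helper name is hypothetical, not
part of this patch) of why __to_kthread() matters: a caller handed an
arbitrary task must not assume struct kthread exists. This is
essentially the pattern kthread_is_per_cpu() adopts later in this patch.

/* Hypothetical sketch: check a possibly-non-kthread task safely.
 * __to_kthread() returns NULL for userspace tasks and for kernel
 * threads that have since called exec(). */
static bool task_is_percpu_kthread(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	return kthread && test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}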
@@ -102,6 +131,12 @@
}
EXPORT_SYMBOL(kthread_should_stop);
+bool __kthread_should_park(struct task_struct *k)
+{
+ return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
+}
+EXPORT_SYMBOL_GPL(__kthread_should_park);
+
/**
* kthread_should_park - should this kthread park now?
*
@@ -115,7 +150,7 @@
*/
bool kthread_should_park(void)
{
- return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+ return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
@@ -145,6 +180,21 @@
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
/**
+ * kthread_func - return the function specified on kthread creation
+ * @task: kthread task in question
+ *
+ * Returns NULL if the task is not a kthread.
+ */
+void *kthread_func(struct task_struct *task)
+{
+ struct kthread *kthread = __to_kthread(task);
+ if (kthread)
+ return kthread->threadfn;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kthread_func);
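A hedged usage sketch (my_thread_fn() and its caller are hypothetical):
kthread_func() lets a subsystem recognize its own worker threads by
comparing the recorded entry point.

/* Hypothetical sketch: does @task execute my_thread_fn()? */
static int my_thread_fn(void *data);

static bool task_runs_my_thread_fn(struct task_struct *task)
{
	return kthread_func(task) == (void *)my_thread_fn;
}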
+
+/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
@@ -156,6 +206,7 @@
{
return to_kthread(task)->data;
}
+EXPORT_SYMBOL_GPL(kthread_data);
/**
* kthread_probe_data - speculative version of kthread_data()
@@ -168,10 +219,11 @@
*/
void *kthread_probe_data(struct task_struct *task)
{
- struct kthread *kthread = to_kthread(task);
+ struct kthread *kthread = __to_kthread(task);
void *data = NULL;
- probe_kernel_read(&data, &kthread->data, sizeof(data));
+ if (kthread)
+ copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
return data;
}
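A hedged sketch (dump_kthread_data() is hypothetical) of the intent
here: with __to_kthread() filtering out non-kthreads and
copy_from_kernel_nofault() tolerating faults, a diagnostic path can
probe any task without risking an oops.

/* Hypothetical sketch: best-effort dump of a task's kthread data. */
static void dump_kthread_data(struct task_struct *task)
{
	void *data = kthread_probe_data(task);

	pr_info("%s/%d kthread data=%p\n", task->comm, task->pid, data);
}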
@@ -236,6 +288,7 @@
do_exit(-ENOMEM);
}
+ self->threadfn = threadfn;
self->data = data;
init_completion(&self->exited);
init_completion(&self->parked);
@@ -352,7 +405,8 @@
* The kernel thread should not inherit these properties.
*/
sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
- set_cpus_allowed_ptr(task, cpu_all_mask);
+ set_cpus_allowed_ptr(task,
+ housekeeping_cpumask(HK_FLAG_KTHREAD));
}
kfree(create);
return task;
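With this hunk, newly created kthreads default to the housekeeping
cpumask, keeping them off CPUs isolated via isolcpus=/nohz_full=. A
hedged sketch (the helper and name format are hypothetical) of how
per-CPU users still pin a thread, since kthread_bind() overrides the
default affinity:

/* Hypothetical sketch: create a kthread and pin it to one CPU. */
static struct task_struct *start_pinned_kthread(int (*fn)(void *),
						void *data, unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(fn, data, "my_kthread/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* overrides the housekeeping default */
		wake_up_process(t);
	}
	return t;
}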
@@ -422,6 +476,7 @@
{
__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
/**
* kthread_bind - bind a just-created kthread to a cpu.
@@ -447,7 +502,6 @@
* to "name.*%u". Code fills in cpu number.
*
* Description: This helper function creates and names a kernel thread
- * The thread will be woken and put into park mode.
*/
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
void *data, unsigned int cpu,
@@ -482,9 +536,9 @@
set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
-bool kthread_is_per_cpu(struct task_struct *k)
+bool kthread_is_per_cpu(struct task_struct *p)
{
- struct kthread *kthread = to_kthread(k);
+ struct kthread *kthread = __to_kthread(p);
if (!kthread)
return false;
@@ -602,7 +656,7 @@
/* Setup a clean context for our children to inherit. */
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
- set_cpus_allowed_ptr(tsk, cpu_all_mask);
+ set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
@@ -714,7 +768,7 @@
{
struct kthread_worker *worker;
struct task_struct *task;
- int node = -1;
+ int node = NUMA_NO_NODE;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker)
@@ -768,7 +822,7 @@
/**
* kthread_create_worker_on_cpu - create a kthread worker and bind it
- * it to a given CPU and the associated NUMA node.
+ * to a given CPU and the associated NUMA node.
* @cpu: CPU number
* @flags: flags modifying the default behavior of the worker
* @namefmt: printf-style name for the kthread worker (task).
@@ -874,6 +928,7 @@
struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
struct kthread_work *work = &dwork->work;
struct kthread_worker *worker = work->worker;
+ unsigned long flags;
/*
* This might happen when a pending work is reinitialized.
@@ -882,7 +937,7 @@
if (WARN_ON_ONCE(!worker))
return;
- raw_spin_lock(&worker->lock);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -892,18 +947,24 @@
if (!work->canceling)
kthread_insert_work(worker, work, &worker->work_list);
- raw_spin_unlock(&worker->lock);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
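worker->lock is taken with IRQs disabled everywhere else, so taking it
here without saving the interrupt state could let a hard-IRQ path that
queues work deadlock against this callback on the same CPU. A hedged
sketch (my_irq_handler() and struct my_dev are hypothetical) of such a
caller, which the documentation below explicitly permits:

/* Hypothetical sketch: re-arm a delayed work from hard-IRQ context. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;

	kthread_queue_delayed_work(d->worker, &d->dwork, HZ / 10);
	return IRQ_HANDLED;
}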
-void __kthread_queue_delayed_work(struct kthread_worker *worker,
- struct kthread_delayed_work *dwork,
- unsigned long delay)
+static void __kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
{
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
- WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
+ /*
+ * With CFI, timer->function can point to a jump table entry in a module,
+ * which fails the comparison. Disable the warning if CFI and modules are
+ * both enabled.
+ */
+ if (!IS_ENABLED(CONFIG_CFI_CLANG) || !IS_ENABLED(CONFIG_MODULES))
+ WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
/*
* If @delay is 0, queue @dwork->work immediately. This is for
@@ -1074,14 +1135,14 @@
* modify @dwork's timer so that it expires after @delay. If @delay is zero,
* @work is guaranteed to be queued immediately.
*
- * Return: %true if @dwork was pending and its timer was modified,
- * %false otherwise.
+ * Return: %false if @dwork was idle and queued, %true otherwise.
*
* A special case is when the work is being canceled in parallel.
* It might be caused either by the real kthread_cancel_delayed_work_sync()
* or yet another kthread_mod_delayed_work() call. We let the other command
- * win and return %false here. The caller is supposed to synchronize these
- * operations a reasonable way.
+ * win and return %true here. The return value can be used for reference
+ * counting and the number of queued works stays the same. Anyway, the caller
+ * is supposed to synchronize these operations in a reasonable way.
*
* This function is safe to call from any context including IRQ handler.
* See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
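The revised contract makes the return value usable for reference
counting: %false means exactly one new instance was queued, %true means
the number of queued instances is unchanged. A hedged sketch (struct
my_dev, my_rearm() and the device reference are hypothetical):

/* Hypothetical sketch: hold one device reference per queued instance. */
static void my_rearm(struct my_dev *d, unsigned long delay)
{
	if (!kthread_mod_delayed_work(d->worker, &d->dwork, delay))
		get_device(d->dev);	/* was idle: one new instance queued */
	/* %true: already queued or being canceled; count unchanged */
}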
@@ -1093,13 +1154,15 @@
{
struct kthread_work *work = &dwork->work;
unsigned long flags;
- int ret = false;
+ int ret;
raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
- if (!work->worker)
+ if (!work->worker) {
+ ret = false;
goto fast_queue;
+ }
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);
@@ -1117,9 +1180,24 @@
* be used for reference counting.
*/
kthread_cancel_delayed_work_timer(work, &flags);
- if (work->canceling)
+ if (work->canceling) {
+ /* The number of works in the queue does not change. */
+ ret = true;
goto out;
+ }
ret = __kthread_cancel_work(work);
+
+ /*
+ * Canceling could run in parallel from kthread_cancel_delayed_work_sync()
+ * and change the work's canceling count while the spinlock is released
+ * and regained in __kthread_cancel_work(), so we need to check the count
+ * again. Otherwise, we might incorrectly queue the dwork and make the
+ * kthread_cancel_delayed_work_sync() thread wait endlessly for the
+ * dwork to be flushed.
+ */
+ if (work->canceling) {
+ ret = false;
+ goto out;
+ }
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
@@ -1245,6 +1323,65 @@
}
EXPORT_SYMBOL(kthread_destroy_worker);
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space
+ * @mm: address space to operate on
+ */
+void kthread_use_mm(struct mm_struct *mm)
+{
+ struct mm_struct *active_mm;
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(tsk->mm);
+
+ task_lock(tsk);
+ /* Hold off tlb flush IPIs while switching mm's */
+ local_irq_disable();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ mmgrab(mm);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+ switch_mm_irqs_off(active_mm, mm, tsk);
+ local_irq_enable();
+ task_unlock(tsk);
+#ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+#endif
+
+ if (active_mm != mm)
+ mmdrop(active_mm);
+
+ to_kthread(tsk)->oldfs = force_uaccess_begin();
+}
+EXPORT_SYMBOL_GPL(kthread_use_mm);
+
+/**
+ * kthread_unuse_mm - reverse the effect of kthread_use_mm()
+ * @mm: address space to operate on
+ */
+void kthread_unuse_mm(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(!tsk->mm);
+
+ force_uaccess_end(to_kthread(tsk)->oldfs);
+
+ task_lock(tsk);
+ sync_mm_rss(mm);
+ local_irq_disable();
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
+ local_irq_enable();
+ task_unlock(tsk);
+}
+EXPORT_SYMBOL_GPL(kthread_unuse_mm);
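A hedged sketch of the usual pairing (struct my_ctx and my_worker_fn()
are hypothetical; ctx->mm is assumed to hold a reference taken with
mmget()), in the style of users such as vhost:

/* Hypothetical sketch: a kthread adopting a user mm to reach a
 * userspace buffer owned by that mm. */
static int my_worker_fn(void *arg)
{
	struct my_ctx *ctx = arg;

	kthread_use_mm(ctx->mm);
	if (copy_to_user(ctx->ubuf, ctx->kbuf, ctx->len))
		pr_warn("my_worker: copy_to_user failed\n");
	kthread_unuse_mm(ctx->mm);
	return 0;
}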
+
#ifdef CONFIG_BLK_CGROUP
/**
* kthread_associate_blkcg - associate blkcg to current kthread
--
Gitblit v1.6.2