From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

Sync ipc/mqueue.c with the non-RT 5.10 code:

- convert the mqueue filesystem to the fs_context mount API
  (mqueue_init_fs_context/mqueue_get_tree/mq_create_mount)
- cache the rightmost node of the message rbtree so msg_get() finds the
  highest-priority message in O(1)
- document the locking scheme (MQ_BARRIER) and close the wake_q races
  with wake_q_add_safe() plus release/acquire barriers, consolidating
  the wakeup paths into __pipelined_op()
- rename the compat syscalls to *_time32 and switch to old_timespec32
- replace the vendor VFS helpers (vfs_mkobj2, inode_permission2,
  lookup_one_len2, vfs_unlink2) with the stock calls
- use the free_inode hook instead of an open-coded RCU destroy_inode
---
kernel/ipc/mqueue.c | 342 +++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 237 insertions(+), 105 deletions(-)
diff --git a/kernel/ipc/mqueue.c b/kernel/ipc/mqueue.c
index 75b10fe..86969de 100644
--- a/kernel/ipc/mqueue.c
+++ b/kernel/ipc/mqueue.c
@@ -18,6 +18,7 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
@@ -42,6 +43,11 @@
#include <net/sock.h>
#include "util.h"
+struct mqueue_fs_context {
+ struct ipc_namespace *ipc_ns;
+ bool newns; /* Set if newly created ipc namespace */
+};
+
#define MQUEUE_MAGIC 0x19800202
#define DIRENT_SIZE 20
#define FILENT_SIZE 80
@@ -58,6 +64,66 @@
int priority;
};
+/*
+ * Locking:
+ *
+ * Accesses to a message queue are synchronized by acquiring info->lock.
+ *
+ * There are two notable exceptions:
+ * - The actual wakeup of a sleeping task is performed using the wake_q
+ *   framework. info->lock is already released when wake_up_q is called.
+ * - The exit codepaths after sleeping check ext_wait_queue->state without
+ *   any locks. If it is STATE_READY, then the syscall is completed without
+ *   acquiring info->lock.
+ *
+ * MQ_BARRIER:
+ * To achieve proper release/acquire memory barrier pairing, the state is set to
+ * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
+ * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
+ *
+ * This prevents the following races:
+ *
+ * 1) With the simple wake_q_add(), the task could be gone already before
+ *    the increase of the reference happens
+ * Thread A
+ *                              Thread B
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *                              wake_q_add(A)
+ *                              if (cmpxchg()) // success
+ *                                 ->state = STATE_READY (reordered)
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * sysret to user space
+ * sys_exit()
+ *                              get_task_struct() // UaF
+ *
+ * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
+ * the smp_store_release() that does ->state = STATE_READY.
+ *
+ * 2) Without proper _release/_acquire barriers, the woken up task
+ *    could read stale data
+ *
+ * Thread A
+ *                              Thread B
+ * do_mq_timedreceive
+ * WRITE_ONCE(wait.state, STATE_NONE);
+ * schedule_hrtimeout()
+ *                              state = STATE_READY;
+ * <timeout returns>
+ * if (wait.state == STATE_READY) return;
+ * msg_ptr = wait.msg;          // Access to stale data!
+ *                              receiver->msg = message; (reordered)
+ *
+ * Solution: use _release and _acquire barriers.
+ *
+ * 3) There is intentionally no barrier when setting current->state
+ *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
+ *    release memory barrier, and the wakeup is triggered when holding
+ *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
+ *    acquire memory barrier.
+ */
+
struct ext_wait_queue { /* queue of sleeping tasks */
struct task_struct *task;
struct list_head list;
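
A minimal userspace C11 analogue of race 2 above may help readers see the
release/acquire pairing in isolation: atomic_store_explicit(...,
memory_order_release) stands in for smp_store_release(), and an acquire
load stands in for the acquire side. Illustrative sketch only, not kernel
code; build with gcc -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STATE_NONE  0
#define STATE_READY 1

static _Atomic int state = STATE_NONE;
static int msg;                         /* payload published by the waker */

static void *waker(void *arg)
{
        (void)arg;
        msg = 42;                       /* receiver->msg = message; */
        /* like smp_store_release(): the payload write above cannot be
         * reordered past this store */
        atomic_store_explicit(&state, STATE_READY, memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        /* acquire side: once STATE_READY is observed, msg is visible */
        while (atomic_load_explicit(&state, memory_order_acquire) != STATE_READY)
                ;
        printf("msg = %d\n", msg);      /* always 42, never stale */
        pthread_join(t, NULL);
        return 0;
}
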
@@ -71,6 +137,7 @@
wait_queue_head_t wait_q;
struct rb_root msg_tree;
+ struct rb_node *msg_tree_rightmost;
struct posix_msg_tree_node *node_cache;
struct mq_attr attr;
@@ -88,9 +155,11 @@
unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
+static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
+static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);
static struct kmem_cache *mqueue_inode_cachep;
@@ -125,6 +194,7 @@
{
struct rb_node **p, *parent = NULL;
struct posix_msg_tree_node *leaf;
+ bool rightmost = true;
p = &info->msg_tree.rb_node;
while (*p) {
@@ -133,9 +203,10 @@
if (likely(leaf->priority == msg->m_type))
goto insert_msg;
- else if (msg->m_type < leaf->priority)
+ else if (msg->m_type < leaf->priority) {
p = &(*p)->rb_left;
- else
+ rightmost = false;
+ } else
p = &(*p)->rb_right;
}
if (info->node_cache) {
@@ -148,6 +219,10 @@
INIT_LIST_HEAD(&leaf->msg_list);
}
leaf->priority = msg->m_type;
+
+ if (rightmost)
+ info->msg_tree_rightmost = &leaf->rb_node;
+
rb_link_node(&leaf->rb_node, parent, p);
rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
@@ -157,23 +232,34 @@
return 0;
}
+static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
+ struct mqueue_inode_info *info)
+{
+ struct rb_node *node = &leaf->rb_node;
+
+ if (info->msg_tree_rightmost == node)
+ info->msg_tree_rightmost = rb_prev(node);
+
+ rb_erase(node, &info->msg_tree);
+ if (info->node_cache)
+ kfree(leaf);
+ else
+ info->node_cache = leaf;
+}
+
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
- struct rb_node **p, *parent = NULL;
+ struct rb_node *parent = NULL;
struct posix_msg_tree_node *leaf;
struct msg_msg *msg;
try_again:
- p = &info->msg_tree.rb_node;
- while (*p) {
- parent = *p;
- /*
- * During insert, low priorities go to the left and high to the
- * right. On receive, we want the highest priorities first, so
- * walk all the way to the right.
- */
- p = &(*p)->rb_right;
- }
+ /*
+ * During insert, low priorities go to the left and high to the
+ * right. On receive, we want the highest priorities first, so
+ * walk all the way to the right.
+ */
+ parent = info->msg_tree_rightmost;
if (!parent) {
if (info->attr.mq_curmsgs) {
pr_warn_once("Inconsistency in POSIX message queue, "
@@ -188,24 +274,14 @@
pr_warn_once("Inconsistency in POSIX message queue, "
"empty leaf node but we haven't implemented "
"lazy leaf delete!\n");
- rb_erase(&leaf->rb_node, &info->msg_tree);
- if (info->node_cache) {
- kfree(leaf);
- } else {
- info->node_cache = leaf;
- }
+ msg_tree_erase(leaf, info);
goto try_again;
} else {
msg = list_first_entry(&leaf->msg_list,
struct msg_msg, m_list);
list_del(&msg->m_list);
if (list_empty(&leaf->msg_list)) {
- rb_erase(&leaf->rb_node, &info->msg_tree);
- if (info->node_cache) {
- kfree(leaf);
- } else {
- info->node_cache = leaf;
- }
+ msg_tree_erase(leaf, info);
}
}
info->attr.mq_curmsgs--;
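
The cached rightmost node keeps msg_get()'s "highest priority first"
contract while replacing the O(log n) right-spine walk on every receive
with an O(1) pointer read. The contract is observable from userspace; a
minimal sketch (the queue name "/demo" and the sizes are arbitrary; build
with -lrt):

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
        struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
        char buf[64];
        unsigned int prio;
        mqd_t q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);

        if (q == (mqd_t)-1)
                return 1;
        mq_send(q, "low", 4, 1);        /* lower priority: left subtree */
        mq_send(q, "high", 5, 9);       /* highest priority: the rightmost */
        mq_receive(q, buf, sizeof(buf), &prio);
        printf("%s (prio %u)\n", buf, prio);    /* "high (prio 9)" */
        mq_close(q);
        mq_unlink("/demo");
        return 0;
}
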
@@ -248,6 +324,7 @@
info->qsize = 0;
info->user = NULL; /* set when all is ok */
info->msg_tree = RB_ROOT;
+ info->msg_tree_rightmost = NULL;
info->node_cache = NULL;
memset(&info->attr, 0, sizeof(info->attr));
info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
@@ -323,7 +400,7 @@
return ERR_PTR(ret);
}
-static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
+static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct ipc_namespace *ns = sb->s_fs_info;
@@ -344,18 +421,69 @@
return 0;
}
-static struct dentry *mqueue_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int mqueue_get_tree(struct fs_context *fc)
{
- struct ipc_namespace *ns;
- if (flags & SB_KERNMOUNT) {
- ns = data;
- data = NULL;
- } else {
- ns = current->nsproxy->ipc_ns;
+ struct mqueue_fs_context *ctx = fc->fs_private;
+
+ /*
+ * With a newly created ipc namespace, we don't need to do a search
+ * for an ipc namespace match, but we still need to set s_fs_info.
+ */
+ if (ctx->newns) {
+ fc->s_fs_info = ctx->ipc_ns;
+ return get_tree_nodev(fc, mqueue_fill_super);
}
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
+ return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
+}
+
+static void mqueue_fs_context_free(struct fs_context *fc)
+{
+ struct mqueue_fs_context *ctx = fc->fs_private;
+
+ put_ipc_ns(ctx->ipc_ns);
+ kfree(ctx);
+}
+
+static int mqueue_init_fs_context(struct fs_context *fc)
+{
+ struct mqueue_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
+ fc->fs_private = ctx;
+ fc->ops = &mqueue_fs_context_ops;
+ return 0;
+}
+
+/*
+ * mq_init_ns() is currently the only caller of mq_create_mount().
+ * So the ns parameter is always a newly created ipc namespace.
+ */
+static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
+{
+ struct mqueue_fs_context *ctx;
+ struct fs_context *fc;
+ struct vfsmount *mnt;
+
+ fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+
+ ctx = fc->fs_private;
+ ctx->newns = true;
+ put_ipc_ns(ctx->ipc_ns);
+ ctx->ipc_ns = get_ipc_ns(ns);
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
+
+ mnt = fc_mount(fc);
+ put_fs_context(fc);
+ return mnt;
}
static void init_once(void *foo)
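
mq_create_mount() above is the kernel-internal mount path
(fs_context_for_mount() + fc_mount()); the conversion does not change how
the filesystem is mounted from userspace. For reference, the equivalent of
"mount -t mqueue none /dev/mqueue" in C -- a sketch assuming the
conventional mount point and CAP_SYS_ADMIN:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* the source string ("none") is ignored by the mqueue filesystem */
        if (mount("none", "/dev/mqueue", "mqueue", 0, NULL) != 0) {
                perror("mount");
                return 1;
        }
        puts("mqueue mounted on /dev/mqueue");
        return 0;
}
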
@@ -375,15 +503,9 @@
return &ei->vfs_inode;
}
-static void mqueue_i_callback(struct rcu_head *head)
+static void mqueue_free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
-}
-
-static void mqueue_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, mqueue_i_callback);
}
static void mqueue_evict_inode(struct inode *inode)
@@ -574,8 +696,6 @@
{
struct ext_wait_queue *walk;
- ewp->task = current;
-
list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
if (walk->task->prio <= current->prio) {
list_add_tail(&ewp->list, &walk->list);
@@ -600,18 +720,23 @@
wq_add(info, sr, ewp);
for (;;) {
+ /* memory barrier not required, we hold info->lock */
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&info->lock);
time = schedule_hrtimeout_range_clock(timeout, 0,
HRTIMER_MODE_ABS, CLOCK_REALTIME);
- if (ewp->state == STATE_READY) {
+ if (READ_ONCE(ewp->state) == STATE_READY) {
+ /* see MQ_BARRIER for purpose/pairing */
+ smp_acquire__after_ctrl_dep();
retval = 0;
goto out;
}
spin_lock(&info->lock);
- if (ewp->state == STATE_READY) {
+
+ /* we hold info->lock, so no memory barrier required */
+ if (READ_ONCE(ewp->state) == STATE_READY) {
retval = 0;
goto out_unlock;
}
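
wq_sleep() deliberately pairs READ_ONCE() with
smp_acquire__after_ctrl_dep() instead of using a full acquire load. A
rough userspace C11 analogue is a relaxed load whose result decides a
branch, followed by an acquire fence; sketch only, since the kernel
primitive (which exploits the control dependency) has no exact C11 twin.
Build with gcc -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STATE_NONE  0
#define STATE_READY 1

static _Atomic int state = STATE_NONE;
static int payload;

static void *producer(void *arg)
{
        (void)arg;
        payload = 123;
        atomic_store_explicit(&state, STATE_READY, memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        /* relaxed read, like READ_ONCE(ewp->state) */
        while (atomic_load_explicit(&state, memory_order_relaxed) != STATE_READY)
                ;
        /* like smp_acquire__after_ctrl_dep(): upgrade the branch on the
         * relaxed read to acquire ordering before touching the payload */
        atomic_thread_fence(memory_order_acquire);
        printf("payload = %d\n", payload);      /* guaranteed 123 */
        pthread_join(t, NULL);
        return 0;
}
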
@@ -667,7 +792,7 @@
case SIGEV_NONE:
break;
case SIGEV_SIGNAL: {
- struct siginfo sig_i;
+ struct kernel_siginfo sig_i;
struct task_struct *task;
/* do_mq_notify() accepts sigev_signo == 0, why?? */
@@ -738,7 +863,7 @@
info->notify_user_ns = NULL;
}
-static int prepare_open(struct vfsmount *mnt, struct dentry *dentry, int oflag, int ro,
+static int prepare_open(struct dentry *dentry, int oflag, int ro,
umode_t mode, struct filename *name,
struct mq_attr *attr)
{
@@ -752,7 +877,7 @@
if (ro)
return ro;
audit_inode_parent_hidden(name, dentry->d_parent);
- return vfs_mkobj2(mnt, dentry, mode & ~current_umask(),
+ return vfs_mkobj(dentry, mode & ~current_umask(),
mqueue_create_attr, attr);
}
/* it already existed */
@@ -762,7 +887,7 @@
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
return -EINVAL;
acc = oflag2acc[oflag & O_ACCMODE];
- return inode_permission2(mnt, d_inode(dentry), acc);
+ return inode_permission(d_inode(dentry), acc);
}
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
@@ -786,13 +911,13 @@
ro = mnt_want_write(mnt); /* we'll drop it in any case */
inode_lock(d_inode(root));
- path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
+ path.dentry = lookup_one_len(name->name, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
error = PTR_ERR(path.dentry);
goto out_putfd;
}
path.mnt = mntget(mnt);
- error = prepare_open(path.mnt, path.dentry, oflag, ro, mode, name, attr);
+ error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
if (!error) {
struct file *file = dentry_open(&path, oflag, current_cred());
if (!IS_ERR(file))
@@ -842,7 +967,7 @@
if (err)
goto out_name;
inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
- dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
+ dentry = lookup_one_len(name->name, mnt->mnt_root,
strlen(name->name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -854,7 +979,7 @@
err = -ENOENT;
} else {
ihold(inode);
- err = vfs_unlink2(mnt, d_inode(dentry->d_parent), dentry, NULL);
+ err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
}
dput(dentry);
@@ -888,6 +1013,20 @@
* The same algorithm is used for senders.
*/
+static inline void __pipelined_op(struct wake_q_head *wake_q,
+ struct mqueue_inode_info *info,
+ struct ext_wait_queue *this)
+{
+ struct task_struct *task;
+
+ list_del(&this->list);
+ task = get_task_struct(this->task);
+
+ /* see MQ_BARRIER for purpose/pairing */
+ smp_store_release(&this->state, STATE_READY);
+ wake_q_add_safe(wake_q, task);
+}
+
/* pipelined_send() - send a message directly to the task waiting in
* sys_mq_timedreceive() (without inserting message into a queue).
*/
@@ -897,17 +1036,7 @@
struct ext_wait_queue *receiver)
{
receiver->msg = message;
- list_del(&receiver->list);
- wake_q_add(wake_q, receiver->task);
- /*
- * Rely on the implicit cmpxchg barrier from wake_q_add such
- * that we can ensure that updating receiver->state is the last
- * write operation: As once set, the receiver can continue,
- * and if we don't have the reference count from the wake_q,
- * yet, at that point we can later have a use-after-free
- * condition and bogus wakeup.
- */
- receiver->state = STATE_READY;
+ __pipelined_op(wake_q, info, receiver);
}
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -925,9 +1054,7 @@
if (msg_insert(sender->msg, info))
return;
- list_del(&sender->list);
- wake_q_add(wake_q, sender->task);
- sender->state = STATE_READY;
+ __pipelined_op(wake_q, info, sender);
}
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
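
__pipelined_op() takes the task reference *before* the release store that
publishes STATE_READY, which is exactly the ordering race 1 in the
MQ_BARRIER comment demands. A userspace C11 sketch of the same discipline,
with a plain refcount standing in for get_task_struct()/put_task_struct()
(all names invented for the demo; build with gcc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define STATE_NONE  0
#define STATE_READY 1

struct waiter {
        _Atomic int state;
        _Atomic int refs;
};

static void waiter_put(struct waiter *w)
{
        if (atomic_fetch_sub_explicit(&w->refs, 1, memory_order_acq_rel) == 1)
                free(w);
}

static void *sleeper(void *arg)
{
        struct waiter *w = arg;

        while (atomic_load_explicit(&w->state, memory_order_acquire) !=
               STATE_READY)
                ;                       /* schedule_hrtimeout() stand-in */
        waiter_put(w);                  /* may drop the last reference */
        return NULL;
}

int main(void)
{
        struct waiter *w = calloc(1, sizeof(*w));
        pthread_t t;

        if (!w)
                return 1;
        atomic_store(&w->refs, 1);      /* the sleeper's reference */
        pthread_create(&t, NULL, sleeper, w);

        /* waker: take the reference FIRST (wake_q_add_safe() analogue)... */
        atomic_fetch_add_explicit(&w->refs, 1, memory_order_relaxed);
        /* ...THEN publish readiness with the release store */
        atomic_store_explicit(&w->state, STATE_READY, memory_order_release);
        waiter_put(w);                  /* the waker drops its reference */

        pthread_join(t, NULL);
        return 0;
}
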
@@ -1014,7 +1141,9 @@
} else {
wait.task = current;
wait.msg = (void *) msg_ptr;
- wait.state = STATE_NONE;
+
+ /* memory barrier not required, we hold info->lock */
+ WRITE_ONCE(wait.state, STATE_NONE);
ret = wq_sleep(info, SEND, timeout, &wait);
/*
* wq_sleep must be called with info->lock held, and
@@ -1117,7 +1246,9 @@
ret = -EAGAIN;
} else {
wait.task = current;
- wait.state = STATE_NONE;
+
+ /* memory barrier not required, we hold info->lock */
+ WRITE_ONCE(wait.state, STATE_NONE);
ret = wq_sleep(info, RECV, timeout, &wait);
msg_ptr = wait.msg;
}
@@ -1210,15 +1341,14 @@
/* create the notify skb */
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
- if (!nc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!nc)
+ return -ENOMEM;
+
if (copy_from_user(nc->data,
notification->sigev_value.sival_ptr,
NOTIFY_COOKIE_LEN)) {
ret = -EFAULT;
- goto out;
+ goto free_skb;
}
/* TODO: add a header? */
@@ -1234,8 +1364,7 @@
fdput(f);
if (IS_ERR(sock)) {
ret = PTR_ERR(sock);
- sock = NULL;
- goto out;
+ goto free_skb;
}
timeo = MAX_SCHEDULE_TIMEOUT;
@@ -1244,11 +1373,8 @@
sock = NULL;
goto retry;
}
- if (ret) {
- sock = NULL;
- nc = NULL;
- goto out;
- }
+ if (ret)
+ return ret;
}
}
@@ -1304,7 +1430,8 @@
out:
if (sock)
netlink_detachskb(sock, nc);
- else if (nc)
+ else
+free_skb:
dev_kfree_skb(nc);
return ret;
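
The free_skb relabelling above only reorganizes the error paths of the
SIGEV_THREAD cookie setup; the userspace-visible behaviour of mq_notify()
is unchanged. For reference, a minimal signal-based registration (queue
name and sizes arbitrary; build with -lrt):

#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
        struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 32 };
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGUSR1,
        };
        sigset_t set;
        int sig;
        mqd_t q;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* collect via sigwait() */

        q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);
        if (q == (mqd_t)-1)
                return 1;
        mq_notify(q, &sev);             /* arm the one-shot notification */
        mq_send(q, "ping", 5, 0);       /* empty queue -> SIGUSR1 fires */
        sigwait(&set, &sig);
        printf("notified by signal %d\n", sig);
        mq_close(q);
        mq_unlink("/demo");
        return 0;
}
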
@@ -1486,20 +1613,20 @@
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
-static int compat_prepare_timeout(const struct compat_timespec __user *p,
+static int compat_prepare_timeout(const struct old_timespec32 __user *p,
struct timespec64 *ts)
{
- if (compat_get_timespec64(ts, p))
+ if (get_old_timespec32(ts, p))
return -EFAULT;
if (!timespec64_valid(ts))
return -EINVAL;
return 0;
}
-COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
- const char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int, msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
+ const char __user *, u_msg_ptr,
+ unsigned int, msg_len, unsigned int, msg_prio,
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
@@ -1511,10 +1638,10 @@
return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
-COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
- char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
+ char __user *, u_msg_ptr,
+ unsigned int, msg_len, unsigned int __user *, u_msg_prio,
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
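
Both the native syscalls and the *_time32 variants above end up in
do_mq_timedsend()/do_mq_timedreceive() with an absolute CLOCK_REALTIME
deadline. From userspace that looks like this minimal sketch (names
arbitrary; build with -lrt):

#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 32 };
        struct timespec ts;
        char buf[32];
        mqd_t q = mq_open("/demo", O_CREAT | O_RDONLY, 0600, &attr);

        if (q == (mqd_t)-1)
                return 1;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 2;                 /* absolute deadline, now + 2s */
        if (mq_timedreceive(q, buf, sizeof(buf), NULL, &ts) < 0 &&
            errno == ETIMEDOUT)
                puts("timed out after ~2s on an empty queue");
        mq_close(q);
        mq_unlink("/demo");
        return 0;
}
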
@@ -1542,20 +1669,27 @@
static const struct super_operations mqueue_super_ops = {
.alloc_inode = mqueue_alloc_inode,
- .destroy_inode = mqueue_destroy_inode,
+ .free_inode = mqueue_free_inode,
.evict_inode = mqueue_evict_inode,
.statfs = simple_statfs,
};
+static const struct fs_context_operations mqueue_fs_context_ops = {
+ .free = mqueue_fs_context_free,
+ .get_tree = mqueue_get_tree,
+};
+
static struct file_system_type mqueue_fs_type = {
- .name = "mqueue",
- .mount = mqueue_mount,
- .kill_sb = kill_litter_super,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "mqueue",
+ .init_fs_context = mqueue_init_fs_context,
+ .kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
+ struct vfsmount *m;
+
ns->mq_queues_count = 0;
ns->mq_queues_max = DFLT_QUEUESMAX;
ns->mq_msg_max = DFLT_MSGMAX;
@@ -1563,12 +1697,10 @@
ns->mq_msg_default = DFLT_MSG;
ns->mq_msgsize_default = DFLT_MSGSIZE;
- ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
- if (IS_ERR(ns->mq_mnt)) {
- int err = PTR_ERR(ns->mq_mnt);
- ns->mq_mnt = NULL;
- return err;
- }
+ m = mq_create_mount(ns);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+ ns->mq_mnt = m;
return 0;
}
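
mq_init_ns() only runs when an ipc namespace is created, so the new
mq_create_mount() path can be exercised from userspace simply by unsharing
the ipc namespace (needs CAP_SYS_ADMIN). A minimal sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <mqueue.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
        if (unshare(CLONE_NEWIPC) != 0) {       /* new ns -> mq_init_ns() */
                perror("unshare");
                return 1;
        }
        /* the new namespace got a fresh internal mqueue mount; queues
         * from the previous namespace are no longer visible */
        mqd_t q = mq_open("/fresh", O_CREAT | O_RDWR, 0600, NULL);
        printf("mq_open in new ipc ns: %s\n",
               q != (mqd_t)-1 ? "ok" : "failed");
        return 0;
}
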
--
Gitblit v1.6.2