From 982b8cc116118b3463d3f332581945625722acd8 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 09 Jan 2024 02:10:54 +0000
Subject: [PATCH] pipe: convert ring to head/tail indices and add watch_queue support
---
kernel/fs/pipe.c | 751 ++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 495 insertions(+), 256 deletions(-)
diff --git a/kernel/fs/pipe.c b/kernel/fs/pipe.c
index 1489257..ea68035 100644
--- a/kernel/fs/pipe.c
+++ b/kernel/fs/pipe.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
@@ -23,6 +24,7 @@
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
+#include <linux/watch_queue.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
@@ -57,10 +59,12 @@
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
- * We use a start+len construction, which provides full use of the
- * allocated memory.
- * -- Florian Coosmann (FGC)
- *
+ * We use head and tail indices that aren't masked off, except at the point of
+ * dereference, but rather they're allowed to wrap naturally. This means there
+ * isn't a dead spot in the buffer, but the ring has to be a power of two and
+ * <= 2^31.
+ * -- David Howells 2019-09-23.
+ *
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
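
The wrap behaviour this new comment relies on is easy to check in
isolation. A minimal stand-alone sketch follows; the helpers mirror the
arithmetic of the kernel's pipe_empty()/pipe_full(), but none of this is
kernel code:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16u			/* power of two, <= 2^31 */

static bool ring_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

static bool ring_full(unsigned int head, unsigned int tail,
		      unsigned int limit)
{
	return head - tail >= limit;	/* unsigned wrap keeps this exact */
}

int main(void)
{
	/* Straddle the 32-bit wrap point on purpose. */
	unsigned int tail = 0xfffffffeu;
	unsigned int head = tail + RING_SIZE;	/* wraps past zero */

	printf("occupancy=%u empty=%d full=%d slot=%u\n",
	       head - tail, ring_empty(head, tail),
	       ring_full(head, tail, RING_SIZE),
	       tail & (RING_SIZE - 1));
	return 0;
}

This prints occupancy=16 empty=0 full=1 slot=14: because the indices are
masked only at dereference time, every index value is usable and there is
no dead slot, which is what the removed start+len scheme existed to avoid.
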
@@ -117,22 +121,6 @@
}
}
-/* Drop the inode semaphore and wait for a pipe event, atomically */
-void pipe_wait(struct pipe_inode_info *pipe)
-{
- DEFINE_WAIT(wait);
-
- /*
- * Pipes are system-local resources, so sleeping on them
- * is considered a noninteractive wait:
- */
- prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
- pipe_unlock(pipe);
- schedule();
- finish_wait(&pipe->wait, &wait);
- pipe_lock(pipe);
-}
-
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
@@ -149,22 +137,20 @@
put_page(page);
}
-static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
- if (page_count(page) == 1) {
- if (memcg_kmem_enabled())
- memcg_kmem_uncharge(page, 0);
- __SetPageLocked(page);
- return 0;
- }
- return 1;
+ if (page_count(page) != 1)
+ return false;
+ memcg_kmem_uncharge_page(page, 0);
+ __SetPageLocked(page);
+ return true;
}
/**
- * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
+ * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
@@ -175,8 +161,8 @@
* he wishes; the typical use is insertion into a different file
* page cache.
*/
-int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
@@ -187,12 +173,11 @@
*/
if (page_count(page) == 1) {
lock_page(page);
- return 0;
+ return true;
}
-
- return 1;
+ return false;
}
-EXPORT_SYMBOL(generic_pipe_buf_steal);
+EXPORT_SYMBOL(generic_pipe_buf_try_steal);
/**
* generic_pipe_buf_get - get a reference to a &struct pipe_buffer
@@ -211,22 +196,6 @@
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
- * generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @info: the pipe that the buffer belongs to
- * @buf: the buffer to confirm
- *
- * Description:
- * This function does nothing, because the generic pipe code uses
- * pages that are always good when inserted into the pipe.
- */
-int generic_pipe_buf_confirm(struct pipe_inode_info *info,
- struct pipe_buffer *buf)
-{
- return 0;
-}
-EXPORT_SYMBOL(generic_pipe_buf_confirm);
-
-/**
* generic_pipe_buf_release - put a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to put a reference to
@@ -242,33 +211,19 @@
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
- .can_merge = 1,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
+ .release = anon_pipe_buf_release,
+ .try_steal = anon_pipe_buf_try_steal,
+ .get = generic_pipe_buf_get,
};
-static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
- .can_merge = 0,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
-};
-
-static const struct pipe_buf_operations packet_pipe_buf_ops = {
- .can_merge = 0,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
-};
-
-void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
+/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
+static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
- if (buf->ops == &anon_pipe_buf_ops)
- buf->ops = &anon_pipe_buf_nomerge_ops;
+ unsigned int head = READ_ONCE(pipe->head);
+ unsigned int tail = READ_ONCE(pipe->tail);
+ unsigned int writers = READ_ONCE(pipe->writers);
+
+ return !pipe_empty(head, tail) || !writers;
}
static ssize_t
@@ -277,27 +232,69 @@
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
- int do_wakeup;
+ bool was_full, wake_next_reader = false;
ssize_t ret;
/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;
- do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
+
+ /*
+ * We only wake up writers if the pipe was full when we started
+ * reading in order to avoid unnecessary wakeups.
+ *
+ * But when we do wake up writers, we do so using a sync wakeup
+ * (WF_SYNC), because we want them to get going and generate more
+ * data for us.
+ */
+ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
for (;;) {
- int bufs = pipe->nrbufs;
- if (bufs) {
- int curbuf = pipe->curbuf;
- struct pipe_buffer *buf = pipe->bufs + curbuf;
+ /* Read ->head with a barrier vs post_one_notification() */
+ unsigned int head = smp_load_acquire(&pipe->head);
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
+
+#ifdef CONFIG_WATCH_QUEUE
+ if (pipe->note_loss) {
+ struct watch_notification n;
+
+ if (total_len < 8) {
+ if (ret == 0)
+ ret = -ENOBUFS;
+ break;
+ }
+
+ n.type = WATCH_TYPE_META;
+ n.subtype = WATCH_META_LOSS_NOTIFICATION;
+ n.info = watch_sizeof(n);
+ if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+ ret += sizeof(n);
+ total_len -= sizeof(n);
+ pipe->note_loss = false;
+ }
+#endif
+
+ if (!pipe_empty(head, tail)) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t chars = buf->len;
size_t written;
int error;
- if (chars > total_len)
+ if (chars > total_len) {
+ if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
+ if (ret == 0)
+ ret = -ENOBUFS;
+ break;
+ }
chars = total_len;
+ }
error = pipe_buf_confirm(pipe, buf);
if (error) {
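
With CONFIG_WATCH_QUEUE enabled, the hunk above makes pipe_read()
synthesize an 8-byte WATCH_META_LOSS_NOTIFICATION record when earlier
notifications had to be dropped, and a reader sees it as an ordinary
record. A hedged userspace sketch of the consuming loop: it assumes the
uapi definitions in <linux/watch_queue.h> and a kernel built with
CONFIG_WATCH_QUEUE, and attaching a real notification source is out of
scope here, so as written the read() simply blocks until something posts:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/watch_queue.h>

int main(void)
{
	int fds[2];
	char buf[4096];
	ssize_t n;

	/* O_NOTIFICATION_PIPE is defined in <linux/watch_queue.h>. */
	if (pipe2(fds, O_NOTIFICATION_PIPE) < 0) {
		perror("pipe2");
		return 1;
	}

	/* Reads return whole watch_notification records, never parts. */
	n = read(fds[0], buf, sizeof(buf));
	for (ssize_t p = 0; p < n; ) {
		struct watch_notification *wn =
			(struct watch_notification *)(buf + p);
		unsigned int len = wn->info & WATCH_INFO_LENGTH;

		if (len == 0)
			break;		/* malformed record; don't spin */
		if (wn->type == WATCH_TYPE_META &&
		    wn->subtype == WATCH_META_LOSS_NOTIFICATION)
			fprintf(stderr, "notifications were lost\n");
		p += len;
	}
	return 0;
}
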
@@ -324,50 +321,77 @@
if (!buf->len) {
pipe_buf_release(pipe, buf);
- curbuf = (curbuf + 1) & (pipe->buffers - 1);
- pipe->curbuf = curbuf;
- pipe->nrbufs = --bufs;
- do_wakeup = 1;
+ spin_lock_irq(&pipe->rd_wait.lock);
+#ifdef CONFIG_WATCH_QUEUE
+ if (buf->flags & PIPE_BUF_FLAG_LOSS)
+ pipe->note_loss = true;
+#endif
+ tail++;
+ pipe->tail = tail;
+ spin_unlock_irq(&pipe->rd_wait.lock);
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
+ if (!pipe_empty(head, tail)) /* More to do? */
+ continue;
}
- if (bufs) /* More to do? */
- continue;
+
if (!pipe->writers)
break;
- if (!pipe->waiting_writers) {
- /* syscall merging: Usually we must not sleep
- * if O_NONBLOCK is set, or if we got some data.
- * But if a writer sleeps in kernel space, then
- * we can wait for that data without violating POSIX.
- */
- if (ret)
- break;
- if (filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
- }
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
+ if (ret)
+ break;
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ __pipe_unlock(pipe);
+
+ /*
+ * We only get here if we didn't actually read anything.
+ *
+ * However, we could have seen (and removed) a zero-sized
+ * pipe buffer, and might have made space in the buffers
+ * that way.
+ *
+ * You can't make zero-sized pipe buffers by doing an empty
+ * write (not even in packet mode), but they can happen if
+ * the writer gets an EFAULT when trying to fill a buffer
+ * that already got allocated and inserted in the buffer
+ * array.
+ *
+ * So we still need to wake up any pending writers in the
+ * _very_ unlikely case that the pipe was full, but we got
+ * no data.
+ */
+ if (unlikely(was_full)) {
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
- pipe_wait(pipe);
+
+ /*
+ * But because we didn't read anything, at this point we can
+ * just return directly with -ERESTARTSYS if we're interrupted,
+ * since we've done any required wakeups and there's no need
+ * to mark anything accessed. And we've dropped the lock.
+ */
+ if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
+ return -ERESTARTSYS;
+
+ __pipe_lock(pipe);
+ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+ wake_next_reader = true;
}
+ if (pipe_empty(pipe->head, pipe->tail))
+ wake_next_reader = false;
__pipe_unlock(pipe);
- /* Signal writers asynchronously that there is more room. */
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ if (was_full) {
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
+ if (wake_next_reader)
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
if (ret > 0)
file_accessed(filp);
return ret;
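
The zero-sized-buffer case the comment above describes begins with a
writer faulting after its ring slot was already reserved. The fault
itself is easy to provoke from userspace with a write() sourced from an
unreadable mapping (whether a zero-length buffer is transiently left in
the ring is internal to the kernel). A minimal sketch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	void *p;

	if (pipe(fds) < 0)
		return 1;

	/* A mapping the kernel cannot read: copy_page_from_iter() will
	 * fault when pipe_write() tries to fill the reserved buffer. */
	p = mmap(NULL, 4096, PROT_NONE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	if (write(fds[1], p, 4096) < 0)
		printf("write failed: %s\n", strerror(errno));	/* EFAULT */
	return 0;
}
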
@@ -378,15 +402,28 @@
return (file->f_flags & O_DIRECT) != 0;
}
+/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
+static inline bool pipe_writable(const struct pipe_inode_info *pipe)
+{
+ unsigned int head = READ_ONCE(pipe->head);
+ unsigned int tail = READ_ONCE(pipe->tail);
+ unsigned int max_usage = READ_ONCE(pipe->max_usage);
+
+ return !pipe_full(head, tail, max_usage) ||
+ !READ_ONCE(pipe->readers);
+}
+
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
+ unsigned int head;
ssize_t ret = 0;
- int do_wakeup = 0;
size_t total_len = iov_iter_count(from);
ssize_t chars;
+ bool was_empty = false;
+ bool wake_next_writer = false;
/* Null write succeeds. */
if (unlikely(total_len == 0))
@@ -400,15 +437,34 @@
goto out;
}
- /* We try to merge small writes */
- chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
- if (pipe->nrbufs && chars != 0) {
- int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
- (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + lastbuf;
+#ifdef CONFIG_WATCH_QUEUE
+ if (pipe->watch_queue) {
+ ret = -EXDEV;
+ goto out;
+ }
+#endif
+
+ /*
+ * Epoll nonsensically wants a wakeup whether the pipe
+ * was already empty or not.
+ *
+ * If it wasn't empty we try to merge new data into
+ * the last buffer.
+ *
+ * That naturally merges small writes, but it also
+ * page-aligns the rest of the writes for large writes
+ * spanning multiple pages.
+ */
+ head = pipe->head;
+ was_empty = true;
+ chars = total_len & (PAGE_SIZE-1);
+ if (chars && !pipe_empty(head, pipe->tail)) {
+ unsigned int mask = pipe->ring_size - 1;
+ struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
- if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
+ if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
+ offset + chars <= PAGE_SIZE) {
ret = pipe_buf_confirm(pipe, buf);
if (ret)
goto out;
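
The merge heuristic keys off the size of the final partial page:
total_len & (PAGE_SIZE-1) is zero for page-aligned writes (nothing worth
merging) and otherwise yields the tail fragment that may fit into the
previous buffer when PIPE_BUF_FLAG_CAN_MERGE is set. A quick
illustration, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096ul

int main(void)
{
	unsigned long lens[] = { 100, 4096, 8192 + 300 };

	for (int i = 0; i < 3; i++) {
		unsigned long chars = lens[i] & (PAGE_SIZE - 1);

		printf("write of %5lu bytes: tail fragment %lu%s\n",
		       lens[i], chars,
		       chars ? " (merge candidate)" : " (page-aligned)");
	}
	return 0;
}
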
@@ -418,7 +474,7 @@
ret = -EFAULT;
goto out;
}
- do_wakeup = 1;
+
buf->len += ret;
if (!iov_iter_count(from))
goto out;
@@ -426,18 +482,17 @@
}
for (;;) {
- int bufs;
-
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
- bufs = pipe->nrbufs;
- if (bufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
+
+ head = pipe->head;
+ if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
+ unsigned int mask = pipe->ring_size - 1;
+ struct pipe_buffer *buf = &pipe->bufs[head & mask];
struct page *page = pipe->tmp_page;
int copied;
@@ -449,12 +504,35 @@
}
pipe->tmp_page = page;
}
- /* Always wake up, even if the copy fails. Otherwise
- * we lock up (O_NONBLOCK-)readers that sleep due to
- * syscall merging.
- * FIXME! Is this really true?
+
+ /* Allocate a slot in the ring in advance and attach an
+ * empty buffer. If we fault or otherwise fail to use
+ * it, either the reader will consume it or it'll still
+ * be there for the next write.
*/
- do_wakeup = 1;
+ spin_lock_irq(&pipe->rd_wait.lock);
+
+ head = pipe->head;
+ if (pipe_full(head, pipe->tail, pipe->max_usage)) {
+ spin_unlock_irq(&pipe->rd_wait.lock);
+ continue;
+ }
+
+ pipe->head = head + 1;
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* Insert it into the buffer array */
+ buf = &pipe->bufs[head & mask];
+ buf->page = page;
+ buf->ops = &anon_pipe_buf_ops;
+ buf->offset = 0;
+ buf->len = 0;
+ if (is_packetized(filp))
+ buf->flags = PIPE_BUF_FLAG_PACKET;
+ else
+ buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
+ pipe->tmp_page = NULL;
+
copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
if (!ret)
@@ -462,25 +540,17 @@
break;
}
ret += copied;
-
- /* Insert it into the buffer array */
- buf->page = page;
- buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = copied;
- buf->flags = 0;
- if (is_packetized(filp)) {
- buf->ops = &packet_pipe_buf_ops;
- buf->flags = PIPE_BUF_FLAG_PACKET;
- }
- pipe->nrbufs = ++bufs;
- pipe->tmp_page = NULL;
if (!iov_iter_count(from))
break;
}
- if (bufs < pipe->buffers)
+
+ if (!pipe_full(head, pipe->tail, pipe->max_usage))
continue;
+
+ /* Wait for buffer space to become available. */
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
@@ -491,21 +561,43 @@
ret = -ERESTARTSYS;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+
+ /*
+ * We're going to release the pipe lock and wait for more
+ * space. We wake up any readers if necessary, and then
+ * after waiting we need to re-check whether the pipe
+ * became empty while we dropped the lock.
+ */
+ __pipe_unlock(pipe);
+ if (was_empty) {
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
}
- pipe->waiting_writers++;
- pipe_wait(pipe);
- pipe->waiting_writers--;
+ wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
+ __pipe_lock(pipe);
+ was_empty = pipe_empty(pipe->head, pipe->tail);
+ wake_next_writer = true;
}
out:
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ wake_next_writer = false;
__pipe_unlock(pipe);
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+
+ /*
+ * If we do a wakeup event, we do a 'sync' wakeup, because we
+ * want the reader to start processing things asap, rather than
+ * leave the data pending.
+ *
+ * This is particularly important for small writes, because of
+ * how (for example) the GNU make jobserver uses small writes to
+ * wake up pending jobs.
+ */
+ if (was_empty) {
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
+ if (wake_next_writer)
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
if (err)
@@ -518,23 +610,40 @@
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
- int count, buf, nrbufs;
+ int count, head, tail, mask;
switch (cmd) {
- case FIONREAD:
- __pipe_lock(pipe);
- count = 0;
- buf = pipe->curbuf;
- nrbufs = pipe->nrbufs;
- while (--nrbufs >= 0) {
- count += pipe->bufs[buf].len;
- buf = (buf+1) & (pipe->buffers - 1);
- }
- __pipe_unlock(pipe);
+ case FIONREAD:
+ __pipe_lock(pipe);
+ count = 0;
+ head = pipe->head;
+ tail = pipe->tail;
+ mask = pipe->ring_size - 1;
- return put_user(count, (int __user *)arg);
- default:
- return -ENOIOCTLCMD;
+ while (tail != head) {
+ count += pipe->bufs[tail & mask].len;
+ tail++;
+ }
+ __pipe_unlock(pipe);
+
+ return put_user(count, (int __user *)arg);
+
+#ifdef CONFIG_WATCH_QUEUE
+ case IOC_WATCH_QUEUE_SET_SIZE: {
+ int ret;
+ __pipe_lock(pipe);
+ ret = watch_queue_set_size(pipe, arg);
+ __pipe_unlock(pipe);
+ return ret;
+ }
+
+ case IOC_WATCH_QUEUE_SET_FILTER:
+ return watch_queue_set_filter(
+ pipe, (struct watch_notification_filter __user *)arg);
+#endif
+
+ default:
+ return -ENOIOCTLCMD;
}
}
@@ -544,21 +653,38 @@
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- int nrbufs;
+ unsigned int head, tail;
- poll_wait(filp, &pipe->wait, wait);
+ /*
+ * Reading pipe state only -- no need for acquiring the semaphore.
+ *
+ * But because this is racy, the code has to add the
+ * entry to the poll table _first_ ..
+ */
+ if (filp->f_mode & FMODE_READ)
+ poll_wait(filp, &pipe->rd_wait, wait);
+ if (filp->f_mode & FMODE_WRITE)
+ poll_wait(filp, &pipe->wr_wait, wait);
- /* Reading only -- no need for acquiring the semaphore. */
- nrbufs = pipe->nrbufs;
+ /*
+ * .. and only then can you do the racy tests. That way,
+ * if something changes and you got it wrong, the poll
+ * table entry will wake you up and fix it.
+ */
+ head = READ_ONCE(pipe->head);
+ tail = READ_ONCE(pipe->tail);
+
mask = 0;
if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
+ if (!pipe_empty(head, tail))
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
- mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
+ if (!pipe_full(head, tail, pipe->max_usage))
+ mask |= EPOLLOUT | EPOLLWRNORM;
/*
* Most Unices do not set EPOLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
@@ -596,8 +722,10 @@
if (file->f_mode & FMODE_WRITE)
pipe->writers--;
- if (pipe->readers || pipe->writers) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
+ /* Was that the last reader or writer, but not the other side? */
+ if (!pipe->readers != !pipe->writers) {
+ wake_up_interruptible_all(&pipe->rd_wait);
+ wake_up_interruptible_all(&pipe->wr_wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
@@ -626,27 +754,27 @@
return retval;
}
-static unsigned long account_pipe_buffers(struct user_struct *user,
- unsigned long old, unsigned long new)
+unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new)
{
return atomic_long_add_return(new - old, &user->pipe_bufs);
}
-static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
+bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);
return soft_limit && user_bufs > soft_limit;
}
-static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
+bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);
return hard_limit && user_bufs > hard_limit;
}
-static bool is_unprivileged_user(void)
+bool pipe_is_unprivileged_user(void)
{
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
@@ -668,21 +796,24 @@
user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
- if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
+ if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
pipe_bufs = PIPE_MIN_DEF_BUFFERS;
}
- if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
+ if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
goto out_revert_acct;
pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
GFP_KERNEL_ACCOUNT);
if (pipe->bufs) {
- init_waitqueue_head(&pipe->wait);
+ init_waitqueue_head(&pipe->rd_wait);
+ init_waitqueue_head(&pipe->wr_wait);
pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = pipe_bufs;
+ pipe->max_usage = pipe_bufs;
+ pipe->ring_size = pipe_bufs;
+ pipe->nr_accounted = pipe_bufs;
pipe->user = user;
mutex_init(&pipe->mutex);
return pipe;
@@ -700,13 +831,22 @@
{
int i;
- (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
+#ifdef CONFIG_WATCH_QUEUE
+ if (pipe->watch_queue)
+ watch_queue_clear(pipe->watch_queue);
+#endif
+
+ (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
free_uid(pipe->user);
- for (i = 0; i < pipe->buffers; i++) {
+ for (i = 0; i < pipe->ring_size; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
pipe_buf_release(pipe, buf);
}
+#ifdef CONFIG_WATCH_QUEUE
+ if (pipe->watch_queue)
+ put_watch_queue(pipe->watch_queue);
+#endif
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe->bufs);
@@ -772,9 +912,19 @@
{
struct inode *inode = get_pipe_inode();
struct file *f;
+ int error;
if (!inode)
return -ENFILE;
+
+ if (flags & O_NOTIFICATION_PIPE) {
+ error = watch_queue_init(inode->i_pipe);
+ if (error) {
+ free_pipe_info(inode->i_pipe);
+ iput(inode);
+ return error;
+ }
+ }
f = alloc_file_pseudo(inode, pipe_mnt, "",
O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
@@ -796,6 +946,8 @@
}
res[0]->private_data = inode->i_pipe;
res[1] = f;
+ stream_open(inode, res[0]);
+ stream_open(inode, res[1]);
return 0;
}
@@ -804,7 +956,7 @@
int error;
int fdw, fdr;
- if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
+ if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
return -EINVAL;
error = create_pipe_files(files, flags);
@@ -881,12 +1033,52 @@
return do_pipe2(fildes, 0);
}
+/*
+ * This is the stupid "wait for pipe to be readable or writable"
+ * model.
+ *
+ * See pipe_read/write() for the proper kind of exclusive wait,
+ * but that requires that we wake up any other readers/writers
+ * if we then do not end up reading everything (ie the whole
+ * "wake_next_reader/writer" logic in pipe_read/write()).
+ */
+void pipe_wait_readable(struct pipe_inode_info *pipe)
+{
+ pipe_unlock(pipe);
+ wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
+ pipe_lock(pipe);
+}
+
+void pipe_wait_writable(struct pipe_inode_info *pipe)
+{
+ pipe_unlock(pipe);
+ wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
+ pipe_lock(pipe);
+}
+
+/*
+ * This depends on both the wait (here) and the wakeup (wake_up_partner)
+ * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
+ * race with the count check and waitqueue prep.
+ *
+ * Normally in order to avoid races, you'd do the prepare_to_wait() first,
+ * then check the condition you're waiting for, and only then sleep. But
+ * because of the pipe lock, we can check the condition before being on
+ * the wait queue.
+ *
+ * We use the 'rd_wait' waitqueue for pipe partner waiting.
+ */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
- int cur = *cnt;
+ DEFINE_WAIT(rdwait);
+ int cur = *cnt;
while (cur == *cnt) {
- pipe_wait(pipe);
+ prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
+ pipe_unlock(pipe);
+ schedule();
+ finish_wait(&pipe->rd_wait, &rdwait);
+ pipe_lock(pipe);
if (signal_pending(current))
break;
}
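
The contrast drawn above, pipe_wait_readable()'s wake-everybody model
versus the exclusive waits in pipe_read()/pipe_write() where one woken
task passes the wakeup on, maps onto a familiar userspace pattern. A
rough pthread analogue, illustrative only and not a model of the
kernel's waitqueues:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
static int items;

static void *consumer(void *arg)
{
	pthread_mutex_lock(&lock);
	while (items == 0)
		pthread_cond_wait(&nonempty, &lock);	/* one waiter woken */
	items--;
	if (items > 0)
		pthread_cond_signal(&nonempty);	/* the wake_next_reader step */
	pthread_mutex_unlock(&lock);
	return arg;
}

static void produce(int n)
{
	pthread_mutex_lock(&lock);
	items += n;
	pthread_cond_signal(&nonempty);		/* wake just one consumer */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, consumer, NULL);
	pthread_create(&t2, NULL, consumer, NULL);
	produce(2);	/* a single signal serves both, via chaining */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
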
@@ -895,7 +1087,7 @@
static void wake_up_partner(struct pipe_inode_info *pipe)
{
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible_all(&pipe->rd_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
@@ -934,9 +1126,9 @@
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+ stream_open(inode, filp);
- switch (filp->f_mode) {
+ switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
/*
* O_RDONLY
@@ -958,7 +1150,7 @@
}
}
break;
-
+
case FMODE_WRITE:
/*
* O_WRONLY
@@ -978,7 +1170,7 @@
goto err_wr;
}
break;
-
+
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
@@ -1006,13 +1198,13 @@
err_rd:
if (!--pipe->readers)
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible(&pipe->wr_wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--pipe->writers)
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible_all(&pipe->rd_wait);
ret = -ERESTARTSYS;
goto err;
@@ -1032,6 +1224,7 @@
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
+ .splice_write = iter_file_splice_write,
};
/*
@@ -1051,20 +1244,91 @@
}
/*
+ * Resize the pipe ring to a number of slots.
+ *
+ * Note the pipe can be reduced in capacity, but only if the current
+ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
+ * returned instead.
+ */
+int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+{
+ struct pipe_buffer *bufs;
+ unsigned int head, tail, mask, n;
+
+ bufs = kcalloc(nr_slots, sizeof(*bufs),
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ if (unlikely(!bufs))
+ return -ENOMEM;
+
+ spin_lock_irq(&pipe->rd_wait.lock);
+ mask = pipe->ring_size - 1;
+ head = pipe->head;
+ tail = pipe->tail;
+
+ n = pipe_occupancy(head, tail);
+ if (nr_slots < n) {
+ spin_unlock_irq(&pipe->rd_wait.lock);
+ kfree(bufs);
+ return -EBUSY;
+ }
+
+ /*
+ * The pipe array wraps around, so just start the new one at zero
+ * and adjust the indices.
+ */
+ if (n > 0) {
+ unsigned int h = head & mask;
+ unsigned int t = tail & mask;
+ if (h > t) {
+ memcpy(bufs, pipe->bufs + t,
+ n * sizeof(struct pipe_buffer));
+ } else {
+ unsigned int tsize = pipe->ring_size - t;
+ if (h > 0)
+ memcpy(bufs + tsize, pipe->bufs,
+ h * sizeof(struct pipe_buffer));
+ memcpy(bufs, pipe->bufs + t,
+ tsize * sizeof(struct pipe_buffer));
+ }
+ }
+
+ head = n;
+ tail = 0;
+
+ kfree(pipe->bufs);
+ pipe->bufs = bufs;
+ pipe->ring_size = nr_slots;
+ if (pipe->max_usage > nr_slots)
+ pipe->max_usage = nr_slots;
+ pipe->tail = tail;
+ pipe->head = head;
+
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* This might have made more room for writers */
+ wake_up_interruptible(&pipe->wr_wait);
+ return 0;
+}
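
The copy inside pipe_resize_ring() unwraps the occupied span of the old
ring so the new ring can start at index zero: when the span wraps past
the end of the old array, the tail segment and the head segment are
copied as two pieces. The same dance on plain integers, as a stand-alone
sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Old ring of 4 slots holding 3 items that wrap: unmasked
	 * tail = 3 (slot 3) and head = 6 (slot 2). */
	int old[4] = { 102, 103, -1, 101 };
	unsigned int ring_size = 4, head = 6, tail = 3;
	unsigned int n = head - tail;			/* 3 occupied */
	unsigned int h = head & (ring_size - 1);	/* 2 */
	unsigned int t = tail & (ring_size - 1);	/* 3 */
	int fresh[8];

	if (h > t) {
		memcpy(fresh, old + t, n * sizeof(int));
	} else {
		unsigned int tsize = ring_size - t;	/* slots after t */

		memcpy(fresh, old + t, tsize * sizeof(int));
		if (h > 0)
			memcpy(fresh + tsize, old, h * sizeof(int));
	}

	for (unsigned int i = 0; i < n; i++)
		printf("%d ", fresh[i]);	/* 101 102 103 */
	printf("\n");
	return 0;
}
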
+
+/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or return -ERROR on error.
*/
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
- struct pipe_buffer *bufs;
- unsigned int size, nr_pages;
unsigned long user_bufs;
+ unsigned int nr_slots, size;
long ret = 0;
- size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
+#ifdef CONFIG_WATCH_QUEUE
+ if (pipe->watch_queue)
+ return -EBUSY;
+#endif
- if (!nr_pages)
+ size = round_pipe_size(arg);
+ nr_slots = size >> PAGE_SHIFT;
+
+ if (!nr_slots)
return -EINVAL;
/*
@@ -1074,67 +1338,30 @@
* Decreasing the pipe capacity is always permitted, even
* if the user is currently over a limit.
*/
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->max_usage &&
size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
return -EPERM;
- user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+ user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->max_usage &&
(too_many_pipe_buffers_hard(user_bufs) ||
too_many_pipe_buffers_soft(user_bufs)) &&
- is_unprivileged_user()) {
+ pipe_is_unprivileged_user()) {
ret = -EPERM;
goto out_revert_acct;
}
- /*
- * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
- * expect a lot of shrink+grow operations, just free and allocate
- * again like we would do for growing. If the pipe currently
- * contains more buffers than arg, then return busy.
- */
- if (nr_pages < pipe->nrbufs) {
- ret = -EBUSY;
+ ret = pipe_resize_ring(pipe, nr_slots);
+ if (ret < 0)
goto out_revert_acct;
- }
- bufs = kcalloc(nr_pages, sizeof(*bufs),
- GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
- if (unlikely(!bufs)) {
- ret = -ENOMEM;
- goto out_revert_acct;
- }
-
- /*
- * The pipe array wraps around, so just start the new one at zero
- * and adjust the indexes.
- */
- if (pipe->nrbufs) {
- unsigned int tail;
- unsigned int head;
-
- tail = pipe->curbuf + pipe->nrbufs;
- if (tail < pipe->buffers)
- tail = 0;
- else
- tail &= (pipe->buffers - 1);
-
- head = pipe->nrbufs - tail;
- if (head)
- memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
- if (tail)
- memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
- }
-
- pipe->curbuf = 0;
- kfree(pipe->bufs);
- pipe->bufs = bufs;
- pipe->buffers = nr_pages;
- return nr_pages * PAGE_SIZE;
+ pipe->max_usage = nr_slots;
+ pipe->nr_accounted = nr_slots;
+ return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
- (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+ (void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
return ret;
}
@@ -1143,9 +1370,17 @@
* location, so checking ->i_pipe is not enough to verify that this is a
* pipe.
*/
-struct pipe_inode_info *get_pipe_info(struct file *file)
+struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
- return file->f_op == &pipefifo_fops ? file->private_data : NULL;
+ struct pipe_inode_info *pipe = file->private_data;
+
+ if (file->f_op != &pipefifo_fops || !pipe)
+ return NULL;
+#ifdef CONFIG_WATCH_QUEUE
+ if (for_splice && pipe->watch_queue)
+ return NULL;
+#endif
+ return pipe;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -1153,7 +1388,7 @@
struct pipe_inode_info *pipe;
long ret;
- pipe = get_pipe_info(file);
+ pipe = get_pipe_info(file, false);
if (!pipe)
return -EBADF;
@@ -1164,7 +1399,7 @@
ret = pipe_set_size(pipe, arg);
break;
case F_GETPIPE_SZ:
- ret = pipe->buffers * PAGE_SIZE;
+ ret = pipe->max_usage * PAGE_SIZE;
break;
default:
ret = -EINVAL;
@@ -1186,16 +1421,20 @@
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
-static struct dentry *pipefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+
+static int pipefs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
- &pipefs_dentry_operations, PIPEFS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &pipefs_ops;
+ ctx->dops = &pipefs_dentry_operations;
+ return 0;
}
static struct file_system_type pipe_fs_type = {
.name = "pipefs",
- .mount = pipefs_mount,
+ .init_fs_context = pipefs_init_fs_context,
.kill_sb = kill_anon_super,
};
--
Gitblit v1.6.2