2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/fs/eventfd.c
@@ -25,6 +25,8 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
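The hunk above swaps the task-local recursion flag for a per-CPU counter. That counter is presumably paired with a DECLARE_PER_CPU and a small inline helper in include/linux/eventfd.h, which this diff does not show; a minimal sketch of that header side, assuming the helper keeps the eventfd_signal_count() name referenced in the comment in the next hunk:

/* include/linux/eventfd.h -- sketch only, not taken from this diff */
DECLARE_PER_CPU(int, eventfd_wake_count);

/*
 * Non-zero while eventfd_signal() is running on this CPU, i.e. a nested
 * eventfd_signal() issued from a wakeup handler would recurse.
 */
static inline bool eventfd_signal_count(void)
{
        return this_cpu_read(eventfd_wake_count);
}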
@@ -51,21 +53,21 @@
          * Deadlock or stack overflow issues can happen if we recurse here
          * through waitqueue wakeup handlers. If the caller users potentially
          * nested waitqueues with custom wakeup handlers, then it should
-         * check eventfd_signal_allowed() before calling this function. If
-         * it returns false, the eventfd_signal() call should be deferred to a
+         * check eventfd_signal_count() before calling this function. If
+         * it returns true, the eventfd_signal() call should be deferred to a
          * safe context.
          */
-        if (WARN_ON_ONCE(current->in_eventfd_signal))
+        if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
                 return 0;
 
         spin_lock_irqsave(&ctx->wqh.lock, flags);
-        current->in_eventfd_signal = 1;
+        this_cpu_inc(eventfd_wake_count);
         if (ULLONG_MAX - ctx->count < n)
                 n = ULLONG_MAX - ctx->count;
         ctx->count += n;
         if (waitqueue_active(&ctx->wqh))
                 wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
-        current->in_eventfd_signal = 0;
+        this_cpu_dec(eventfd_wake_count);
         spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
         return n;
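Per the comment in the hunk above, a caller that wires custom wakeup handlers into potentially nested waitqueues is expected to test eventfd_signal_count() and defer the signal when it returns true. A minimal sketch of that deferral pattern; my_dev, trigger, signal_work and the handler names are illustrative only, and the two-argument eventfd_signal() matches the pre-6.8 API this tree uses:

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_dev {                          /* hypothetical driver state */
        struct eventfd_ctx *trigger;     /* eventfd to signal */
        struct work_struct signal_work;  /* deferred-signal work item */
};

static void my_signal_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, signal_work);

        /* Process context: no eventfd_signal() is active on this CPU. */
        eventfd_signal(dev->trigger, 1);
}

static void my_wakeup_handler(struct my_dev *dev)
{
        /*
         * If this CPU is already inside eventfd_signal(), signalling again
         * from here could recurse through the wakeup handlers; defer it.
         */
        if (eventfd_signal_count())
                schedule_work(&dev->signal_work);
        else
                eventfd_signal(dev->trigger, 1);
}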
@@ -185,11 +187,14 @@
         return events;
 }
 
-static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
 {
-        *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+        lockdep_assert_held(&ctx->wqh.lock);
+
+        *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
         ctx->count -= *cnt;
 }
+EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
 
 /**
  * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
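The export together with the new lockdep_assert_held() implies eventfd_ctx_do_read() is now meant to be called from code outside fs/eventfd.c that already holds ctx->wqh.lock, which is the case inside a wait-queue callback attached to the eventfd's waitqueue (the waker runs callbacks with wqh.lock held). A rough sketch of such a consumer; my_waiter and my_consume are made-up names, not an existing in-tree user:

#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_waiter {                       /* hypothetical consumer state */
        wait_queue_entry_t wait;         /* entry added to the eventfd's wqh */
        struct eventfd_ctx *ctx;
};

/* Runs as a wakeup callback; the waker already holds ctx->wqh.lock. */
static int my_consume(wait_queue_entry_t *wait, unsigned int mode,
                      int sync, void *key)
{
        struct my_waiter *w = container_of(wait, struct my_waiter, wait);
        __u64 cnt;

        if (key && !(key_to_poll(key) & EPOLLIN))
                return 0;

        /* The lockdep assertion in eventfd_ctx_do_read() is satisfied here. */
        eventfd_ctx_do_read(w->ctx, &cnt);
        return !!cnt;
}

In practice the wait-queue entry would be registered through the eventfd file's poll machinery rather than by touching ctx->wqh directly, so the callback only ever runs while the waker holds wqh.lock.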