2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/fs/eventfd.c
@@ -25,6 +25,8 @@
 #include <linux/idr.h>
 #include <linux/uio.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -51,21 +53,21 @@
 	 * Deadlock or stack overflow issues can happen if we recurse here
 	 * through waitqueue wakeup handlers. If the caller users potentially
 	 * nested waitqueues with custom wakeup handlers, then it should
-	 * check eventfd_signal_allowed() before calling this function. If
-	 * it returns false, the eventfd_signal() call should be deferred to a
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(current->in_eventfd_signal))
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	current->in_eventfd_signal = 1;
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
-	current->in_eventfd_signal = 0;
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
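
For reference, the deferral rule described in the updated comment can be exercised as in the minimal sketch below. The sketch is not part of the patch: the my_notifier structure, its trigger and defer_work fields, and the my_defer_fn()/my_notify() helpers are hypothetical names used only to illustrate checking eventfd_signal_count() before eventfd_signal() and deferring to a workqueue when it returns true.

/* Hypothetical caller sketch; not from the patch above. */
#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_notifier {
	struct eventfd_ctx *trigger;	/* eventfd to signal (hypothetical field) */
	struct work_struct defer_work;	/* deferred signalling path */
};

/* Runs later from the workqueue, i.e. from a safe, non-nested context. */
static void my_defer_fn(struct work_struct *work)
{
	struct my_notifier *n = container_of(work, struct my_notifier, defer_work);

	eventfd_signal(n->trigger, 1);
}

/* Called from a wakeup handler that may itself sit on an eventfd waitqueue. */
static void my_notify(struct my_notifier *n)
{
	if (eventfd_signal_count())
		/* Already inside eventfd_signal() on this CPU: defer. */
		schedule_work(&n->defer_work);
	else
		eventfd_signal(n->trigger, 1);
}

The sketch assumes defer_work was set up once with INIT_WORK(&n->defer_work, my_defer_fn). Deferring to process context avoids nesting a second eventfd wakeup inside the one that is already running, which is exactly the recursion the per-CPU eventfd_wake_count counter is there to catch.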