| Old | New | Code |
|---|---|---|
| .. | .. | |
| 25 | 25 | #include <linux/idr.h> |
| 26 | 26 | #include <linux/uio.h> |
| 27 | 27 | |
| | 28 | +DEFINE_PER_CPU(int, eventfd_wake_count); |
| | 29 | + |
| 28 | 30 | static DEFINE_IDA(eventfd_ida); |
| 29 | 31 | |
| 30 | 32 | struct eventfd_ctx { |
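The hunk above introduces a per-CPU counter, `eventfd_wake_count`, in place of the per-task `current->in_eventfd_signal` flag used by the newer mainline code. Callers query it through `eventfd_signal_count()`; a minimal sketch of that companion helper follows, assuming it sits next to a `DECLARE_PER_CPU` declaration in `include/linux/eventfd.h` (neither appears in this diff):

```c
/* Sketch of the helper referenced by the comment in the next hunk;
 * assumed to live in include/linux/eventfd.h, outside this diff. */
DECLARE_PER_CPU(int, eventfd_wake_count);

static inline bool eventfd_signal_count(void)
{
	/* Non-zero only while this CPU is inside eventfd_signal()'s
	 * locked wakeup section, where a nested signal would recurse. */
	return this_cpu_read(eventfd_wake_count);
}
```

Because the counter is only changed inside the irq-disabled, `wqh.lock`-protected region of `eventfd_signal()`, a non-zero reading on the same CPU reliably indicates that a nested signal would recurse.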
| Old | New | Code |
|---|---|---|
| .. | .. | |
| 51 | 53 | * Deadlock or stack overflow issues can happen if we recurse here |
| 52 | 54 | * through waitqueue wakeup handlers. If the caller users potentially |
| 53 | 55 | * nested waitqueues with custom wakeup handlers, then it should |
| 54 | | - * check eventfd_signal_allowed() before calling this function. If |
| 55 | | - * it returns false, the eventfd_signal() call should be deferred to a |
| | 56 | + * check eventfd_signal_count() before calling this function. If |
| | 57 | + * it returns true, the eventfd_signal() call should be deferred to a |
| 56 | 58 | * safe context. |
| 57 | 59 | */ |
| 58 | | - if (WARN_ON_ONCE(current->in_eventfd_signal)) |
| | 60 | + if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count))) |
| 59 | 61 | return 0; |
| 60 | 62 | |
| 61 | 63 | spin_lock_irqsave(&ctx->wqh.lock, flags); |
| 62 | | - current->in_eventfd_signal = 1; |
| | 64 | + this_cpu_inc(eventfd_wake_count); |
| 63 | 65 | if (ULLONG_MAX - ctx->count < n) |
| 64 | 66 | n = ULLONG_MAX - ctx->count; |
| 65 | 67 | ctx->count += n; |
| 66 | 68 | if (waitqueue_active(&ctx->wqh)) |
| 67 | 69 | wake_up_locked_poll(&ctx->wqh, EPOLLIN \| mask); |
| 68 | | - current->in_eventfd_signal = 0; |
| | 70 | + this_cpu_dec(eventfd_wake_count); |
| 69 | 71 | spin_unlock_irqrestore(&ctx->wqh.lock, flags); |
| 70 | 72 | |
| 71 | 73 | return n; |
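The reworded comment asks callers that may run inside nested wakeup handlers to test `eventfd_signal_count()` first and, when it returns true, to defer the signal to a safe context. A hedged illustration of that pattern using a workqueue follows; the `my_notifier` structure and its functions are hypothetical, while `eventfd_signal()`, `eventfd_signal_count()`, `schedule_work()` and `INIT_WORK()` are the kernel APIs being exercised:

```c
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_notifier {
	struct eventfd_ctx *trigger;	/* obtained via eventfd_ctx_fdget() */
	struct work_struct work;	/* INIT_WORK(&work, my_notifier_signal_work)
					 * is assumed to run at setup time */
};

static void my_notifier_signal_work(struct work_struct *work)
{
	struct my_notifier *n = container_of(work, struct my_notifier, work);

	/* Process context: signalling here cannot recurse into a wakeup. */
	eventfd_signal(n->trigger, 1);
}

static void my_notifier_kick(struct my_notifier *n)
{
	if (eventfd_signal_count())
		/* Already inside an eventfd wakeup on this CPU: defer. */
		schedule_work(&n->work);
	else
		eventfd_signal(n->trigger, 1);
}
```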
| Old | New | Code |
|---|---|---|
| .. | .. | |
| 185 | 187 | return events; |
| 186 | 188 | } |
| 187 | 189 | |
| 188 | | -static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) |
| | 190 | +void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) |
| 189 | 191 | { |
| 190 | | - *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; |
| | 192 | + lockdep_assert_held(&ctx->wqh.lock); |
| | 193 | + |
| | 194 | + *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count; |
| 191 | 195 | ctx->count -= *cnt; |
| 192 | 196 | } |
| | 197 | +EXPORT_SYMBOL_GPL(eventfd_ctx_do_read); |
| 193 | 198 | |
| 194 | 199 | /** |
| 195 | 200 | * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. |
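The last hunk drops `static`, exports `eventfd_ctx_do_read()` and adds a lockdep assertion, so modules may now drain the counter from contexts that already hold `ctx->wqh.lock`, typically a wait-queue callback invoked through `wake_up_locked_poll()`. A sketch of such a consumer follows, modelled loosely on in-tree irqfd-style users; the `my_consumer` structure and callback are hypothetical:

```c
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_consumer {
	struct eventfd_ctx *eventfd;	/* from eventfd_ctx_fdget() */
	wait_queue_entry_t wait;	/* queued on the eventfd's waitqueue
					 * via vfs_poll()/poll_wait() */
};

/* Wake-up callback: wake_up_locked_poll() calls this with the eventfd's
 * wqh.lock held, which is exactly what the new lockdep assertion checks. */
static int my_consumer_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			      int sync, void *key)
{
	struct my_consumer *c = container_of(wait, struct my_consumer, wait);
	__u64 cnt;

	if (key_to_poll(key) & EPOLLIN)
		eventfd_ctx_do_read(c->eventfd, &cnt);	/* consume the count */

	return 0;
}
```

Note that the reworked assignment in the hunk also makes a semaphore-mode eventfd report 0 instead of 1 when it is drained while its count is already zero.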