| .. | .. |
|---|
| 121 | 121 | long nr_pages; |
|---|
| 122 | 122 | |
|---|
| 123 | 123 | struct rcu_work free_rwork; /* see free_ioctx() */ |
|---|
| 124 | + struct work_struct free_work; /* see free_ioctx() */ |
|---|
| 124 | 125 | |
|---|
| 125 | 126 | /* |
|---|
| 126 | 127 | * signals when all in-flight requests are done |
|---|
| .. | .. |
|---|
| 608 | 609 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - |
|---|
| 609 | 610 | * now it's safe to cancel any that need to be. |
|---|
| 610 | 611 | */ |
|---|
| 611 | | -static void free_ioctx_users(struct percpu_ref *ref) |
|---|
| 612 | +static void free_ioctx_users_work(struct work_struct *work) |
|---|
| 612 | 613 | { |
|---|
| 613 | | - struct kioctx *ctx = container_of(ref, struct kioctx, users); |
|---|
| 614 | + struct kioctx *ctx = container_of(work, struct kioctx, free_work); |
|---|
| 614 | 615 | struct aio_kiocb *req; |
|---|
| 615 | 616 | |
|---|
| 616 | 617 | spin_lock_irq(&ctx->ctx_lock); |
|---|
| .. | .. |
|---|
| 628 | 629 | percpu_ref_put(&ctx->reqs); |
|---|
| 629 | 630 | } |
|---|
| 630 | 631 | |
|---|
| 632 | +static void free_ioctx_users(struct percpu_ref *ref) /* percpu_ref release callback — may be invoked in atomic context */ |
|---|
| 633 | +{ |
|---|
| 634 | + struct kioctx *ctx = container_of(ref, struct kioctx, users); |
|---|
| 635 | + |
|---|
| 636 | + INIT_WORK(&ctx->free_work, free_ioctx_users_work); /* defer teardown: free_ioctx_users_work takes ctx_lock and cancels in-flight requests, so it must run in process context */ |
|---|
| 637 | + schedule_work(&ctx->free_work); /* queue on the system workqueue; ctx stays alive until the work's final percpu_ref_put */ |
|---|
| 638 | +} |
|---|
| 639 | + |
|---|
| 631 | 640 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
|---|
| 632 | 641 | { |
|---|
| 633 | 642 | unsigned i, new_nr; |
|---|