| .. | .. |
| 10 | 10 | #include "xfs_shared.h" |
| 11 | 11 | #include "xfs_trans_resv.h" |
| 12 | 12 | #include "xfs_mount.h" |
| 13 | | -#include "xfs_error.h" |
| 14 | | -#include "xfs_alloc.h" |
| 15 | 13 | #include "xfs_extent_busy.h" |
| 16 | | -#include "xfs_discard.h" |
| 17 | 14 | #include "xfs_trans.h" |
| 18 | 15 | #include "xfs_trans_priv.h" |
| 19 | 16 | #include "xfs_log.h" |
| .. | .. |
| 40 | 37 | { |
| 41 | 38 | struct xlog_ticket *tic; |
| 42 | 39 | |
| 43 | | - tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, |
| 44 | | - KM_SLEEP|KM_NOFS); |
| 40 | + tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0); |
| 45 | 41 | |
| 46 | 42 | /* |
| 47 | 43 | * set the current reservation to zero so we know to steal the basic |
| .. | .. |
| 182 | 178 | |
| 183 | 179 | /* |
| 184 | 180 | * We free and allocate here as a realloc would copy |
| 185 | | - * unecessary data. We don't use kmem_zalloc() for the |
| 181 | + * unnecessary data. We don't use kmem_zalloc() for the |
| 186 | 182 | * same reason - we don't need to zero the data area in |
| 187 | 183 | * the buffer, only the log vector header and the iovec |
| 188 | 184 | * storage. |
| 189 | 185 | */ |
| 190 | 186 | kmem_free(lip->li_lv_shadow); |
| 191 | 187 | |
| 192 | | - lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS); |
| 188 | + lv = kmem_alloc_large(buf_size, KM_NOFS); |
| 193 | 189 | memset(lv, 0, xlog_cil_iovec_space(niovecs)); |
| 194 | 190 | |
| 195 | 191 | lv->lv_item = lip; |
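The free-then-alloc pattern described in the comment above is worth spelling out. Below is a minimal userspace sketch of the same idea; the function name and the buffer/header size split are illustrative (in the kernel the header span is xlog_cil_iovec_space(niovecs) and the allocation is kmem_alloc_large() with KM_NOFS):

```c
#include <stdlib.h>
#include <string.h>

/*
 * realloc() would copy the old payload, which the caller is about to
 * overwrite anyway, so free + alloc + zeroing only the header region
 * is cheaper. Hypothetical stand-in for the shadow buffer allocation
 * in the hunk above.
 */
static void *shadow_buf_realloc(void *old, size_t buf_size, size_t hdr_size)
{
	void *lv;

	free(old);			/* avoid realloc()'s useless copy */
	lv = malloc(buf_size);
	if (lv)
		memset(lv, 0, hdr_size);/* zero header + iovec array only */
	return lv;
}
```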
| .. | .. |
| 243 | 239 | * this CIL context and so we need to pin it. If we are replacing the |
| 244 | 240 | * old_lv, then remove the space it accounts for and make it the shadow |
| 245 | 241 | * buffer for later freeing. In both cases we are now switching to the |
| 246 | | - * shadow buffer, so update the the pointer to it appropriately. |
| 242 | + * shadow buffer, so update the pointer to it appropriately. |
| 247 | 243 | */ |
| 248 | 244 | if (!old_lv) { |
| 249 | | - lv->lv_item->li_ops->iop_pin(lv->lv_item); |
| 245 | + if (lv->lv_item->li_ops->iop_pin) |
| 246 | + lv->lv_item->li_ops->iop_pin(lv->lv_item); |
| 250 | 247 | lv->lv_item->li_lv_shadow = NULL; |
| 251 | 248 | } else if (old_lv != lv) { |
| 252 | 249 | ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); |
| .. | .. |
| 576 | 573 | */ |
| 577 | 574 | static void |
| 578 | 575 | xlog_cil_committed( |
| 579 | | - void *args, |
| 580 | | - int abort) |
| 576 | + struct xfs_cil_ctx *ctx) |
| 581 | 577 | { |
| 582 | | - struct xfs_cil_ctx *ctx = args; |
| 583 | 578 | struct xfs_mount *mp = ctx->cil->xc_log->l_mp; |
| 579 | + bool abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log); |
| 580 | + |
| 581 | + /* |
| 582 | + * If the I/O failed, we're aborting the commit and already shutdown. |
| 583 | + * Wake any commit waiters before aborting the log items so we don't |
| 584 | + * block async log pushers on callbacks. Async log pushers explicitly do |
| 585 | + * not wait on log force completion because they may be holding locks |
| 586 | + * required to unpin items. |
| 587 | + */ |
| 588 | + if (abort) { |
| 589 | + spin_lock(&ctx->cil->xc_push_lock); |
| 590 | + wake_up_all(&ctx->cil->xc_commit_wait); |
| 591 | + spin_unlock(&ctx->cil->xc_push_lock); |
| 592 | + } |
| 584 | 593 | |
| 585 | 594 | xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, |
| 586 | 595 | ctx->start_lsn, abort); |
| .. | .. |
| 589 | 598 | xfs_extent_busy_clear(mp, &ctx->busy_extents, |
| 590 | 599 | (mp->m_flags & XFS_MOUNT_DISCARD) && !abort); |
| 591 | 600 | |
| 592 | | - /* |
| 593 | | - * If we are aborting the commit, wake up anyone waiting on the |
| 594 | | - * committing list. If we don't, then a shutdown we can leave processes |
| 595 | | - * waiting in xlog_cil_force_lsn() waiting on a sequence commit that |
| 596 | | - * will never happen because we aborted it. |
| 597 | | - */ |
| 598 | 601 | spin_lock(&ctx->cil->xc_push_lock); |
| 599 | | - if (abort) |
| 600 | | - wake_up_all(&ctx->cil->xc_commit_wait); |
| 601 | 602 | list_del(&ctx->committing); |
| 602 | 603 | spin_unlock(&ctx->cil->xc_push_lock); |
| 603 | 604 | |
| .. | .. |
| 609 | 610 | kmem_free(ctx); |
| 610 | 611 | } |
| 611 | 612 | |
| 613 | +void |
| 614 | +xlog_cil_process_committed( |
| 615 | + struct list_head *list) |
| 616 | +{ |
| 617 | + struct xfs_cil_ctx *ctx; |
| 618 | + |
| 619 | + while ((ctx = list_first_entry_or_null(list, |
| 620 | + struct xfs_cil_ctx, iclog_entry))) { |
| 621 | + list_del(&ctx->iclog_entry); |
| 622 | + xlog_cil_committed(ctx); |
| 623 | + } |
| 624 | +} |
| 625 | + |
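For context on where this new helper runs: the iclog I/O completion side empties each iclog's callback list and hands it over. A simplified sketch of that caller, abbreviated from the xlog_state_do_callback() path (iclog state handling elided; iclog is in scope there):

```c
/* Splice the pending CIL contexts off the iclog under its callback
 * lock, then run the completions without the lock held. */
LIST_HEAD(cb_list);

spin_lock(&iclog->ic_callback_lock);
list_splice_init(&iclog->ic_callbacks, &cb_list);
spin_unlock(&iclog->ic_callback_lock);

xlog_cil_process_committed(&cb_list);
```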
| 612 | 626 | /* |
| 613 | | - * Push the Committed Item List to the log. If @push_seq flag is zero, then it |
| 614 | | - * is a background flush and so we can chose to ignore it. Otherwise, if the |
| 615 | | - * current sequence is the same as @push_seq we need to do a flush. If |
| 616 | | - * @push_seq is less than the current sequence, then it has already been |
| 627 | + * Push the Committed Item List to the log. |
| 628 | + * |
| 629 | + * If the current sequence is the same as xc_push_seq we need to do a flush. If |
| 630 | + * xc_push_seq is less than the current sequence, then it has already been |
| 617 | 631 | * flushed and we don't need to do anything - the caller will wait for it to |
| 618 | 632 | * complete if necessary. |
| 619 | 633 | * |
| 620 | | - * @push_seq is a value rather than a flag because that allows us to do an |
| 621 | | - * unlocked check of the sequence number for a match. Hence we can allows log |
| 622 | | - * forces to run racily and not issue pushes for the same sequence twice. If we |
| 623 | | - * get a race between multiple pushes for the same sequence they will block on |
| 624 | | - * the first one and then abort, hence avoiding needless pushes. |
| 634 | + * xc_push_seq is checked unlocked against the sequence number for a match. |
| 635 | + * Hence we can allow log forces to run racily and not issue pushes for the |
| 636 | + * same sequence twice. If we get a race between multiple pushes for the same |
| 637 | + * sequence they will block on the first one and then abort, hence avoiding |
| 638 | + * needless pushes. |
| 625 | 639 | */ |
| 626 | | -STATIC int |
| 627 | | -xlog_cil_push( |
| 628 | | - struct xlog *log) |
| 640 | +static void |
| 641 | +xlog_cil_push_work( |
| 642 | + struct work_struct *work) |
| 629 | 643 | { |
| 630 | | - struct xfs_cil *cil = log->l_cilp; |
| 644 | + struct xfs_cil *cil = |
| 645 | + container_of(work, struct xfs_cil, xc_push_work); |
| 646 | + struct xlog *log = cil->xc_log; |
| 631 | 647 | struct xfs_log_vec *lv; |
| 632 | 648 | struct xfs_cil_ctx *ctx; |
| 633 | 649 | struct xfs_cil_ctx *new_ctx; |
| .. | .. |
| 641 | 657 | xfs_lsn_t commit_lsn; |
| 642 | 658 | xfs_lsn_t push_seq; |
| 643 | 659 | |
| 644 | | - if (!cil) |
| 645 | | - return 0; |
| 646 | | - |
| 647 | | - new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); |
| 660 | + new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS); |
| 648 | 661 | new_ctx->ticket = xlog_cil_ticket_alloc(log); |
| 649 | 662 | |
| 650 | 663 | down_write(&cil->xc_ctx_lock); |
| .. | .. |
| 653 | 666 | spin_lock(&cil->xc_push_lock); |
| 654 | 667 | push_seq = cil->xc_push_seq; |
| 655 | 668 | ASSERT(push_seq <= ctx->sequence); |
| 669 | + |
| 670 | + /* |
| 671 | + * As we are about to switch to a new, empty CIL context, we no longer |
| 672 | + * need to throttle tasks on CIL space overruns. Wake any waiters that |
| 673 | + * the hard push throttle may have caught so they can start committing |
| 674 | + * to the new context. The ctx->xc_push_lock provides the serialisation |
| 675 | + * necessary for safely using the lockless waitqueue_active() check in |
| 676 | + * this context. |
| 677 | + */ |
| 678 | + if (waitqueue_active(&cil->xc_push_wait)) |
| 679 | + wake_up_all(&cil->xc_push_wait); |
| 656 | 680 | |
| 657 | 681 | /* |
| 658 | 682 | * Check if we've anything to push. If there is nothing, then we don't |
| .. | .. |
| 666 | 690 | } |
| 667 | 691 | |
| 668 | 692 | |
| 669 | | - /* check for a previously pushed seqeunce */ |
| 693 | + /* check for a previously pushed sequence */ |
| 670 | 694 | if (push_seq < cil->xc_ctx->sequence) { |
| 671 | 695 | spin_unlock(&cil->xc_push_lock); |
| 672 | 696 | goto out_skip; |
| .. | .. |
| 724 | 748 | |
| 725 | 749 | /* |
| 726 | 750 | * initialise the new context and attach it to the CIL. Then attach |
| 727 | | - * the current context to the CIL committing lsit so it can be found |
| 751 | + * the current context to the CIL committing list so it can be found |
| 728 | 752 | * during log forces to extract the commit lsn of the sequence that |
| 729 | 753 | * needs to be forced. |
| 730 | 754 | */ |
| .. | .. |
| 753 | 777 | * that higher sequences will wait for us to write out a commit record |
| 754 | 778 | * before they do. |
| 755 | 779 | * |
| 756 | | - * xfs_log_force_lsn requires us to mirror the new sequence into the cil |
| 780 | + * xfs_log_force_seq requires us to mirror the new sequence into the cil |
| 757 | 781 | * structure atomically with the addition of this sequence to the |
| 758 | 782 | * committing list. This also ensures that we can do unlocked checks |
| 759 | 783 | * against the current sequence in log forces without risking |
| .. | .. |
| 787 | 811 | lvhdr.lv_iovecp = &lhdr; |
| 788 | 812 | lvhdr.lv_next = ctx->lv_chain; |
| 789 | 813 | |
| 790 | | - error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0); |
| 814 | + error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true); |
| 791 | 815 | if (error) |
| 792 | 816 | goto out_abort_free_ticket; |
| 793 | 817 | |
| .. | .. |
| 825 | 849 | } |
| 826 | 850 | spin_unlock(&cil->xc_push_lock); |
| 827 | 851 | |
| 828 | | - /* xfs_log_done always frees the ticket on error. */ |
| 829 | | - commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false); |
| 830 | | - if (commit_lsn == -1) |
| 831 | | - goto out_abort; |
| 832 | | - |
| 833 | | - /* attach all the transactions w/ busy extents to iclog */ |
| 834 | | - ctx->log_cb.cb_func = xlog_cil_committed; |
| 835 | | - ctx->log_cb.cb_arg = ctx; |
| 836 | | - error = xfs_log_notify(commit_iclog, &ctx->log_cb); |
| 852 | + error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn); |
| 837 | 853 | if (error) |
| 854 | + goto out_abort_free_ticket; |
| 855 | + |
| 856 | + xfs_log_ticket_ungrant(log, tic); |
| 857 | + |
| 858 | + spin_lock(&commit_iclog->ic_callback_lock); |
| 859 | + if (commit_iclog->ic_state == XLOG_STATE_IOERROR) { |
| 860 | + spin_unlock(&commit_iclog->ic_callback_lock); |
| 838 | 861 | goto out_abort; |
| 862 | + } |
| 863 | + ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE || |
| 864 | + commit_iclog->ic_state == XLOG_STATE_WANT_SYNC); |
| 865 | + list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks); |
| 866 | + spin_unlock(&commit_iclog->ic_callback_lock); |
| 839 | 867 | |
| 840 | 868 | /* |
| 841 | 869 | * now the checkpoint commit is complete and we've attached the |
| .. | .. |
| 848 | 876 | spin_unlock(&cil->xc_push_lock); |
| 849 | 877 | |
| 850 | 878 | /* release the hounds! */ |
| 851 | | - return xfs_log_release_iclog(log->l_mp, commit_iclog); |
| 879 | + xfs_log_release_iclog(commit_iclog); |
| 880 | + return; |
| 852 | 881 | |
| 853 | 882 | out_skip: |
| 854 | 883 | up_write(&cil->xc_ctx_lock); |
| 855 | 884 | xfs_log_ticket_put(new_ctx->ticket); |
| 856 | 885 | kmem_free(new_ctx); |
| 857 | | - return 0; |
| 886 | + return; |
| 858 | 887 | |
| 859 | 888 | out_abort_free_ticket: |
| 860 | | - xfs_log_ticket_put(tic); |
| 889 | + xfs_log_ticket_ungrant(log, tic); |
| 861 | 890 | out_abort: |
| 862 | | - xlog_cil_committed(ctx, XFS_LI_ABORTED); |
| 863 | | - return -EIO; |
| 864 | | -} |
| 865 | | - |
| 866 | | -static void |
| 867 | | -xlog_cil_push_work( |
| 868 | | - struct work_struct *work) |
| 869 | | -{ |
| 870 | | - struct xfs_cil *cil = container_of(work, struct xfs_cil, |
| 871 | | - xc_push_work); |
| 872 | | - xlog_cil_push(cil->xc_log); |
| 891 | + ASSERT(XLOG_FORCED_SHUTDOWN(log)); |
| 892 | + xlog_cil_committed(ctx); |
| 873 | 893 | } |
| 874 | 894 | |
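The "unlocked check, recheck under the lock" discipline described in the comment block above is what keeps racing log forces cheap. A self-contained C11 illustration of the pattern, with pthreads and made-up names standing in for the kernel primitives:

```c
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned long push_seq;	/* highest sequence already queued */

/* Request a push of @seq: callers may race for the same sequence, but
 * only the first one to recheck under the lock actually queues work. */
static void push_now(unsigned long seq, void (*queue_push)(void))
{
	/* unlocked check: an equal or later sequence is already in flight */
	if (atomic_load(&push_seq) >= seq)
		return;

	pthread_mutex_lock(&push_lock);
	if (push_seq < seq) {		/* recheck under the lock */
		push_seq = seq;
		queue_push();		/* kernel: queue_work(...) */
	}
	pthread_mutex_unlock(&push_lock);
}
```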
| 875 | 895 | /* |
| .. | .. |
| 881 | 901 | */ |
| 882 | 902 | static void |
| 883 | 903 | xlog_cil_push_background( |
| 884 | | - struct xlog *log) |
| 904 | + struct xlog *log) __releases(cil->xc_ctx_lock) |
| 885 | 905 | { |
| 886 | 906 | struct xfs_cil *cil = log->l_cilp; |
| 887 | 907 | |
| .. | .. |
| 892 | 912 | ASSERT(!list_empty(&cil->xc_cil)); |
| 893 | 913 | |
| 894 | 914 | /* |
| 895 | | - * don't do a background push if we haven't used up all the |
| 915 | + * Don't do a background push if we haven't used up all the |
| 896 | 916 | * space available yet. |
| 897 | 917 | */ |
| 898 | | - if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) |
| 918 | + if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) { |
| 919 | + up_read(&cil->xc_ctx_lock); |
| 899 | 920 | return; |
| 921 | + } |
| 900 | 922 | |
| 901 | 923 | spin_lock(&cil->xc_push_lock); |
| 902 | 924 | if (cil->xc_push_seq < cil->xc_current_sequence) { |
| 903 | 925 | cil->xc_push_seq = cil->xc_current_sequence; |
| 904 | 926 | queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work); |
| 905 | 927 | } |
| 928 | + |
| 929 | + /* |
| 930 | + * Drop the context lock now, we can't hold that if we need to sleep |
| 931 | + * because we are over the blocking threshold. The push_lock is still |
| 932 | + * held, so blocking threshold sleep/wakeup is still correctly |
| 933 | + * serialised here. |
| 934 | + */ |
| 935 | + up_read(&cil->xc_ctx_lock); |
| 936 | + |
| 937 | + /* |
| 938 | + * If we are well over the space limit, throttle the work that is being |
| 939 | + * done until the push work on this context has begun. Enforce the hard |
| 940 | + * throttle on all transaction commits once it has been activated, even |
| 941 | + * if the committing transactions have resulted in the space usage |
| 942 | + * dipping back down under the hard limit. |
| 943 | + * |
| 944 | + * The ctx->xc_push_lock provides the serialisation necessary for safely |
| 945 | + * using the lockless waitqueue_active() check in this context. |
| 946 | + */ |
| 947 | + if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) || |
| 948 | + waitqueue_active(&cil->xc_push_wait)) { |
| 949 | + trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); |
| 950 | + ASSERT(cil->xc_ctx->space_used < log->l_logsize); |
| 951 | + xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); |
| 952 | + return; |
| 953 | + } |
| 954 | + |
| 906 | 955 | spin_unlock(&cil->xc_push_lock); |
| 907 | 956 | |
| 908 | 957 | } |
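Both waitqueue_active() sites above lean on the same lock discipline: a throttled committer adds itself to xc_push_wait while still holding xc_push_lock (xlog_wait() drops the lock only after the task is on the queue), so a waker that takes xc_push_lock cannot miss a sleeper. A pthread analogue of that serialisation (illustrative only; the kernel version additionally skips the wakeup when the queue is observably empty):

```c
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  push_wait = PTHREAD_COND_INITIALIZER;
static bool push_started;

/* Throttled committer: queues itself under the lock, cf. xlog_wait(). */
static void throttle_sleep(void)
{
	pthread_mutex_lock(&push_lock);
	while (!push_started)
		pthread_cond_wait(&push_wait, &push_lock);
	pthread_mutex_unlock(&push_lock);
}

/* Push worker: taking the same lock orders it after any sleeper. */
static void throttle_wake(void)
{
	pthread_mutex_lock(&push_lock);
	push_started = true;
	pthread_cond_broadcast(&push_wait);	/* cf. wake_up_all() */
	pthread_mutex_unlock(&push_lock);
}
```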
| .. | .. |
| 971 | 1020 | * allowed again. |
| 972 | 1021 | */ |
| 973 | 1022 | void |
| 974 | | -xfs_log_commit_cil( |
| 975 | | - struct xfs_mount *mp, |
| 1023 | +xlog_cil_commit( |
| 1024 | + struct xlog *log, |
| 976 | 1025 | struct xfs_trans *tp, |
| 977 | | - xfs_lsn_t *commit_lsn, |
| 1026 | + xfs_csn_t *commit_seq, |
| 978 | 1027 | bool regrant) |
| 979 | 1028 | { |
| 980 | | - struct xlog *log = mp->m_log; |
| 981 | 1029 | struct xfs_cil *cil = log->l_cilp; |
| 982 | | - xfs_lsn_t xc_commit_lsn; |
| 1030 | + struct xfs_log_item *lip, *next; |
| 983 | 1031 | |
| 984 | 1032 | /* |
| 985 | 1033 | * Do all necessary memory allocation before we lock the CIL. |
| .. | .. |
| 993 | 1041 | |
| 994 | 1042 | xlog_cil_insert_items(log, tp); |
| 995 | 1043 | |
| 996 | | - xc_commit_lsn = cil->xc_ctx->sequence; |
| 997 | | - if (commit_lsn) |
| 998 | | - *commit_lsn = xc_commit_lsn; |
| 999 | | - |
| 1000 | | - xfs_log_done(mp, tp->t_ticket, NULL, regrant); |
| 1044 | + if (regrant && !XLOG_FORCED_SHUTDOWN(log)) |
| 1045 | + xfs_log_ticket_regrant(log, tp->t_ticket); |
| 1046 | + else |
| 1047 | + xfs_log_ticket_ungrant(log, tp->t_ticket); |
| 1001 | 1048 | tp->t_ticket = NULL; |
| 1002 | 1049 | xfs_trans_unreserve_and_mod_sb(tp); |
| 1003 | 1050 | |
| 1004 | 1051 | /* |
| 1005 | 1052 | * Once all the items of the transaction have been copied to the CIL, |
| 1006 | | - * the items can be unlocked and freed. |
| 1053 | + * the items can be unlocked and possibly freed. |
| 1007 | 1054 | * |
| 1008 | 1055 | * This needs to be done before we drop the CIL context lock because we |
| 1009 | 1056 | * have to update state in the log items and unlock them before they go |
| .. | .. |
| 1012 | 1059 | * the log items. This affects (at least) processing of stale buffers, |
| 1013 | 1060 | * inodes and EFIs. |
| 1014 | 1061 | */ |
| 1015 | | - xfs_trans_free_items(tp, xc_commit_lsn, false); |
| 1062 | + trace_xfs_trans_commit_items(tp, _RET_IP_); |
| 1063 | + list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { |
| 1064 | + xfs_trans_del_item(lip); |
| 1065 | + if (lip->li_ops->iop_committing) |
| 1066 | + lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); |
| 1067 | + } |
| 1068 | + if (commit_seq) |
| 1069 | + *commit_seq = cil->xc_ctx->sequence; |
| 1016 | 1070 | |
| 1071 | + /* xlog_cil_push_background() releases cil->xc_ctx_lock */ |
| 1017 | 1072 | xlog_cil_push_background(log); |
| 1018 | | - |
| 1019 | | - up_read(&cil->xc_ctx_lock); |
| 1020 | 1073 | } |
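For reference, the caller's side after this refactor: __xfs_trans_commit() now collects a checkpoint sequence rather than an LSN, and forces that sequence for synchronous commits. A sketch with error paths elided (log, tp, mp, sync and regrant are in scope in that function):

```c
xfs_csn_t	commit_seq = 0;

xlog_cil_commit(log, tp, &commit_seq, regrant);
if (sync)
	error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
```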
| 1021 | 1074 | |
| 1022 | 1075 | /* |
| .. | .. |
| 1030 | 1083 | * iclog flush is necessary following this call. |
| 1031 | 1084 | */ |
| 1032 | 1085 | xfs_lsn_t |
| 1033 | | -xlog_cil_force_lsn( |
| 1086 | +xlog_cil_force_seq( |
| 1034 | 1087 | struct xlog *log, |
| 1035 | | - xfs_lsn_t sequence) |
| 1088 | + xfs_csn_t sequence) |
| 1036 | 1089 | { |
| 1037 | 1090 | struct xfs_cil *cil = log->l_cilp; |
| 1038 | 1091 | struct xfs_cil_ctx *ctx; |
| .. | .. |
| 1126 | 1179 | */ |
| 1127 | 1180 | bool |
| 1128 | 1181 | xfs_log_item_in_current_chkpt( |
| 1129 | | - struct xfs_log_item *lip) |
| 1182 | + struct xfs_log_item *lip) |
| 1130 | 1183 | { |
| 1131 | | - struct xfs_cil_ctx *ctx; |
| 1184 | + struct xfs_cil *cil = lip->li_mountp->m_log->l_cilp; |
| 1132 | 1185 | |
| 1133 | 1186 | if (list_empty(&lip->li_cil)) |
| 1134 | 1187 | return false; |
| 1135 | | - |
| 1136 | | - ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; |
| 1137 | 1188 | |
| 1138 | 1189 | /* |
| 1139 | 1190 | * li_seq is written on the first commit of a log item to record the |
| 1140 | 1191 | * first checkpoint it is written to. Hence if it is different to the |
| 1141 | 1192 | * current sequence, we're in a new checkpoint. |
| 1142 | 1193 | */ |
| 1143 | | - if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0) |
| 1144 | | - return false; |
| 1145 | | - return true; |
| 1194 | + return lip->li_seq == READ_ONCE(cil->xc_current_sequence); |
| 1146 | 1195 | } |
| 1147 | 1196 | |
| 1148 | 1197 | /* |
| .. | .. |
| 1155 | 1204 | struct xfs_cil *cil; |
| 1156 | 1205 | struct xfs_cil_ctx *ctx; |
| 1157 | 1206 | |
| 1158 | | - cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); |
| 1207 | + cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL); |
| 1159 | 1208 | if (!cil) |
| 1160 | 1209 | return -ENOMEM; |
| 1161 | 1210 | |
| 1162 | | - ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); |
| 1211 | + ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL); |
| 1163 | 1212 | if (!ctx) { |
| 1164 | 1213 | kmem_free(cil); |
| 1165 | 1214 | return -ENOMEM; |
| .. | .. |
| 1170 | 1219 | INIT_LIST_HEAD(&cil->xc_committing); |
| 1171 | 1220 | spin_lock_init(&cil->xc_cil_lock); |
| 1172 | 1221 | spin_lock_init(&cil->xc_push_lock); |
| 1222 | + init_waitqueue_head(&cil->xc_push_wait); |
| 1173 | 1223 | init_rwsem(&cil->xc_ctx_lock); |
| 1174 | 1224 | init_waitqueue_head(&cil->xc_commit_wait); |
| 1175 | 1225 | |
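A closing note on the allocation-flag churn in these hunks: with KM_SLEEP removed, sleeping GFP_KERNEL behaviour becomes the wrappers' implicit default, leaving only the modifiers (KM_NOFS, KM_MAYFAIL, KM_ZERO). The conversion in fs/xfs/kmem.h works along these lines (a paraphrased sketch of kmem_flags_convert() from kernels of this era, not the verbatim source):

```c
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags = GFP_KERNEL | __GFP_NOWARN;

	if (flags & KM_NOFS)		/* avoid fs reclaim recursion */
		lflags &= ~__GFP_FS;
	if (flags & KM_MAYFAIL)		/* fail rather than retry forever */
		lflags |= __GFP_RETRY_MAYFAIL;
	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;
	return lflags;
}
```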
|---|