2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/gfs2/glock.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -128,19 +125,45 @@
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
-	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+	kfree(gl->gl_lksb.sb_lvbptr);
+	if (gl->gl_ops->go_flags & GLOF_ASPACE)
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	} else {
-		kfree(gl->gl_lksb.sb_lvbptr);
+	else
 		kmem_cache_free(gfs2_glock_cachep, gl);
-	}
+}
+
+/**
+ * glock_blocked_by_withdraw - determine if we can still use a glock
+ * @gl: the glock
+ *
+ * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
+ * when we're withdrawn. For example, to maintain metadata integrity, we should
+ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
+ * iopen or the transaction glocks may be safely used because none of their
+ * metadata goes through the journal. So in general, we should disallow all
+ * glocks that are journaled, and allow all the others. One exception is:
+ * we need to allow our active journal to be promoted and demoted so others
+ * may recover it and we can reacquire it when they're done.
+ */
+static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	if (likely(!gfs2_withdrawn(sdp)))
+		return false;
+	if (gl->gl_ops->go_flags & GLOF_NONDISK)
+		return false;
+	if (!sdp->sd_jdesc ||
+	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
+		return false;
+	return true;
 }
 
 void gfs2_glock_free(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-	BUG_ON(atomic_read(&gl->gl_revokes));
+	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
 	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 	smp_mb();
 	wake_up_glock(gl);
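The new glock_blocked_by_withdraw() predicate is what later hunks use to gate glock activity once the filesystem has withdrawn. As a minimal caller-side sketch of the intended pattern (it mirrors the gfs2_glock_nq() change further down in this patch; LM_FLAG_NOEXP is the existing override used by recovery paths):

	/* Sketch: refuse new holders on a withdrawn filesystem unless the
	 * caller explicitly opted out of the check with LM_FLAG_NOEXP. */
	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
		return -EIO;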
@@ -247,7 +270,12 @@
 	gfs2_glock_remove_from_lru(gl);
 	spin_unlock(&gl->gl_lockref.lock);
 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+	if (mapping) {
+		truncate_inode_pages_final(mapping);
+		if (!gfs2_withdrawn(sdp))
+			GLOCK_BUG_ON(gl, mapping->nrpages ||
+				     mapping->nrexceptional);
+	}
 	trace_gfs2_glock_put(gl);
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
@@ -284,7 +312,7 @@
 
 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 {
-	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
+	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
 	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 		return 0;
@@ -308,6 +336,11 @@
 	clear_bit(HIF_WAIT, &gh->gh_iflags);
 	smp_mb__after_atomic();
 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+	if (gh->gh_flags & GL_ASYNC) {
+		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
+
+		wake_up(&sdp->sd_async_glock_wait);
+	}
 }
 
 /**
@@ -425,15 +458,21 @@
 		else
 			gl->gl_lockref.count--;
 	}
-	if (held1 && held2 && list_empty(&gl->gl_holders))
-		clear_bit(GLF_QUEUED, &gl->gl_flags);
-
 	if (new_state != gl->gl_target)
 		/* shorten our minimum hold time */
 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
 				       GL_GLOCK_MIN_HOLD);
 	gl->gl_state = new_state;
 	gl->gl_tchange = jiffies;
+}
+
+static void gfs2_set_demote(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	set_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb();
+	wake_up(&sdp->sd_async_glock_wait);
 }
 
 static void gfs2_demote_wake(struct gfs2_glock *gl)
@@ -499,7 +538,8 @@
 			do_xmote(gl, gh, LM_ST_UNLOCKED);
 			break;
 		default: /* Everything else */
-			pr_err("wanted %u got %u\n", gl->gl_target, state);
+			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
+			       gl->gl_target, state);
 			GLOCK_BUG_ON(gl, 1);
 		}
 		spin_unlock(&gl->gl_lockref.lock);
@@ -529,6 +569,16 @@
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+	if (gl == m_ip->i_gl)
+		return true;
+	return false;
+}
+
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -546,8 +596,8 @@
 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
 
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
-	    target != LM_ST_UNLOCKED)
+	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
+	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 		return;
 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 		      LM_FLAG_PRIORITY);
@@ -555,7 +605,14 @@
 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 	    glops->go_inval) {
-		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+		/*
+		 * If another process is already doing the invalidate, let that
+		 * finish first. The glock state machine will get back to this
+		 * holder again later.
+		 */
+		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
+				     &gl->gl_flags))
+			return;
 		do_error(gl, 0); /* Fail queued try locks */
 	}
 	gl->gl_req = target;
@@ -565,13 +622,74 @@
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 	spin_unlock(&gl->gl_lockref.lock);
-	if (glops->go_sync)
-		glops->go_sync(gl);
-	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+	if (glops->go_sync) {
+		ret = glops->go_sync(gl);
+		/* If we had a problem syncing (due to io errors or whatever,
+		 * we should not invalidate the metadata or tell dlm to
+		 * release the glock to other nodes.
+		 */
+		if (ret) {
+			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
+				fs_err(sdp, "Error %d syncing glock \n", ret);
+				gfs2_dump_glock(NULL, gl, true);
+			}
+			goto skip_inval;
+		}
+	}
+	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
+		/*
+		 * The call to go_sync should have cleared out the ail list.
+		 * If there are still items, we have a problem. We ought to
+		 * withdraw, but we can't because the withdraw code also uses
+		 * glocks. Warn about the error, dump the glock, then fall
+		 * through and wait for logd to do the withdraw for us.
+		 */
+		if ((atomic_read(&gl->gl_ail_count) != 0) &&
+		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
+			gfs2_glock_assert_warn(gl,
+					       !atomic_read(&gl->gl_ail_count));
+			gfs2_dump_glock(NULL, gl, true);
+		}
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
-	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+	}
 
+skip_inval:
 	gfs2_glock_hold(gl);
+	/*
+	 * Check for an error encountered since we called go_sync and go_inval.
+	 * If so, we can't withdraw from the glock code because the withdraw
+	 * code itself uses glocks (see function signal_our_withdraw) to
+	 * change the mount to read-only. Most importantly, we must not call
+	 * dlm to unlock the glock until the journal is in a known good state
+	 * (after journal replay) otherwise other nodes may use the object
+	 * (rgrp or dinode) and then later, journal replay will corrupt the
+	 * file system. The best we can do here is wait for the logd daemon
+	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
+	 *
+	 * We make a special exception for some system glocks, such as the
+	 * system statfs inode glock, which needs to be granted before the
+	 * gfs2_quotad daemon can exit, and that exit needs to finish before
+	 * we can unmount the withdrawn file system.
+	 *
+	 * However, if we're just unlocking the lock (say, for unmount, when
+	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
+	 * then it's okay to tell dlm to unlock it.
+	 */
+	if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
+		gfs2_withdraw_delayed(sdp);
+	if (glock_blocked_by_withdraw(gl) &&
+	    (target != LM_ST_UNLOCKED ||
+	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+		if (!is_system_glock(gl)) {
+			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+			goto out;
+		} else {
+			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+		}
+	}
+
 	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
 		/* lock_dlm */
 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
@@ -580,17 +698,15 @@
 		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
 			finish_xmote(gl, target);
 			gfs2_glock_queue_work(gl, 0);
-		}
-		else if (ret) {
-			pr_err("lm_lock ret %d\n", ret);
-			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
-						   &sdp->sd_flags));
+		} else if (ret) {
+			fs_err(sdp, "lm_lock ret %d\n", ret);
+			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
 		}
 	} else { /* lock_nolock */
 		finish_xmote(gl, target);
 		gfs2_glock_queue_work(gl, 0);
 	}
-
+out:
 	spin_lock(&gl->gl_lockref.lock);
 }
 
@@ -604,7 +720,7 @@
 	struct gfs2_holder *gh;
 
 	if (!list_empty(&gl->gl_holders)) {
-		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 			return gh;
 	}
@@ -669,12 +785,95 @@
 	return;
 }
 
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
+{
+	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+	if (ri->ri_magic == 0)
+		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
+	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
+		ri->ri_generation_deleted = cpu_to_be64(generation);
+}
+
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
+{
+	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
+		return false;
+	return generation <= be64_to_cpu(ri->ri_generation_deleted);
+}
+
+static void gfs2_glock_poke(struct gfs2_glock *gl)
+{
+	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
+	struct gfs2_holder gh;
+	int error;
+
+	gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
+	error = gfs2_glock_nq(&gh);
+	if (!error)
+		gfs2_glock_dq(&gh);
+	gfs2_holder_uninit(&gh);
+}
+
+static bool gfs2_try_evict(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+	bool evicted = false;
+
+	/*
+	 * If there is contention on the iopen glock and we have an inode, try
+	 * to grab and release the inode so that it can be evicted. This will
+	 * allow the remote node to go ahead and delete the inode without us
+	 * having to do it, which will avoid rgrp glock thrashing.
+	 *
+	 * The remote node is likely still holding the corresponding inode
+	 * glock, so it will run before we get to verify that the delete has
+	 * happened below.
+	 */
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip && !igrab(&ip->i_inode))
+		ip = NULL;
+	spin_unlock(&gl->gl_lockref.lock);
+	if (ip) {
+		struct gfs2_glock *inode_gl = NULL;
+
+		gl->gl_no_formal_ino = ip->i_no_formal_ino;
+		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+		d_prune_aliases(&ip->i_inode);
+		iput(&ip->i_inode);
+
+		/* If the inode was evicted, gl->gl_object will now be NULL. */
+		spin_lock(&gl->gl_lockref.lock);
+		ip = gl->gl_object;
+		if (ip) {
+			inode_gl = ip->i_gl;
+			lockref_get(&inode_gl->gl_lockref);
+			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+		}
+		spin_unlock(&gl->gl_lockref.lock);
+		if (inode_gl) {
+			gfs2_glock_poke(inode_gl);
+			gfs2_glock_put(inode_gl);
+		}
+		evicted = !ip;
+	}
+	return evicted;
+}
+
 static void delete_work_func(struct work_struct *work)
 {
-	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct inode *inode;
 	u64 no_addr = gl->gl_name.ln_number;
+
+	spin_lock(&gl->gl_lockref.lock);
+	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	spin_unlock(&gl->gl_lockref.lock);
 
 	/* If someone's using this glock to create a new dinode, the block must
 	   have been freed by another node, then re-used, in which case our
@@ -682,8 +881,34 @@
 	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
 		goto out;
 
-	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
-	if (inode && !IS_ERR(inode)) {
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+		/*
+		 * If we can evict the inode, give the remote node trying to
+		 * delete the inode some time before verifying that the delete
+		 * has happened. Otherwise, if we cause contention on the inode glock
+		 * immediately, the remote node will think that we still have
+		 * the inode in use, and so it will give up waiting.
+		 *
+		 * If we can't evict the inode, signal to the remote node that
+		 * the inode is still in use. We'll later try to delete the
+		 * inode locally in gfs2_evict_inode.
+		 *
+		 * FIXME: We only need to verify that the remote node has
+		 * deleted the inode because nodes before this remote delete
+		 * rework won't cooperate. At a later time, when we no longer
+		 * care about compatibility with such nodes, we can skip this
		 * step entirely.
+		 */
+		if (gfs2_try_evict(gl)) {
+			if (gfs2_queue_delete_work(gl, 5 * HZ))
+				return;
+		}
+		goto out;
+	}
+
+	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+				    GFS2_BLKST_UNLINKED);
+	if (!IS_ERR_OR_NULL(inode)) {
 		d_prune_aliases(inode);
 		iput(inode);
 	}
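The LVB helpers added above (gfs2_inode_remember_delete() and gfs2_inode_already_deleted()) let the node that actually deallocates a dinode record the deleted generation in the iopen glock's lock value block, so other nodes can skip a redundant delete attempt. A hedged sketch of the intended call pattern (the real call sites live in the inode eviction/delete paths outside this file; ip is a hypothetical struct gfs2_inode *):

	/* Sketch: before trying to delete, check whether a remote node has
	 * already deleted this inode generation. */
	if (gfs2_inode_already_deleted(gl, ip->i_no_formal_ino))
		return;
	/* ... deallocate the dinode ... */
	gfs2_inode_remember_delete(gl, ip->i_no_formal_ino);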
@@ -713,7 +938,7 @@
 
 		if (!delay) {
 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-			set_bit(GLF_DEMOTE, &gl->gl_flags);
+			gfs2_set_demote(gl);
 		}
 	}
 	run_queue(gl, 0);
@@ -817,7 +1042,7 @@
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
 	if (glops->go_flags & GLOF_LVB) {
-		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
+		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
 			kmem_cache_free(cachep, gl);
 			return -ENOMEM;
@@ -828,6 +1053,7 @@
 	gl->gl_node.next = NULL;
 	gl->gl_flags = 0;
 	gl->gl_name = name;
+	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
 	gl->gl_lockref.count = 1;
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
@@ -844,7 +1070,8 @@
 	gl->gl_object = NULL;
 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-	INIT_WORK(&gl->gl_delete, delete_work_func);
+	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
+		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
 	mapping = gfs2_glock2aspace(gl);
 	if (mapping) {
@@ -934,6 +1161,17 @@
 	gh->gh_ip = 0;
 }
 
+static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
+					unsigned long start_time)
+{
+	/* Have we waited longer that a second? */
+	if (time_after(jiffies, start_time + HZ)) {
+		/* Lengthen the minimum hold time. */
+		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
+				       GL_GLOCK_MAX_HOLD);
+	}
+}
+
 /**
  * gfs2_glock_wait - wait on a glock acquisition
  * @gh: the glock holder
@@ -943,16 +1181,97 @@
 
 int gfs2_glock_wait(struct gfs2_holder *gh)
 {
-	unsigned long time1 = jiffies;
+	unsigned long start_time = jiffies;
 
 	might_sleep();
 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
-	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
-		/* Lengthen the minimum hold time. */
-		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
-					      GL_GLOCK_HOLD_INCR,
-					      GL_GLOCK_MAX_HOLD);
+	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
 	return gh->gh_error;
+}
+
+static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
+{
+	int i;
+
+	for (i = 0; i < num_gh; i++)
+		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
+			return 1;
+	return 0;
+}
+
+/**
+ * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
+ * @num_gh: the number of holders in the array
+ * @ghs: the glock holder array
+ *
+ * Returns: 0 on success, meaning all glocks have been granted and are held.
+ *          -ESTALE if the request timed out, meaning all glocks were released,
+ *          and the caller should retry the operation.
+ */
+
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
+{
+	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
+	int i, ret = 0, timeout = 0;
+	unsigned long start_time = jiffies;
+	bool keep_waiting;
+
+	might_sleep();
+	/*
+	 * Total up the (minimum hold time * 2) of all glocks and use that to
+	 * determine the max amount of time we should wait.
+	 */
+	for (i = 0; i < num_gh; i++)
+		timeout += ghs[i].gh_gl->gl_hold_time << 1;
+
+wait_for_dlm:
+	if (!wait_event_timeout(sdp->sd_async_glock_wait,
+				!glocks_pending(num_gh, ghs), timeout))
+		ret = -ESTALE; /* request timed out. */
+
+	/*
+	 * If dlm granted all our requests, we need to adjust the glock
+	 * minimum hold time values according to how long we waited.
+	 *
+	 * If our request timed out, we need to repeatedly release any held
+	 * glocks we acquired thus far to allow dlm to acquire the remaining
+	 * glocks without deadlocking. We cannot currently cancel outstanding
+	 * glock acquisitions.
+	 *
+	 * The HIF_WAIT bit tells us which requests still need a response from
+	 * dlm.
+	 *
+	 * If dlm sent us any errors, we return the first error we find.
+	 */
+	keep_waiting = false;
+	for (i = 0; i < num_gh; i++) {
+		/* Skip holders we have already dequeued below. */
+		if (!gfs2_holder_queued(&ghs[i]))
+			continue;
+		/* Skip holders with a pending DLM response. */
+		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
+			keep_waiting = true;
+			continue;
+		}
+
+		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
+			if (ret == -ESTALE)
+				gfs2_glock_dq(&ghs[i]);
+			else
+				gfs2_glock_update_hold_time(ghs[i].gh_gl,
+							    start_time);
+		}
+		if (!ret)
+			ret = ghs[i].gh_error;
+	}
+
+	if (keep_waiting)
+		goto wait_for_dlm;
+
+	/*
+	 * At this point, we've either acquired all locks or released them all.
+	 */
+	return ret;
 }
 
 /**
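gfs2_glock_async_wait() is meant to pair with holders queued using the GL_ASYNC flag, so that several glocks can be requested in parallel and then waited for in one place. A minimal caller-side sketch (hedged: the concrete multi-glock users live outside this file, gl1/gl2 are hypothetical glocks, and error handling is abbreviated):

	struct gfs2_holder ghs[2];
	int error;

	/* Queue both requests; GL_ASYNC makes gfs2_glock_nq() return without
	 * waiting for DLM to grant the locks. */
	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[0]);
	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[1]);
	error = gfs2_glock_nq(&ghs[0]);
	if (!error)
		error = gfs2_glock_nq(&ghs[1]);
	/* Wait for both grants; -ESTALE means everything was released and the
	 * whole operation should be retried. */
	if (!error)
		error = gfs2_glock_async_wait(2, ghs);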
@@ -967,9 +1286,10 @@
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 			    unsigned long delay, bool remote)
 {
-	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
-
-	set_bit(bit, &gl->gl_flags);
+	if (delay)
+		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+	else
+		gfs2_set_demote(gl);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
@@ -1021,9 +1341,9 @@
 	struct gfs2_holder *gh2;
 	int try_futile = 0;
 
-	BUG_ON(gh->gh_owner_pid == NULL);
+	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
-		BUG();
+		GLOCK_BUG_ON(gl, true);
 
 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 		if (test_bit(GLF_LOCK, &gl->gl_flags))
@@ -1048,7 +1368,6 @@
 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 			insert_pt = &gh2->gh_list;
 	}
-	set_bit(GLF_QUEUED, &gl->gl_flags);
 	trace_gfs2_glock_queue(gh, 1);
 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
@@ -1060,7 +1379,7 @@
 	}
 	list_add_tail(&gh->gh_list, insert_pt);
 do_cancel:
-	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
 		spin_unlock(&gl->gl_lockref.lock);
 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
@@ -1070,15 +1389,15 @@
 	return;
 
 trap_recursive:
-	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
-	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
-	pr_err("lock type: %d req lock state : %d\n",
+	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
+	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+	fs_err(sdp, "lock type: %d req lock state : %d\n",
 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
-	pr_err("new: %pSR\n", (void *)gh->gh_ip);
-	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
-	pr_err("lock type: %d req lock state : %d\n",
+	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
+	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
+	fs_err(sdp, "lock type: %d req lock state : %d\n",
 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
-	gfs2_dump_glock(NULL, gl);
+	gfs2_dump_glock(NULL, gl, true);
 	BUG();
 }
 
@@ -1094,10 +1413,9 @@
 int gfs2_glock_nq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	int error = 0;
 
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
 		return -EIO;
 
 	if (test_bit(GLF_LRU, &gl->gl_flags))
@@ -1141,24 +1459,34 @@
 void gfs2_glock_dq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned delay = 0;
 	int fast_path = 0;
 
 	spin_lock(&gl->gl_lockref.lock);
+	/*
+	 * If we're in the process of file system withdraw, we cannot just
+	 * dequeue any glocks until our journal is recovered, lest we
+	 * introduce file system corruption. We need two exceptions to this
+	 * rule: We need to allow unlocking of nondisk glocks and the glock
+	 * for our own journal that needs recovery.
+	 */
+	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
+	    glock_blocked_by_withdraw(gl) &&
+	    gh->gh_gl != sdp->sd_jinode_gl) {
+		sdp->sd_glock_dqs_held++;
+		spin_unlock(&gl->gl_lockref.lock);
+		might_sleep();
+		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
+			    TASK_UNINTERRUPTIBLE);
+		spin_lock(&gl->gl_lockref.lock);
+	}
 	if (gh->gh_flags & GL_NOCACHE)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
 	list_del_init(&gh->gh_list);
 	clear_bit(HIF_HOLDER, &gh->gh_iflags);
 	if (find_first_holder(gl) == NULL) {
-		if (glops->go_unlock) {
-			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
-			spin_unlock(&gl->gl_lockref.lock);
-			glops->go_unlock(gh);
-			spin_lock(&gl->gl_lockref.lock);
-			clear_bit(GLF_LOCK, &gl->gl_flags);
-		}
 		if (list_empty(&gl->gl_holders) &&
 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
@@ -1342,16 +1670,15 @@
 	unsigned long now = jiffies;
 
 	gfs2_glock_hold(gl);
+	spin_lock(&gl->gl_lockref.lock);
 	holdtime = gl->gl_tchange + gl->gl_hold_time;
-	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
+	if (!list_empty(&gl->gl_holders) &&
 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
 		if (time_before(now, holdtime))
 			delay = holdtime - now;
 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
 			delay = gl->gl_hold_time;
 	}
-
-	spin_lock(&gl->gl_lockref.lock);
 	handle_callback(gl, state, delay, true);
 	__gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1455,7 +1782,7 @@
 	list_sort(NULL, list, glock_cmp);
 
 	while(!list_empty(list)) {
-		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
+		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
 		clear_bit(GLF_LRU, &gl->gl_flags);
 		if (!spin_trylock(&gl->gl_lockref.lock)) {
@@ -1497,7 +1824,7 @@
 
 	spin_lock(&lru_lock);
 	while ((nr-- >= 0) && !list_empty(&lru_list)) {
-		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
+		gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
 
 		/* Test for being demotable */
 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
@@ -1538,10 +1865,9 @@
 };
 
 /**
- * examine_bucket - Call a function for glock in a hash bucket
+ * glock_hash_walk - Call a function for glock in a hash bucket
  * @examiner: the function
  * @sdp: the filesystem
- * @bucket: the bucket
 *
 * Note that the function can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
@@ -1558,15 +1884,57 @@
 	do {
 		rhashtable_walk_start(&iter);
 
-		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
-			if (gl->gl_name.ln_sbd == sdp &&
-			    lockref_get_not_dead(&gl->gl_lockref))
+		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
+			if (gl->gl_name.ln_sbd == sdp)
 				examiner(gl);
+		}
 
 		rhashtable_walk_stop(&iter);
 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
 
 	rhashtable_walk_exit(&iter);
+}
+
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+	bool queued;
+
+	spin_lock(&gl->gl_lockref.lock);
+	queued = queue_delayed_work(gfs2_delete_workqueue,
+				    &gl->gl_delete, delay);
+	if (queued)
+		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+	return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+	if (cancel_delayed_work(&gl->gl_delete)) {
+		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+		gfs2_glock_put(gl);
+	}
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
+		if (cancel_delayed_work(&gl->gl_delete)) {
+			queue_delayed_work(gfs2_delete_workqueue,
+					   &gl->gl_delete, 0);
+		}
+	}
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+	glock_hash_walk(flush_delete_work, sdp);
+	flush_workqueue(gfs2_delete_workqueue);
 }
 
 /**
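gfs2_queue_delete_work(), gfs2_cancel_delete_work() and gfs2_flush_delete_work() manage the lifecycle of the deferred iopen delete work introduced earlier in this patch. A hedged sketch of how callers are expected to use them (the actual call sites are in the glops callback, inode and unmount paths outside this hunk; io_gl and the 5 * HZ delay are illustrative only):

	/* Sketch: a remote callback on an iopen glock defers the delete check
	 * instead of doing it inline. */
	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
		gfs2_queue_delete_work(gl, 5 * HZ);

	/* Sketch: when the inode is picked up for local use again, any pending
	 * delete work must be cancelled so it cannot race with the new user. */
	gfs2_cancel_delete_work(io_gl);

	/* Sketch: unmount drains all pending delete work before tearing down
	 * the glocks. */
	gfs2_flush_delete_work(sdp);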
@@ -1577,10 +1945,10 @@
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
-	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
-		gfs2_glock_put(gl);
+	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
 		return;
-	}
+	if (!lockref_get_not_dead(&gl->gl_lockref))
+		return;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 	gfs2_glock_queue_work(gl, 0);
 }
@@ -1596,9 +1964,12 @@
 	gfs2_glock_remove_from_lru(gl);
 
 	spin_lock(&gl->gl_lockref.lock);
-	if (gl->gl_state != LM_ST_UNLOCKED)
-		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-	__gfs2_glock_queue_work(gl, 0);
+	if (!__lockref_is_dead(&gl->gl_lockref)) {
+		gl->gl_lockref.count++;
+		if (gl->gl_state != LM_ST_UNLOCKED)
+			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+		__gfs2_glock_queue_work(gl, 0);
+	}
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -1613,16 +1984,16 @@
 	glock_hash_walk(thaw_glock, sdp);
 }
 
-static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
 {
 	spin_lock(&gl->gl_lockref.lock);
-	gfs2_dump_glock(seq, gl);
+	gfs2_dump_glock(seq, gl, fsid);
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
 static void dump_glock_func(struct gfs2_glock *gl)
 {
-	dump_glock(NULL, gl);
+	dump_glock(NULL, gl, true);
 }
 
 /**
@@ -1651,7 +2022,7 @@
 	int ret;
 
 	ret = gfs2_truncatei_resume(ip);
-	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
+	gfs2_glock_assert_withdraw(gl, ret == 0);
 
 	spin_lock(&gl->gl_lockref.lock);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1707,10 +2078,12 @@
  * dump_holder - print information about a glock holder
  * @seq: the seq_file struct
  * @gh: the glock holder
+ * @fs_id_buf: pointer to file system id (if requested)
 *
 */
 
-static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
+static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
+			const char *fs_id_buf)
 {
 	struct task_struct *gh_owner = NULL;
 	char flags_buf[32];
@@ -1718,8 +2091,8 @@
 	rcu_read_lock();
 	if (gh->gh_owner_pid)
 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
-	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
-		       state2str(gh->gh_state),
+	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
+		       fs_id_buf, state2str(gh->gh_state),
 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
 		       gh->gh_error,
 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
@@ -1753,7 +2126,7 @@
 		*p++ = 'I';
 	if (test_bit(GLF_FROZEN, gflags))
 		*p++ = 'F';
-	if (test_bit(GLF_QUEUED, gflags))
+	if (!list_empty(&gl->gl_holders))
 		*p++ = 'q';
 	if (test_bit(GLF_LRU, gflags))
 		*p++ = 'L';
@@ -1761,6 +2134,12 @@
 		*p++ = 'o';
 	if (test_bit(GLF_BLOCKING, gflags))
 		*p++ = 'b';
+	if (test_bit(GLF_INODE_CREATING, gflags))
+		*p++ = 'c';
+	if (test_bit(GLF_PENDING_DELETE, gflags))
+		*p++ = 'P';
+	if (test_bit(GLF_FREEING, gflags))
+		*p++ = 'x';
 	*p = 0;
 	return buf;
 }
@@ -1769,6 +2148,7 @@
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
+ * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
@@ -1782,33 +2162,45 @@
 *
 */
 
-void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	unsigned long long dtime;
 	const struct gfs2_holder *gh;
 	char gflags_buf[32];
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
+	unsigned long nrpages = 0;
 
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+		struct address_space *mapping = gfs2_glock2aspace(gl);
+
+		nrpages = mapping->nrpages;
+	}
+	memset(fs_id_buf, 0, sizeof(fs_id_buf));
+	if (fsid && sdp) /* safety precaution */
+		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
 	dtime = jiffies - gl->gl_demote_time;
 	dtime *= 1000000/HZ; /* demote time in uSec */
 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
 		dtime = 0;
-	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
-		       state2str(gl->gl_state),
-		       gl->gl_name.ln_type,
-		       (unsigned long long)gl->gl_name.ln_number,
-		       gflags2str(gflags_buf, gl),
-		       state2str(gl->gl_target),
-		       state2str(gl->gl_demote_state), dtime,
-		       atomic_read(&gl->gl_ail_count),
-		       atomic_read(&gl->gl_revokes),
-		       (int)gl->gl_lockref.count, gl->gl_hold_time);
+	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
+		       "v:%d r:%d m:%ld p:%lu\n",
+		       fs_id_buf, state2str(gl->gl_state),
+		       gl->gl_name.ln_type,
+		       (unsigned long long)gl->gl_name.ln_number,
+		       gflags2str(gflags_buf, gl),
+		       state2str(gl->gl_target),
+		       state2str(gl->gl_demote_state), dtime,
+		       atomic_read(&gl->gl_ail_count),
+		       atomic_read(&gl->gl_revokes),
+		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
 
 	list_for_each_entry(gh, &gl->gl_holders, gh_list)
-		dump_holder(seq, gh);
+		dump_holder(seq, gh, fs_id_buf);
 
 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
-		glops->go_dump(seq, gl);
+		glops->go_dump(seq, gl, fs_id_buf);
 }
 
@@ -2009,7 +2401,7 @@
 
 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
 {
-	dump_glock(seq, iter_ptr);
+	dump_glock(seq, iter_ptr, false);
 	return 0;
 }
 
@@ -2049,7 +2441,7 @@
 	.show = gfs2_glstats_seq_show,
 };
 
-static const struct seq_operations gfs2_sbstats_seq_ops = {
+static const struct seq_operations gfs2_sbstats_sops = {
 	.start = gfs2_sbstats_seq_start,
 	.next = gfs2_sbstats_seq_next,
 	.stop = gfs2_sbstats_seq_stop,
@@ -2102,16 +2494,6 @@
 	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
 }
 
-static int gfs2_sbstats_open(struct inode *inode, struct file *file)
-{
-	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
-	if (ret == 0) {
-		struct seq_file *seq = file->private_data;
-		seq->private = inode->i_private; /* sdp */
-	}
-	return ret;
-}
-
 static const struct file_operations gfs2_glocks_fops = {
 	.owner = THIS_MODULE,
 	.open = gfs2_glocks_open,
@@ -2128,79 +2510,31 @@
 	.release = gfs2_glocks_release,
 };
 
-static const struct file_operations gfs2_sbstats_fops = {
-	.owner = THIS_MODULE,
-	.open = gfs2_sbstats_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
 
-int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
+void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
 {
-	struct dentry *dent;
+	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
 
-	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
-	if (IS_ERR_OR_NULL(dent))
-		goto fail;
-	sdp->debugfs_dir = dent;
+	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+			    &gfs2_glocks_fops);
 
-	dent = debugfs_create_file("glocks",
-				   S_IFREG | S_IRUGO,
-				   sdp->debugfs_dir, sdp,
-				   &gfs2_glocks_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto fail;
-	sdp->debugfs_dentry_glocks = dent;
+	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);
 
-	dent = debugfs_create_file("glstats",
-				   S_IFREG | S_IRUGO,
-				   sdp->debugfs_dir, sdp,
-				   &gfs2_glstats_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto fail;
-	sdp->debugfs_dentry_glstats = dent;
-
-	dent = debugfs_create_file("sbstats",
-				   S_IFREG | S_IRUGO,
-				   sdp->debugfs_dir, sdp,
-				   &gfs2_sbstats_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto fail;
-	sdp->debugfs_dentry_sbstats = dent;
-
-	return 0;
-fail:
-	gfs2_delete_debugfs_file(sdp);
-	return dent ? PTR_ERR(dent) : -ENOMEM;
+	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+			    &gfs2_sbstats_fops);
 }
 
 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
 {
-	if (sdp->debugfs_dir) {
-		if (sdp->debugfs_dentry_glocks) {
-			debugfs_remove(sdp->debugfs_dentry_glocks);
-			sdp->debugfs_dentry_glocks = NULL;
-		}
-		if (sdp->debugfs_dentry_glstats) {
-			debugfs_remove(sdp->debugfs_dentry_glstats);
-			sdp->debugfs_dentry_glstats = NULL;
-		}
-		if (sdp->debugfs_dentry_sbstats) {
-			debugfs_remove(sdp->debugfs_dentry_sbstats);
-			sdp->debugfs_dentry_sbstats = NULL;
-		}
-		debugfs_remove(sdp->debugfs_dir);
-		sdp->debugfs_dir = NULL;
-	}
+	debugfs_remove_recursive(sdp->debugfs_dir);
+	sdp->debugfs_dir = NULL;
}
 
-int gfs2_register_debugfs(void)
+void gfs2_register_debugfs(void)
 {
 	gfs2_root = debugfs_create_dir("gfs2", NULL);
-	if (IS_ERR(gfs2_root))
-		return PTR_ERR(gfs2_root);
-	return gfs2_root ? 0 : -ENOMEM;
 }
 
 void gfs2_unregister_debugfs(void)