forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/fs/gfs2/glops.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */
 
 #include <linux/spinlock.h>
@@ -28,8 +25,11 @@
 #include "util.h"
 #include "trans.h"
 #include "dir.h"
+#include "lops.h"
 
 struct workqueue_struct *gfs2_freeze_wq;
+
+extern struct workqueue_struct *gfs2_control_wq;
 
 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
@@ -41,7 +41,8 @@
 	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
 	       gl->gl_name.ln_type, gl->gl_name.ln_number,
 	       gfs2_glock2aspace(gl));
-	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
+	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
+	gfs2_withdraw(gl->gl_name.ln_sbd);
 }
 
 /**
@@ -81,10 +82,11 @@
 }
 
 
-static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
+static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	int ret;
 
 	memset(&tr, 0, sizeof(tr));
 	INIT_LIST_HEAD(&tr.tr_buf);
@@ -93,24 +95,51 @@
 	INIT_LIST_HEAD(&tr.tr_ail2_list);
 	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
 
-	if (!tr.tr_revokes)
-		return;
+	if (!tr.tr_revokes) {
+		bool have_revokes;
+		bool log_in_flight;
+
+		/*
+		 * We have nothing on the ail, but there could be revokes on
+		 * the sdp revoke queue, in which case, we still want to flush
+		 * the log and wait for it to finish.
+		 *
+		 * If the sdp revoke list is empty too, we might still have an
+		 * io outstanding for writing revokes, so we should wait for
+		 * it before returning.
+		 *
+		 * If none of these conditions are true, our revokes are all
+		 * flushed and we can return.
+		 */
+		gfs2_log_lock(sdp);
+		have_revokes = !list_empty(&sdp->sd_log_revokes);
+		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
+		gfs2_log_unlock(sdp);
+		if (have_revokes)
+			goto flush;
+		if (log_in_flight)
+			log_flush_wait(sdp);
+		return 0;
+	}
 
 	/* A shortened, inline version of gfs2_trans_begin()
 	 * tr->alloced is not set since the transaction structure is
 	 * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
 	tr.tr_ip = _RET_IP_;
-	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
-		return;
+	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
+	if (ret < 0)
+		return ret;
 	WARN_ON_ONCE(current->journal_info);
 	current->journal_info = &tr;
 
 	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
 
 	gfs2_trans_end(sdp);
+flush:
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_EMPTY_GL);
+	return 0;
 }
 
 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
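
Note: tr.tr_reserved above is one block for the log header plus the blocks needed to hold the revoke records themselves; the sizeof(u64) argument disappears because upstream gfs2_struct2blk() now assumes u64-sized entries (revokes being its only use). A rough standalone sketch of the arithmetic — the 64-byte per-block header is an assumption for illustration, not the real on-disk layout:

#include <stdio.h>

static unsigned int revoke_blocks(unsigned int nrevokes,
				  unsigned int bsize,	/* fs block size */
				  unsigned int hdr)	/* per-block header (assumed) */
{
	unsigned int per_blk = (bsize - hdr) / sizeof(unsigned long long);

	/* round up: a partially filled trailing block still costs a block */
	return (nrevokes + per_blk - 1) / per_blk;
}

int main(void)
{
	/* e.g. 1000 revokes in 4096-byte blocks: 1 header + 2 revoke blocks */
	printf("reserved = %u\n", 1 + revoke_blocks(1000, 4096, 64));
	return 0;
}
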
@@ -136,6 +165,31 @@
 }
 
 /**
+ * gfs2_rgrp_metasync - sync out the metadata of a resource group
+ * @gl: the glock protecting the resource group
+ *
+ */
+
+static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct address_space *metamapping = &sdp->sd_aspace;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
+	int error;
+
+	filemap_fdatawrite_range(metamapping, start, end);
+	error = filemap_fdatawait_range(metamapping, start, end);
+	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
+	mapping_set_error(metamapping, error);
+	if (error)
+		gfs2_io_error(sdp);
+	return error;
+}
+
+/**
  * rgrp_go_sync - sync out the metadata for this glock
  * @gl: the glock
  *
@@ -144,35 +198,23 @@
  * return to caller to demote/unlock the glock until I/O is complete.
  */
 
-static void rgrp_go_sync(struct gfs2_glock *gl)
+static int rgrp_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	int error;
 
-	spin_lock(&gl->gl_lockref.lock);
-	rgd = gl->gl_object;
-	if (rgd)
-		gfs2_rgrp_brelse(rgd);
-	spin_unlock(&gl->gl_lockref.lock);
-
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+		return 0;
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
 	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_RGRP_GO_SYNC);
-	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
-	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
-	mapping_set_error(mapping, error);
-	gfs2_ail_empty_gl(gl);
-
-	spin_lock(&gl->gl_lockref.lock);
-	rgd = gl->gl_object;
-	if (rgd)
-		gfs2_free_clones(rgd);
-	spin_unlock(&gl->gl_lockref.lock);
+	error = gfs2_rgrp_metasync(gl);
+	if (!error)
+		error = gfs2_ail_empty_gl(gl);
+	gfs2_free_clones(rgd);
+	return error;
 }
 
 /**
@@ -190,16 +232,23 @@
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
 
-	if (rgd)
-		gfs2_rgrp_brelse(rgd);
-
+	gfs2_rgrp_brelse(rgd);
 	WARN_ON_ONCE(!(flags & DIO_METADATA));
-	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
-	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	truncate_inode_pages_range(mapping, start, end);
+	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+}
+
+static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+			      const char *fs_id_buf)
+{
+	struct gfs2_rgrpd *rgd = gl->gl_object;
 
 	if (rgd)
-		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
 }
 
 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
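
Note: the start/end math introduced in gfs2_rgrp_metasync() and rgrp_go_inval() maps the rgrp's header blocks to a byte range in the per-superblock metadata mapping, widened outward to page boundaries because writeback and truncation operate on whole pages. A worked example with illustrative values (4096-byte pages assumed):

#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long long rd_addr = 17;   /* first block number of the rgrp */
	unsigned long long rd_length = 2;  /* rgrp header blocks */
	unsigned long long bsize = 1024;   /* block size smaller than a page */

	unsigned long long start = (rd_addr * bsize) & PAGE_MASK;
	unsigned long long end = PAGE_ALIGN((rd_addr + rd_length) * bsize) - 1;

	/* 17408 rounds down to 16384; 19456 rounds up to 20480, minus 1 */
	printf("start=%llu end=%llu\n", start, end);
	return 0;
}
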
@@ -235,17 +284,34 @@
 }
 
 /**
- * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
+ * gfs2_inode_metasync - sync out the metadata of an inode
+ * @gl: the glock protecting the inode
+ *
+ */
+int gfs2_inode_metasync(struct gfs2_glock *gl)
+{
+	struct address_space *metamapping = gfs2_glock2aspace(gl);
+	int error;
+
+	filemap_fdatawrite(metamapping);
+	error = filemap_fdatawait(metamapping);
+	if (error)
+		gfs2_io_error(gl->gl_name.ln_sbd);
+	return error;
+}
+
+/**
+ * inode_go_sync - Sync the dirty metadata of an inode
  * @gl: the glock protecting the inode
 *
 */
 
-static void inode_go_sync(struct gfs2_glock *gl)
+static int inode_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip = gfs2_glock2inode(gl);
 	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
 	struct address_space *metamapping = gfs2_glock2aspace(gl);
-	int error;
+	int error = 0, ret;
 
 	if (isreg) {
 		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
@@ -266,8 +332,9 @@
 		error = filemap_fdatawait(mapping);
 		mapping_set_error(mapping, error);
 	}
-	error = filemap_fdatawait(metamapping);
-	mapping_set_error(metamapping, error);
+	ret = gfs2_inode_metasync(gl);
+	if (!error)
+		error = ret;
 	gfs2_ail_empty_gl(gl);
 	/*
 	 * Writeback of the data mapping may cause the dirty flag to be set
@@ -278,6 +345,7 @@
 
 out:
 	gfs2_clear_glop_pending(ip);
+	return error;
 }
 
 /**
@@ -294,8 +362,6 @@
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_inode *ip = gfs2_glock2inode(gl);
-
-	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
 	if (flags & DIO_METADATA) {
 		struct address_space *mapping = gfs2_glock2aspace(gl);
@@ -354,7 +420,7 @@
 		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
 					   be32_to_cpu(str->di_minor));
 		break;
-	};
+	}
 
 	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
 	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
@@ -465,20 +531,31 @@
  * inode_go_dump - print information about an inode
  * @seq: The iterator
  * @ip: the inode
+ * @fs_id_buf: file system id (may be empty)
  *
  */
 
-static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+			  const char *fs_id_buf)
 {
-	const struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
+	struct inode *inode = &ip->i_inode;
+	unsigned long nrpages;
+
 	if (ip == NULL)
 		return;
-	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
+
+	xa_lock_irq(&inode->i_data.i_pages);
+	nrpages = inode->i_data.nrpages;
+	xa_unlock_irq(&inode->i_data.i_pages);
+
+	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
+		       "p:%lu\n", fs_id_buf,
 		       (unsigned long long)ip->i_no_formal_ino,
 		       (unsigned long long)ip->i_no_addr,
 		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
 		       (unsigned int)ip->i_diskflags,
-		       (unsigned long long)i_size_read(&ip->i_inode));
+		       (unsigned long long)i_size_read(inode), nrpages);
 }
 
 /**
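
Note: the xa_lock_irq() pair added to inode_go_dump() holds the page cache's xarray lock just long enough to read nrpages, so the dump reports one coherent snapshot rather than a value racing concurrent page-cache updates. A minimal userspace analog of the same snapshot-under-lock idea (a pthread mutex standing in for xa_lock):

#include <pthread.h>
#include <stdio.h>

struct mapping {
	pthread_mutex_t lock;	/* stand-in for the xarray's xa_lock */
	unsigned long nrpages;	/* updated elsewhere under the same lock */
};

static unsigned long snapshot_nrpages(struct mapping *m)
{
	unsigned long nrpages;

	pthread_mutex_lock(&m->lock);
	nrpages = m->nrpages;
	pthread_mutex_unlock(&m->lock);
	return nrpages;
}

int main(void)
{
	struct mapping m = { PTHREAD_MUTEX_INITIALIZER, 42 };

	printf("p:%lu\n", snapshot_nrpages(&m));
	return 0;
}
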
@@ -489,23 +566,43 @@
  *
  */
 
-static void freeze_go_sync(struct gfs2_glock *gl)
+static int freeze_go_sync(struct gfs2_glock *gl)
 {
 	int error = 0;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-	if (gl->gl_state == LM_ST_SHARED &&
-	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+	/*
+	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
+	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
+	 * all the nodes should have the freeze glock in SH mode and they all
+	 * call do_xmote: One for EX and the others for UN. They ALL must
+	 * freeze locally, and they ALL must queue freeze work. The freeze_work
+	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
+	 * effectively waiting for the thaw on the node who holds it in EX.
+	 * Once thawed, the work func acquires the freeze glock in
+	 * SH and everybody goes back to thawed.
+	 */
+	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
+	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
 		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
 		error = freeze_super(sdp->sd_vfs);
 		if (error) {
-			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
+			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+				error);
+			if (gfs2_withdrawn(sdp)) {
+				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+				return 0;
+			}
 			gfs2_assert_withdraw(sdp, 0);
 		}
 		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-			       GFS2_LFC_FREEZE_GO_SYNC);
+		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+				       GFS2_LFC_FREEZE_GO_SYNC);
+		else /* read-only mounts */
+			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 	}
+	return 0;
 }
 
 /**
525622 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
526623 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
527624
528
- error = gfs2_find_jhead(sdp->sd_jdesc, &head);
529
- if (error)
530
- gfs2_consist(sdp);
531
- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
532
- gfs2_consist(sdp);
533
-
534
- /* Initialize some head of the log stuff */
535
- if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
536
- sdp->sd_log_sequence = head.lh_sequence + 1;
537
- gfs2_log_pointers_init(sdp, head.lh_blkno);
538
- }
625
+ error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
626
+ if (gfs2_assert_withdraw_delayed(sdp, !error))
627
+ return error;
628
+ if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
629
+ GFS2_LOG_HEAD_UNMOUNT))
630
+ return -EIO;
631
+ sdp->sd_log_sequence = head.lh_sequence + 1;
632
+ gfs2_log_pointers_init(sdp, head.lh_blkno);
539633 }
540634 return 0;
541635 }
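
Note: the reworked freeze_go_xmote_bh() above refuses to reuse the journal unless it ends in an unmount-style log header, returning an error (and withdrawing later via the delayed assert) instead of calling gfs2_consist() on the spot. The shape of that check, reduced to standalone C with simplified types (not the kernel structs):

#include <stdio.h>

#define LOG_HEAD_UNMOUNT 0x1	/* mirrors GFS2_LOG_HEAD_UNMOUNT */
#define EIO 5

struct log_header {		/* simplified, not the on-disk layout */
	unsigned int lh_flags;
	unsigned long long lh_sequence;
};

static int resume_journal(const struct log_header *head,
			  unsigned long long *log_sequence)
{
	/* a cleanly frozen/unmounted journal ends in an UNMOUNT header;
	 * anything else means replay is needed before writing resumes */
	if (!(head->lh_flags & LOG_HEAD_UNMOUNT))
		return -EIO;
	*log_sequence = head->lh_sequence + 1;	/* next sequence to write */
	return 0;
}

int main(void)
{
	struct log_header head = { LOG_HEAD_UNMOUNT, 41 };
	unsigned long long seq = 0;
	int err = resume_journal(&head, &seq);

	printf("err=%d next_sequence=%llu\n", err, seq);
	return 0;
}
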
@@ -569,13 +663,87 @@
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
 		gl->gl_lockref.count++;
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+		if (!queue_delayed_work(gfs2_delete_workqueue,
+					&gl->gl_delete, 0))
 			gl->gl_lockref.count--;
 	}
 }
 
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+	return !gfs2_delete_work_queued(gl);
+}
+
+/**
+ * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
+ * @gl: glock being freed
+ *
+ * For now, this is only used for the journal inode glock. In withdraw
+ * situations, we need to wait for the glock to be freed so that we know
+ * other nodes may proceed with recovery / journal replay.
+ */
+static void inode_go_free(struct gfs2_glock *gl)
+{
+	/* Note that we cannot reference gl_object because it's already set
+	 * to NULL by this point in its lifecycle. */
+	if (!test_bit(GLF_FREEING, &gl->gl_flags))
+		return;
+	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
+	wake_up_bit(&gl->gl_flags, GLF_FREEING);
+}
+
+/**
+ * nondisk_go_callback - used to signal when a node did a withdraw
+ * @gl: the nondisk glock
+ * @remote: true if this came from a different cluster node
+ *
+ */
+static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	/* Ignore the callback unless it's from another node, and it's the
+	   live lock. */
+	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
+		return;
+
+	/* First order of business is to cancel the demote request. We don't
+	 * really want to demote a nondisk glock. At best it's just to inform
+	 * us of another node's withdraw. We'll keep it in SH mode. */
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+
+	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
+	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
+	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
+	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
+		return;
+
+	/* We only care when a node wants us to unlock, because that means
+	 * they want a journal recovered. */
+	if (gl->gl_demote_state != LM_ST_UNLOCKED)
+		return;
+
+	if (sdp->sd_args.ar_spectator) {
+		fs_warn(sdp, "Spectator node cannot recover journals.\n");
+		return;
+	}
+
+	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
+	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
+	/*
+	 * We can't call remote_withdraw directly here or gfs2_recover_journal
+	 * because this is called from the glock unlock function and the
+	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
+	 * we were called from. So we queue it to the control work queue in
+	 * lock_dlm.
+	 */
+	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
+}
+
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
@@ -585,15 +753,15 @@
 	.go_lock = inode_go_lock,
 	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_flags = GLOF_ASPACE | GLOF_LRU,
+	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
+	.go_free = inode_go_free,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_sync = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
 	.go_lock = gfs2_rgrp_go_lock,
-	.go_unlock = gfs2_rgrp_go_unlock,
-	.go_dump = gfs2_rgrp_dump,
+	.go_dump = gfs2_rgrp_go_dump,
 	.go_type = LM_TYPE_RGRP,
 	.go_flags = GLOF_LVB,
@@ -603,30 +771,36 @@
 	.go_xmote_bh = freeze_go_xmote_bh,
 	.go_demote_ok = freeze_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
-	.go_flags = GLOF_LRU,
+	.go_demote_ok = iopen_go_demote_ok,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
+	.go_subclass = 1,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
 	.go_type = LM_TYPE_FLOCK,
-	.go_flags = GLOF_LRU,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_nondisk_glops = {
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
+	.go_callback = nondisk_go_callback,
 };
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
-	.go_flags = GLOF_LVB | GLOF_LRU,
+	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
 	.go_type = LM_TYPE_JOURNAL,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations *gfs2_glops_list[] = {
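
Note: the tables above are per-lock-type dispatch vtables; the glock core calls whichever hooks a type provides, and with this patch the go_sync hook reports failure to the core instead of each glop withdrawing on its own. A minimal standalone sketch of that pattern (hypothetical names; demo_xmote only stands in for the real glock.c logic):

#include <stdio.h>

struct glock;				/* stand-in for struct gfs2_glock */

struct glock_operations {
	int (*go_sync)(struct glock *gl);	/* now returns 0 or -errno */
	void (*go_inval)(struct glock *gl, int flags);
};

static int demo_go_sync(struct glock *gl)
{
	(void)gl;
	return 0;
}

static const struct glock_operations demo_glops = {
	.go_sync = demo_go_sync,	/* hooks are optional; unset = skip */
};

static int demo_xmote(struct glock *gl, const struct glock_operations *ops)
{
	int error = 0;

	if (ops->go_sync)
		error = ops->go_sync(gl);	/* propagate, don't withdraw here */
	if (!error && ops->go_inval)
		ops->go_inval(gl, 0);
	return error;
}

int main(void)
{
	printf("error=%d\n", demo_xmote(NULL, &demo_glops));
	return 0;
}
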