2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/fs/gfs2/glops.c
+++ b/kernel/fs/gfs2/glops.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */

 #include <linux/spinlock.h>
@@ -28,8 +25,11 @@
 #include "util.h"
 #include "trans.h"
 #include "dir.h"
+#include "lops.h"

 struct workqueue_struct *gfs2_freeze_wq;
+
+extern struct workqueue_struct *gfs2_control_wq;

 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
@@ -41,7 +41,8 @@
 	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
 	       gl->gl_name.ln_type, gl->gl_name.ln_number,
 	       gfs2_glock2aspace(gl));
-	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
+	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
+	gfs2_withdraw(gl->gl_name.ln_sbd);
 }

 /**
@@ -81,10 +82,11 @@
 }


-static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
+static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	int ret;

 	memset(&tr, 0, sizeof(tr));
 	INIT_LIST_HEAD(&tr.tr_buf);
@@ -93,24 +95,51 @@
 	INIT_LIST_HEAD(&tr.tr_ail2_list);
 	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

-	if (!tr.tr_revokes)
-		return;
+	if (!tr.tr_revokes) {
+		bool have_revokes;
+		bool log_in_flight;
+
+		/*
+		 * We have nothing on the ail, but there could be revokes on
+		 * the sdp revoke queue, in which case, we still want to flush
+		 * the log and wait for it to finish.
+		 *
+		 * If the sdp revoke list is empty too, we might still have an
+		 * io outstanding for writing revokes, so we should wait for
+		 * it before returning.
+		 *
+		 * If none of these conditions are true, our revokes are all
+		 * flushed and we can return.
+		 */
+		gfs2_log_lock(sdp);
+		have_revokes = !list_empty(&sdp->sd_log_revokes);
+		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
+		gfs2_log_unlock(sdp);
+		if (have_revokes)
+			goto flush;
+		if (log_in_flight)
+			log_flush_wait(sdp);
+		return 0;
+	}

 	/* A shortened, inline version of gfs2_trans_begin()
 	 * tr->alloced is not set since the transaction structure is
 	 * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
 	tr.tr_ip = _RET_IP_;
-	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
-		return;
+	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
+	if (ret < 0)
+		return ret;
 	WARN_ON_ONCE(current->journal_info);
 	current->journal_info = &tr;

 	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

 	gfs2_trans_end(sdp);
+flush:
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_EMPTY_GL);
+	return 0;
 }

 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
@@ -136,6 +165,31 @@
 }

 /**
+ * gfs2_rgrp_metasync - sync out the metadata of a resource group
+ * @gl: the glock protecting the resource group
+ *
+ */
+
+static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct address_space *metamapping = &sdp->sd_aspace;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
+	int error;
+
+	filemap_fdatawrite_range(metamapping, start, end);
+	error = filemap_fdatawait_range(metamapping, start, end);
+	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
+	mapping_set_error(metamapping, error);
+	if (error)
+		gfs2_io_error(sdp);
+	return error;
+}
+
+/**
  * rgrp_go_sync - sync out the metadata for this glock
  * @gl: the glock
  *
@@ -144,35 +198,23 @@
  * return to caller to demote/unlock the glock until I/O is complete.
  */

-static void rgrp_go_sync(struct gfs2_glock *gl)
+static int rgrp_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	int error;

-	spin_lock(&gl->gl_lockref.lock);
-	rgd = gl->gl_object;
-	if (rgd)
-		gfs2_rgrp_brelse(rgd);
-	spin_unlock(&gl->gl_lockref.lock);
-
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+		return 0;
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

 	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_RGRP_GO_SYNC);
-	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
-	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
-	mapping_set_error(mapping, error);
-	gfs2_ail_empty_gl(gl);
-
-	spin_lock(&gl->gl_lockref.lock);
-	rgd = gl->gl_object;
-	if (rgd)
-		gfs2_free_clones(rgd);
-	spin_unlock(&gl->gl_lockref.lock);
+	error = gfs2_rgrp_metasync(gl);
+	if (!error)
+		error = gfs2_ail_empty_gl(gl);
+	gfs2_free_clones(rgd);
+	return error;
 }

 /**
@@ -190,16 +232,23 @@
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

-	if (rgd)
-		gfs2_rgrp_brelse(rgd);
-
+	gfs2_rgrp_brelse(rgd);
 	WARN_ON_ONCE(!(flags & DIO_METADATA));
-	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
-	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
+	truncate_inode_pages_range(mapping, start, end);
+	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+}
+
+static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+			      const char *fs_id_buf)
+{
+	struct gfs2_rgrpd *rgd = gl->gl_object;

 	if (rgd)
-		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
 }

 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
@@ -235,17 +284,34 @@
 }

 /**
- * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
+ * gfs2_inode_metasync - sync out the metadata of an inode
+ * @gl: the glock protecting the inode
+ *
+ */
+int gfs2_inode_metasync(struct gfs2_glock *gl)
+{
+	struct address_space *metamapping = gfs2_glock2aspace(gl);
+	int error;
+
+	filemap_fdatawrite(metamapping);
+	error = filemap_fdatawait(metamapping);
+	if (error)
+		gfs2_io_error(gl->gl_name.ln_sbd);
+	return error;
+}
+
+/**
+ * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

-static void inode_go_sync(struct gfs2_glock *gl)
+static int inode_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip = gfs2_glock2inode(gl);
 	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
 	struct address_space *metamapping = gfs2_glock2aspace(gl);
-	int error;
+	int error = 0, ret;

 	if (isreg) {
 		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
@@ -266,8 +332,9 @@
 		error = filemap_fdatawait(mapping);
 		mapping_set_error(mapping, error);
 	}
-	error = filemap_fdatawait(metamapping);
-	mapping_set_error(metamapping, error);
+	ret = gfs2_inode_metasync(gl);
+	if (!error)
+		error = ret;
 	gfs2_ail_empty_gl(gl);
 	/*
 	 * Writeback of the data mapping may cause the dirty flag to be set
@@ -278,6 +345,7 @@

 out:
 	gfs2_clear_glop_pending(ip);
+	return error;
 }

 /**
@@ -294,8 +362,6 @@
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_inode *ip = gfs2_glock2inode(gl);
-
-	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

 	if (flags & DIO_METADATA) {
 		struct address_space *mapping = gfs2_glock2aspace(gl);
@@ -339,6 +405,7 @@

 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	const struct gfs2_dinode *str = buf;
 	struct timespec64 atime;
 	u16 height, depth;
@@ -354,7 +421,7 @@
 		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
 					   be32_to_cpu(str->di_minor));
 		break;
-	};
+	}

 	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
 	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
@@ -378,7 +445,7 @@
 	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
 	gfs2_set_inode_flags(&ip->i_inode);
 	height = be16_to_cpu(str->di_height);
-	if (unlikely(height > GFS2_MAX_META_HEIGHT))
+	if (unlikely(height > sdp->sd_max_height))
 		goto corrupt;
 	ip->i_height = (u8)height;

@@ -387,6 +454,9 @@
 		goto corrupt;
 	ip->i_depth = (u8)depth;
 	ip->i_entries = be32_to_cpu(str->di_entries);
+
+	if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
+		goto corrupt;

 	if (S_ISREG(ip->i_inode.i_mode))
 		gfs2_set_aops(&ip->i_inode);
@@ -465,20 +535,31 @@
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @ip: the inode
+ * @fs_id_buf: file system id (may be empty)
 *
 */

-static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+			  const char *fs_id_buf)
 {
-	const struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
+	struct inode *inode = &ip->i_inode;
+	unsigned long nrpages;
+
 	if (ip == NULL)
 		return;
-	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
+
+	xa_lock_irq(&inode->i_data.i_pages);
+	nrpages = inode->i_data.nrpages;
+	xa_unlock_irq(&inode->i_data.i_pages);
+
+	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
+		       "p:%lu\n", fs_id_buf,
 		       (unsigned long long)ip->i_no_formal_ino,
 		       (unsigned long long)ip->i_no_addr,
 		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
 		       (unsigned int)ip->i_diskflags,
-		       (unsigned long long)i_size_read(&ip->i_inode));
+		       (unsigned long long)i_size_read(inode), nrpages);
 }

 /**
@@ -489,23 +570,43 @@
 *
 */

-static void freeze_go_sync(struct gfs2_glock *gl)
+static int freeze_go_sync(struct gfs2_glock *gl)
 {
 	int error = 0;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

-	if (gl->gl_state == LM_ST_SHARED &&
-	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+	/*
+	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
+	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
+	 * all the nodes should have the freeze glock in SH mode and they all
+	 * call do_xmote: One for EX and the others for UN. They ALL must
+	 * freeze locally, and they ALL must queue freeze work. The freeze_work
+	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
+	 * effectively waiting for the thaw on the node who holds it in EX.
+	 * Once thawed, the work func acquires the freeze glock in
+	 * SH and everybody goes back to thawed.
+	 */
+	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
+	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
 		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
 		error = freeze_super(sdp->sd_vfs);
 		if (error) {
-			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
+			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+				error);
+			if (gfs2_withdrawn(sdp)) {
+				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+				return 0;
+			}
 			gfs2_assert_withdraw(sdp, 0);
 		}
 		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-			       GFS2_LFC_FREEZE_GO_SYNC);
+		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+				       GFS2_LFC_FREEZE_GO_SYNC);
+		else /* read-only mounts */
+			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 	}
+	return 0;
 }

 /**
@@ -525,17 +626,14 @@
 	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
 		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

-		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
-		if (error)
-			gfs2_consist(sdp);
-		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
-			gfs2_consist(sdp);
-
-		/* Initialize some head of the log stuff */
-		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
-			sdp->sd_log_sequence = head.lh_sequence + 1;
-			gfs2_log_pointers_init(sdp, head.lh_blkno);
-		}
+		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+		if (gfs2_assert_withdraw_delayed(sdp, !error))
+			return error;
+		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
+						 GFS2_LOG_HEAD_UNMOUNT))
+			return -EIO;
+		sdp->sd_log_sequence = head.lh_sequence + 1;
+		gfs2_log_pointers_init(sdp, head.lh_blkno);
 	}
 	return 0;
 }
@@ -569,13 +667,87 @@
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
 		gl->gl_lockref.count++;
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+		if (!queue_delayed_work(gfs2_delete_workqueue,
+					&gl->gl_delete, 0))
 			gl->gl_lockref.count--;
 	}
 }

+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+	return !gfs2_delete_work_queued(gl);
+}
+
+/**
+ * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
+ * @gl: glock being freed
+ *
+ * For now, this is only used for the journal inode glock. In withdraw
+ * situations, we need to wait for the glock to be freed so that we know
+ * other nodes may proceed with recovery / journal replay.
+ */
+static void inode_go_free(struct gfs2_glock *gl)
+{
+	/* Note that we cannot reference gl_object because it's already set
+	 * to NULL by this point in its lifecycle. */
+	if (!test_bit(GLF_FREEING, &gl->gl_flags))
+		return;
+	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
+	wake_up_bit(&gl->gl_flags, GLF_FREEING);
+}
+
+/**
+ * nondisk_go_callback - used to signal when a node did a withdraw
+ * @gl: the nondisk glock
+ * @remote: true if this came from a different cluster node
+ *
+ */
+static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	/* Ignore the callback unless it's from another node, and it's the
+	   live lock. */
+	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
+		return;
+
+	/* First order of business is to cancel the demote request. We don't
+	 * really want to demote a nondisk glock. At best it's just to inform
+	 * us of another node's withdraw. We'll keep it in SH mode. */
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+
+	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
+	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
+	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
+	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
+		return;
+
+	/* We only care when a node wants us to unlock, because that means
+	 * they want a journal recovered. */
+	if (gl->gl_demote_state != LM_ST_UNLOCKED)
+		return;
+
+	if (sdp->sd_args.ar_spectator) {
+		fs_warn(sdp, "Spectator node cannot recover journals.\n");
+		return;
+	}
+
+	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
+	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
+	/*
+	 * We can't call remote_withdraw directly here or gfs2_recover_journal
+	 * because this is called from the glock unlock function and the
+	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
+	 * we were called from. So we queue it to the control work queue in
+	 * lock_dlm.
+	 */
+	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
+}
+
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
+	.go_flags = GLOF_NONDISK,
 };

 const struct gfs2_glock_operations gfs2_inode_glops = {
@@ -585,15 +757,15 @@
 	.go_lock = inode_go_lock,
 	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_flags = GLOF_ASPACE | GLOF_LRU,
+	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
+	.go_free = inode_go_free,
 };

 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_sync = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
 	.go_lock = gfs2_rgrp_go_lock,
-	.go_unlock = gfs2_rgrp_go_unlock,
-	.go_dump = gfs2_rgrp_dump,
+	.go_dump = gfs2_rgrp_go_dump,
 	.go_type = LM_TYPE_RGRP,
 	.go_flags = GLOF_LVB,
 };
@@ -603,30 +775,36 @@
 	.go_xmote_bh = freeze_go_xmote_bh,
 	.go_demote_ok = freeze_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
 };

 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
-	.go_flags = GLOF_LRU,
+	.go_demote_ok = iopen_go_demote_ok,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
+	.go_subclass = 1,
 };

 const struct gfs2_glock_operations gfs2_flock_glops = {
 	.go_type = LM_TYPE_FLOCK,
-	.go_flags = GLOF_LRU,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
 };

 const struct gfs2_glock_operations gfs2_nondisk_glops = {
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
+	.go_callback = nondisk_go_callback,
 };

 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
-	.go_flags = GLOF_LVB | GLOF_LRU,
+	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
 };

 const struct gfs2_glock_operations gfs2_journal_glops = {
 	.go_type = LM_TYPE_JOURNAL,
+	.go_flags = GLOF_NONDISK,
 };

 const struct gfs2_glock_operations *gfs2_glops_list[] = {