2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/gfs2/super.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -45,258 +42,13 @@
 #include "util.h"
 #include "sys.h"
 #include "xattr.h"
+#include "lops.h"
 
-#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
-
-enum {
-	Opt_lockproto,
-	Opt_locktable,
-	Opt_hostdata,
-	Opt_spectator,
-	Opt_ignore_local_fs,
-	Opt_localflocks,
-	Opt_localcaching,
-	Opt_debug,
-	Opt_nodebug,
-	Opt_upgrade,
-	Opt_acl,
-	Opt_noacl,
-	Opt_quota_off,
-	Opt_quota_account,
-	Opt_quota_on,
-	Opt_quota,
-	Opt_noquota,
-	Opt_suiddir,
-	Opt_nosuiddir,
-	Opt_data_writeback,
-	Opt_data_ordered,
-	Opt_meta,
-	Opt_discard,
-	Opt_nodiscard,
-	Opt_commit,
-	Opt_err_withdraw,
-	Opt_err_panic,
-	Opt_statfs_quantum,
-	Opt_statfs_percent,
-	Opt_quota_quantum,
-	Opt_barrier,
-	Opt_nobarrier,
-	Opt_rgrplvb,
-	Opt_norgrplvb,
-	Opt_loccookie,
-	Opt_noloccookie,
-	Opt_error,
+enum dinode_demise {
+	SHOULD_DELETE_DINODE,
+	SHOULD_NOT_DELETE_DINODE,
+	SHOULD_DEFER_EVICTION,
 };
-
-static const match_table_t tokens = {
-	{Opt_lockproto, "lockproto=%s"},
-	{Opt_locktable, "locktable=%s"},
-	{Opt_hostdata, "hostdata=%s"},
-	{Opt_spectator, "spectator"},
-	{Opt_spectator, "norecovery"},
-	{Opt_ignore_local_fs, "ignore_local_fs"},
-	{Opt_localflocks, "localflocks"},
-	{Opt_localcaching, "localcaching"},
-	{Opt_debug, "debug"},
-	{Opt_nodebug, "nodebug"},
-	{Opt_upgrade, "upgrade"},
-	{Opt_acl, "acl"},
-	{Opt_noacl, "noacl"},
-	{Opt_quota_off, "quota=off"},
-	{Opt_quota_account, "quota=account"},
-	{Opt_quota_on, "quota=on"},
-	{Opt_quota, "quota"},
-	{Opt_noquota, "noquota"},
-	{Opt_suiddir, "suiddir"},
-	{Opt_nosuiddir, "nosuiddir"},
-	{Opt_data_writeback, "data=writeback"},
-	{Opt_data_ordered, "data=ordered"},
-	{Opt_meta, "meta"},
-	{Opt_discard, "discard"},
-	{Opt_nodiscard, "nodiscard"},
-	{Opt_commit, "commit=%d"},
-	{Opt_err_withdraw, "errors=withdraw"},
-	{Opt_err_panic, "errors=panic"},
-	{Opt_statfs_quantum, "statfs_quantum=%d"},
-	{Opt_statfs_percent, "statfs_percent=%d"},
-	{Opt_quota_quantum, "quota_quantum=%d"},
-	{Opt_barrier, "barrier"},
-	{Opt_nobarrier, "nobarrier"},
-	{Opt_rgrplvb, "rgrplvb"},
-	{Opt_norgrplvb, "norgrplvb"},
-	{Opt_loccookie, "loccookie"},
-	{Opt_noloccookie, "noloccookie"},
-	{Opt_error, NULL}
-};
-
-/**
- * gfs2_mount_args - Parse mount options
- * @args: The structure into which the parsed options will be written
- * @options: The options to parse
- *
- * Return: errno
- */
-
-int gfs2_mount_args(struct gfs2_args *args, char *options)
-{
-	char *o;
-	int token;
-	substring_t tmp[MAX_OPT_ARGS];
-	int rv;
-
-	/* Split the options into tokens with the "," character and
-	   process them */
-
-	while (1) {
-		o = strsep(&options, ",");
-		if (o == NULL)
-			break;
-		if (*o == '\0')
-			continue;
-
-		token = match_token(o, tokens, tmp);
-		switch (token) {
-		case Opt_lockproto:
-			match_strlcpy(args->ar_lockproto, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_locktable:
-			match_strlcpy(args->ar_locktable, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_hostdata:
-			match_strlcpy(args->ar_hostdata, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_spectator:
-			args->ar_spectator = 1;
-			break;
-		case Opt_ignore_local_fs:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_localflocks:
-			args->ar_localflocks = 1;
-			break;
-		case Opt_localcaching:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_debug:
-			if (args->ar_errors == GFS2_ERRORS_PANIC) {
-				pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
-				return -EINVAL;
-			}
-			args->ar_debug = 1;
-			break;
-		case Opt_nodebug:
-			args->ar_debug = 0;
-			break;
-		case Opt_upgrade:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_acl:
-			args->ar_posix_acl = 1;
-			break;
-		case Opt_noacl:
-			args->ar_posix_acl = 0;
-			break;
-		case Opt_quota_off:
-		case Opt_noquota:
-			args->ar_quota = GFS2_QUOTA_OFF;
-			break;
-		case Opt_quota_account:
-			args->ar_quota = GFS2_QUOTA_ACCOUNT;
-			break;
-		case Opt_quota_on:
-		case Opt_quota:
-			args->ar_quota = GFS2_QUOTA_ON;
-			break;
-		case Opt_suiddir:
-			args->ar_suiddir = 1;
-			break;
-		case Opt_nosuiddir:
-			args->ar_suiddir = 0;
-			break;
-		case Opt_data_writeback:
-			args->ar_data = GFS2_DATA_WRITEBACK;
-			break;
-		case Opt_data_ordered:
-			args->ar_data = GFS2_DATA_ORDERED;
-			break;
-		case Opt_meta:
-			args->ar_meta = 1;
-			break;
-		case Opt_discard:
-			args->ar_discard = 1;
-			break;
-		case Opt_nodiscard:
-			args->ar_discard = 0;
-			break;
-		case Opt_commit:
-			rv = match_int(&tmp[0], &args->ar_commit);
-			if (rv || args->ar_commit <= 0) {
-				pr_warn("commit mount option requires a positive numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_statfs_quantum:
-			rv = match_int(&tmp[0], &args->ar_statfs_quantum);
-			if (rv || args->ar_statfs_quantum < 0) {
-				pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_quota_quantum:
-			rv = match_int(&tmp[0], &args->ar_quota_quantum);
-			if (rv || args->ar_quota_quantum <= 0) {
-				pr_warn("quota_quantum mount option requires a positive numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_statfs_percent:
-			rv = match_int(&tmp[0], &args->ar_statfs_percent);
-			if (rv || args->ar_statfs_percent < 0 ||
-			    args->ar_statfs_percent > 100) {
-				pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_err_withdraw:
-			args->ar_errors = GFS2_ERRORS_WITHDRAW;
-			break;
-		case Opt_err_panic:
-			if (args->ar_debug) {
-				pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
-				return -EINVAL;
-			}
-			args->ar_errors = GFS2_ERRORS_PANIC;
-			break;
-		case Opt_barrier:
-			args->ar_nobarrier = 0;
-			break;
-		case Opt_nobarrier:
-			args->ar_nobarrier = 1;
-			break;
-		case Opt_rgrplvb:
-			args->ar_rgrplvb = 1;
-			break;
-		case Opt_norgrplvb:
-			args->ar_rgrplvb = 0;
-			break;
-		case Opt_loccookie:
-			args->ar_loccookie = 1;
-			break;
-		case Opt_noloccookie:
-			args->ar_loccookie = 0;
-			break;
-		case Opt_error:
-		default:
-			pr_warn("invalid mount option: %s\n", o);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
 
 /**
  * gfs2_jindex_free - Clear all the journal index information
@@ -315,11 +67,13 @@
 	sdp->sd_journals = 0;
 	spin_unlock(&sdp->sd_jindex_spin);
 
+	sdp->sd_jdesc = NULL;
 	while (!list_empty(&list)) {
-		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
+		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
 		gfs2_free_journal_extents(jd);
 		list_del(&jd->jd_list);
 		iput(jd->jd_inode);
+		jd->jd_inode = NULL;
 		kfree(jd);
 	}
 }
@@ -372,33 +126,6 @@
 	return 0;
 }
 
-static int init_threads(struct gfs2_sbd *sdp)
-{
-	struct task_struct *p;
-	int error = 0;
-
-	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start logd thread: %d\n", error);
-		return error;
-	}
-	sdp->sd_logd_process = p;
-
-	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start quotad thread: %d\n", error);
-		goto fail;
-	}
-	sdp->sd_quotad_process = p;
-	return 0;
-
-fail:
-	kthread_stop(sdp->sd_logd_process);
-	return error;
-}
-
 /**
  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
  * @sdp: the filesystem
@@ -410,29 +137,22 @@
 {
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
-	struct gfs2_holder freeze_gh;
 	struct gfs2_log_header_host head;
 	int error;
 
-	error = init_threads(sdp);
-	if (error)
-		return error;
-
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-				   &freeze_gh);
-	if (error)
-		goto fail_threads;
-
 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+	if (gfs2_withdrawn(sdp))
+		return -EIO;
 
-	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
-	if (error)
-		goto fail;
+	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+	if (error) {
+		gfs2_consist(sdp);
+		return error;
+	}
 
 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 		gfs2_consist(sdp);
-		error = -EIO;
-		goto fail;
+		return -EIO;
 	}
 
 	/* Initialize some head of the log stuff */
@@ -440,21 +160,10 @@
 	gfs2_log_pointers_init(sdp, head.lh_blkno);
 
 	error = gfs2_quota_init(sdp);
-	if (error)
-		goto fail;
-
-	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-
-	gfs2_glock_dq_uninit(&freeze_gh);
-
-	return 0;
-
-fail:
-	freeze_gh.gh_flags |= GL_NOCACHE;
-	gfs2_glock_dq_uninit(&freeze_gh);
-fail_threads:
-	kthread_stop(sdp->sd_quotad_process);
-	kthread_stop(sdp->sd_logd_process);
+	if (!error && gfs2_withdrawn(sdp))
+		error = -EIO;
+	if (!error)
+		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 	return error;
 }
 
@@ -467,7 +176,7 @@
 	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
 }
 
-static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
 {
 	struct gfs2_statfs_change *str = buf;
 
@@ -648,8 +357,7 @@
  * Returns: errno
  */
 
-static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
-				    struct gfs2_holder *freeze_gh)
+static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *ip;
 	struct gfs2_jdesc *jd;
@@ -674,13 +382,15 @@
 	}
 
 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
-				   GL_NOCACHE, freeze_gh);
+				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
+	if (error)
+		goto out;
 
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		error = gfs2_jdesc_check(jd);
 		if (error)
 			break;
-		error = gfs2_find_jhead(jd, &lh);
+		error = gfs2_find_jhead(jd, &lh, false);
 		if (error)
 			break;
 		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
@@ -690,11 +400,11 @@
 	}
 
 	if (error)
-		gfs2_glock_dq_uninit(freeze_gh);
+		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 
 out:
 	while (!list_empty(&list)) {
-		lfcc = list_entry(list.next, struct lfcc, list);
+		lfcc = list_first_entry(&list, struct lfcc, list);
 		list_del(&lfcc->list);
 		gfs2_glock_dq_uninit(&lfcc->gh);
 		kfree(lfcc);
@@ -802,12 +512,13 @@
 
 	if (!(flags & I_DIRTY_INODE))
 		return;
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (unlikely(gfs2_withdrawn(sdp)))
 		return;
 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 		if (ret) {
 			fs_err(sdp, "dirty_inode: glock %d\n", ret);
+			gfs2_dump_glock(NULL, ip->i_gl, true);
 			return;
 		}
 		need_unlock = 1;
@@ -844,32 +555,43 @@
  * Returns: errno
  */
 
-static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 {
-	struct gfs2_holder freeze_gh;
-	int error;
+	int error = 0;
+	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
-				   &freeze_gh);
-	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-		return error;
+	gfs2_flush_delete_work(sdp);
+	if (!log_write_allowed && current == sdp->sd_quotad_process)
+		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
+	else if (sdp->sd_quotad_process)
+		kthread_stop(sdp->sd_quotad_process);
+	sdp->sd_quotad_process = NULL;
 
-	flush_workqueue(gfs2_delete_workqueue);
-	kthread_stop(sdp->sd_quotad_process);
-	kthread_stop(sdp->sd_logd_process);
+	if (!log_write_allowed && current == sdp->sd_logd_process)
+		fs_warn(sdp, "The logd daemon is withdrawing.\n");
+	else if (sdp->sd_logd_process)
+		kthread_stop(sdp->sd_logd_process);
+	sdp->sd_logd_process = NULL;
 
-	gfs2_quota_sync(sdp->sd_vfs, 0);
-	gfs2_statfs_sync(sdp->sd_vfs, 0);
+	if (log_write_allowed) {
+		gfs2_quota_sync(sdp->sd_vfs, 0);
+		gfs2_statfs_sync(sdp->sd_vfs, 0);
 
-	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
-		       GFS2_LFC_MAKE_FS_RO);
-	wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
-	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
-
-	if (gfs2_holder_initialized(&freeze_gh))
-		gfs2_glock_dq_uninit(&freeze_gh);
-
+		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+			       GFS2_LFC_MAKE_FS_RO);
+		wait_event(sdp->sd_reserving_log_wait,
+			   atomic_read(&sdp->sd_reserving_log) == 0);
+		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
+				 sdp->sd_jdesc->jd_blocks);
+	} else {
+		wait_event_timeout(sdp->sd_reserving_log_wait,
+				   atomic_read(&sdp->sd_reserving_log) == 0,
+				   HZ * 5);
+	}
 	gfs2_quota_cleanup(sdp);
+
+	if (!log_write_allowed)
+		sdp->sd_vfs->s_flags |= SB_RDONLY;
 
 	return error;
 }
@@ -908,6 +630,8 @@
 		if (error)
 			gfs2_io_error(sdp);
 	}
+	WARN_ON(gfs2_withdrawing(sdp));
+
 	/* At this point, we're through modifying the disk */
 
 	/* Release stuff */
@@ -921,11 +645,13 @@
 	gfs2_glock_put(sdp->sd_freeze_gl);
 
 	if (!sdp->sd_args.ar_spectator) {
-		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
-		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
+		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
+			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
+		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
+			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
-		iput(sdp->sd_sc_inode);
+		free_local_statfs_inodes(sdp);
 		iput(sdp->sd_qc_inode);
 	}
 
@@ -941,6 +667,7 @@
 
 	/* At this point, we're through participating in the lockspace */
 	gfs2_sys_fs_del(sdp);
+	free_sbd(sdp);
 }
 
 /**
@@ -969,25 +696,22 @@
 	struct super_block *sb = sdp->sd_vfs;
 
 	atomic_inc(&sb->s_active);
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-				   &freeze_gh);
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
 	if (error) {
-		printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error);
 		gfs2_assert_withdraw(sdp, 0);
-	}
-	else {
+	} else {
 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 		error = thaw_super(sb);
 		if (error) {
-			printk(KERN_INFO "GFS2: couldn't thaw filesystem: %d\n",
-			       error);
+			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
+				error);
 			gfs2_assert_withdraw(sdp, 0);
 		}
-		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-			freeze_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_uninit(&freeze_gh);
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	deactivate_super(sb);
+	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
+	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
 	return;
 }
 
@@ -1008,30 +732,29 @@
 		goto out;
 	}
 
-	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
-		error = -EINVAL;
-		goto out;
-	}
-
 	for (;;) {
-		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
+		if (gfs2_withdrawn(sdp)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		error = gfs2_lock_fs_check_clean(sdp);
 		if (!error)
 			break;
 
-		switch (error) {
-		case -EBUSY:
+		if (error == -EBUSY)
 			fs_err(sdp, "waiting for recovery before freeze\n");
-			break;
-
-		default:
+		else if (error == -EIO) {
+			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
+			       "to recovery error.\n");
+			goto out;
+		} else {
 			fs_err(sdp, "error freezing FS: %d\n", error);
-			break;
 		}
-
 		fs_err(sdp, "retrying...\n");
 		msleep(1000);
 	}
-	error = 0;
+	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
 out:
 	mutex_unlock(&sdp->sd_freeze_mutex);
 	return error;
@@ -1054,9 +777,9 @@
 		return -EINVAL;
 	}
 
-	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 	mutex_unlock(&sdp->sd_freeze_mutex);
-	return 0;
+	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
 }
 
 /**
@@ -1227,86 +950,6 @@
 }
 
 /**
- * gfs2_remount_fs - called when the FS is remounted
- * @sb: the filesystem
- * @flags: the remount flags
- * @data: extra data passed in (not used right now)
- *
- * Returns: errno
- */
-
-static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
-{
-	struct gfs2_sbd *sdp = sb->s_fs_info;
-	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
-	struct gfs2_tune *gt = &sdp->sd_tune;
-	int error;
-
-	sync_filesystem(sb);
-
-	spin_lock(&gt->gt_spin);
-	args.ar_commit = gt->gt_logd_secs;
-	args.ar_quota_quantum = gt->gt_quota_quantum;
-	if (gt->gt_statfs_slow)
-		args.ar_statfs_quantum = 0;
-	else
-		args.ar_statfs_quantum = gt->gt_statfs_quantum;
-	spin_unlock(&gt->gt_spin);
-	error = gfs2_mount_args(&args, data);
-	if (error)
-		return error;
-
-	/* Not allowed to change locking details */
-	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
-	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
-	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
-		return -EINVAL;
-
-	/* Some flags must not be changed */
-	if (args_neq(&args, &sdp->sd_args, spectator) ||
-	    args_neq(&args, &sdp->sd_args, localflocks) ||
-	    args_neq(&args, &sdp->sd_args, meta))
-		return -EINVAL;
-
-	if (sdp->sd_args.ar_spectator)
-		*flags |= SB_RDONLY;
-
-	if ((sb->s_flags ^ *flags) & SB_RDONLY) {
-		if (*flags & SB_RDONLY)
-			error = gfs2_make_fs_ro(sdp);
-		else
-			error = gfs2_make_fs_rw(sdp);
-		if (error)
-			return error;
-	}
-
-	sdp->sd_args = args;
-	if (sdp->sd_args.ar_posix_acl)
-		sb->s_flags |= SB_POSIXACL;
-	else
-		sb->s_flags &= ~SB_POSIXACL;
-	if (sdp->sd_args.ar_nobarrier)
-		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
-	else
-		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
-	spin_lock(&gt->gt_spin);
-	gt->gt_logd_secs = args.ar_commit;
-	gt->gt_quota_quantum = args.ar_quota_quantum;
-	if (args.ar_statfs_quantum) {
-		gt->gt_statfs_slow = 0;
-		gt->gt_statfs_quantum = args.ar_statfs_quantum;
-	}
-	else {
-		gt->gt_statfs_slow = 1;
-		gt->gt_statfs_quantum = 30;
-	}
-	spin_unlock(&gt->gt_spin);
-
-	gfs2_online_uevent(sdp);
-	return 0;
-}
-
-/**
  * gfs2_drop_inode - Drop an inode (test for remote unlink)
  * @inode: The inode to drop
  *
@@ -1344,7 +987,7 @@
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
 		gfs2_glock_hold(gl);
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+		if (!gfs2_queue_delete_work(gl, 0))
 			gfs2_glock_queue_put(gl);
 		return false;
 	}
@@ -1374,7 +1017,14 @@
 {
 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
 	struct gfs2_args *args = &sdp->sd_args;
-	int val;
+	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
+
+	spin_lock(&sdp->sd_tune.gt_spin);
+	logd_secs = sdp->sd_tune.gt_logd_secs;
+	quota_quantum = sdp->sd_tune.gt_quota_quantum;
+	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
+	statfs_slow = sdp->sd_tune.gt_statfs_slow;
+	spin_unlock(&sdp->sd_tune.gt_spin);
 
 	if (is_ancestor(root, sdp->sd_master_dir))
 		seq_puts(s, ",meta");
@@ -1429,17 +1079,14 @@
 	}
 	if (args->ar_discard)
 		seq_puts(s, ",discard");
-	val = sdp->sd_tune.gt_logd_secs;
-	if (val != 30)
-		seq_printf(s, ",commit=%d", val);
-	val = sdp->sd_tune.gt_statfs_quantum;
-	if (val != 30)
-		seq_printf(s, ",statfs_quantum=%d", val);
-	else if (sdp->sd_tune.gt_statfs_slow)
+	if (logd_secs != 30)
+		seq_printf(s, ",commit=%d", logd_secs);
+	if (statfs_quantum != 30)
+		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
+	else if (statfs_slow)
 		seq_puts(s, ",statfs_quantum=0");
-	val = sdp->sd_tune.gt_quota_quantum;
-	if (val != 60)
-		seq_printf(s, ",quota_quantum=%d", val);
+	if (quota_quantum != 60)
+		seq_printf(s, ",quota_quantum=%d", quota_quantum);
 	if (args->ar_statfs_percent)
 		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
@@ -1548,6 +1195,194 @@
 	gfs2_glock_put(gl);
 }
 
+static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_holder *gh = &ip->i_iopen_gh;
+	long timeout = 5 * HZ;
+	int error;
+
+	gh->gh_flags |= GL_NOCACHE;
+	gfs2_glock_dq_wait(gh);
+
+	/*
+	 * If there are no other lock holders, we'll get the lock immediately.
+	 * Otherwise, the other nodes holding the lock will be notified about
+	 * our locking request. If they don't have the inode open, they'll
+	 * evict the cached inode and release the lock. Otherwise, if they
+	 * poke the inode glock, we'll take this as an indication that they
+	 * still need the iopen glock and that they'll take care of deleting
+	 * the inode when they're done. As a last resort, if another node
+	 * keeps holding the iopen glock without showing any activity on the
+	 * inode glock, we'll eventually time out.
+	 *
+	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
+	 * locking request as an optimization to notify lock holders as soon as
+	 * possible. Without that flag, they'd be notified implicitly by the
+	 * second locking request.
+	 */
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error != GLR_TRYFAILED)
+		return !error;
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error)
+		return false;
+
+	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+			!test_bit(HIF_WAIT, &gh->gh_iflags) ||
+			test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+			timeout);
+	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+		gfs2_glock_dq(gh);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * evict_should_delete - determine whether the inode is eligible for deletion
+ * @inode: The inode to evict
+ *
+ * This function determines whether the evicted inode is eligible to be deleted
+ * and locks the inode glock.
+ *
+ * Returns: the fate of the dinode
+ */
+static enum dinode_demise evict_should_delete(struct inode *inode,
+					      struct gfs2_holder *gh)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	int ret;
+
+	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
+		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
+		goto should_delete;
+	}
+
+	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+		return SHOULD_DEFER_EVICTION;
+
+	/* Deletes should never happen under memory pressure anymore. */
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+		return SHOULD_DEFER_EVICTION;
+
+	/* Must not read inode block until block type has been verified */
+	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
+	if (unlikely(ret)) {
+		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		return SHOULD_DEFER_EVICTION;
+	}
+
+	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+		return SHOULD_NOT_DELETE_DINODE;
+	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
+	if (ret)
+		return SHOULD_NOT_DELETE_DINODE;
+
+	if (test_bit(GIF_INVALID, &ip->i_flags)) {
+		ret = gfs2_inode_refresh(ip);
+		if (ret)
+			return SHOULD_NOT_DELETE_DINODE;
+	}
+
+	/*
+	 * The inode may have been recreated in the meantime.
+	 */
+	if (inode->i_nlink)
+		return SHOULD_NOT_DELETE_DINODE;
+
+should_delete:
+	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
+	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+		if (!gfs2_upgrade_iopen_glock(inode)) {
+			gfs2_holder_uninit(&ip->i_iopen_gh);
+			return SHOULD_NOT_DELETE_DINODE;
+		}
+	}
+	return SHOULD_DELETE_DINODE;
+}
+
+/**
+ * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
+ * @inode: The inode to evict
+ */
+static int evict_unlinked_inode(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	int ret;
+
+	if (S_ISDIR(inode->i_mode) &&
+	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
+		ret = gfs2_dir_exhash_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	if (ip->i_eattr) {
+		ret = gfs2_ea_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	if (!gfs2_is_stuffed(ip)) {
+		ret = gfs2_file_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	/* We're about to clear the bitmap for the dinode, but as soon as we
+	   do, gfs2_create_inode can create another inode at the same block
+	   location and try to set gl_object again. We clear gl_object here so
+	   that subsequent inode creates don't see an old gl_object. */
+	glock_clear_object(ip->i_gl, ip);
+	ret = gfs2_dinode_dealloc(ip);
+	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+out:
+	return ret;
+}
+
+/*
+ * evict_linked_inode - evict an inode whose dinode has not been unlinked
+ * @inode: The inode to evict
+ */
+static int evict_linked_inode(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct address_space *metamapping;
+	int ret;
+
+	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+		       GFS2_LFC_EVICT_INODE);
+	metamapping = gfs2_glock2aspace(ip->i_gl);
+	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
+		filemap_fdatawrite(metamapping);
+		filemap_fdatawait(metamapping);
+	}
+	write_inode_now(inode, 1);
+	gfs2_ail_flush(ip->i_gl, 0);
+
+	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
+	if (ret)
+		return ret;
+
+	/* Needs to be done before glock release & also in a transaction */
+	truncate_inode_pages(&inode->i_data, 0);
+	truncate_inode_pages(metamapping, 0);
+	gfs2_trans_end(sdp);
+	return 0;
+}
+
 /**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
@@ -1575,8 +1410,7 @@
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
-	struct address_space *metamapping;
-	int error;
+	int ret;
 
 	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
 		clear_inode(inode);
@@ -1586,141 +1420,58 @@
 	if (inode->i_nlink || sb_rdonly(sb))
 		goto out;
 
-	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
-		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
-		gfs2_holder_mark_uninitialized(&gh);
-		goto alloc_failed;
-	}
-
-	/* Deletes should never happen under memory pressure anymore. */
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
-		goto out;
-
-	/* Must not read inode block until block type has been verified */
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
-	if (unlikely(error)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-		goto out;
-	}
-
-	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
-	if (error)
-		goto out_truncate;
-
-	if (test_bit(GIF_INVALID, &ip->i_flags)) {
-		error = gfs2_inode_refresh(ip);
-		if (error)
-			goto out_truncate;
-	}
-
 	/*
-	 * The inode may have been recreated in the meantime.
+	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
+	 * system files without having an active journal to write to. In that
+	 * case, skip the filesystem evict.
 	 */
-	if (inode->i_nlink)
-		goto out_truncate;
+	if (!sdp->sd_jdesc)
+		goto out;
 
-alloc_failed:
-	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
-	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_wait(&ip->i_iopen_gh);
-		gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
-				   &ip->i_iopen_gh);
-		error = gfs2_glock_nq(&ip->i_iopen_gh);
-		if (error)
-			goto out_truncate;
-	}
+	gfs2_holder_mark_uninitialized(&gh);
+	ret = evict_should_delete(inode, &gh);
+	if (ret == SHOULD_DEFER_EVICTION)
+		goto out;
+	if (ret == SHOULD_DELETE_DINODE)
+		ret = evict_unlinked_inode(inode);
+	else
+		ret = evict_linked_inode(inode);
 
-	/* Case 1 starts here */
-
-	if (S_ISDIR(inode->i_mode) &&
-	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
-		error = gfs2_dir_exhash_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	if (ip->i_eattr) {
-		error = gfs2_ea_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	if (!gfs2_is_stuffed(ip)) {
-		error = gfs2_file_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	/* We're about to clear the bitmap for the dinode, but as soon as we
-	   do, gfs2_create_inode can create another inode at the same block
-	   location and try to set gl_object again. We clear gl_object here so
-	   that subsequent inode creates don't see an old gl_object. */
-	glock_clear_object(ip->i_gl, ip);
-	error = gfs2_dinode_dealloc(ip);
-	goto out_unlock;
-
-out_truncate:
-	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
-		       GFS2_LFC_EVICT_INODE);
-	metamapping = gfs2_glock2aspace(ip->i_gl);
-	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
-		filemap_fdatawrite(metamapping);
-		filemap_fdatawait(metamapping);
-	}
-	write_inode_now(inode, 1);
-	gfs2_ail_flush(ip->i_gl, 0);
-
-	/* Case 2 starts here */
-	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
-	if (error)
-		goto out_unlock;
-	/* Needs to be done before glock release & also in a transaction */
-	truncate_inode_pages(&inode->i_data, 0);
-	truncate_inode_pages(metamapping, 0);
-	gfs2_trans_end(sdp);
-
-out_unlock:
-	/* Error path for case 1 */
 	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_deltree(&ip->i_res);
 
-	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-			gfs2_glock_dq(&ip->i_iopen_gh);
-		}
-		gfs2_holder_uninit(&ip->i_iopen_gh);
-	}
 	if (gfs2_holder_initialized(&gh)) {
 		glock_clear_object(ip->i_gl, ip);
 		gfs2_glock_dq_uninit(&gh);
 	}
-	if (error && error != GLR_TRYFAILED && error != -EROFS)
-		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
+	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
+		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
 out:
-	/* Case 3 starts here */
 	truncate_inode_pages_final(&inode->i_data);
-	gfs2_rsqa_delete(ip, NULL);
+	if (ip->i_qadata)
+		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
+	gfs2_rs_deltree(&ip->i_res);
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	glock_clear_object(ip->i_gl, ip);
-	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-	gfs2_glock_add_to_lru(ip->i_gl);
-	gfs2_glock_put_eventually(ip->i_gl);
-	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
 		glock_clear_object(gl, ip);
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+			gfs2_glock_dq(&ip->i_iopen_gh);
+		}
 		gfs2_glock_hold(gl);
-		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		gfs2_holder_uninit(&ip->i_iopen_gh);
 		gfs2_glock_put_eventually(gl);
+	}
+	if (ip->i_gl) {
+		glock_clear_object(ip->i_gl, ip);
+		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+		gfs2_glock_add_to_lru(ip->i_gl);
+		gfs2_glock_put_eventually(ip->i_gl);
+		ip->i_gl = NULL;
 	}
 }
 
@@ -1729,30 +1480,54 @@
 	struct gfs2_inode *ip;
 
 	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
-	if (ip) {
-		ip->i_flags = 0;
-		ip->i_gl = NULL;
-		memset(&ip->i_res, 0, sizeof(ip->i_res));
-		RB_CLEAR_NODE(&ip->i_res.rs_node);
-		ip->i_rahead = 0;
-	}
+	if (!ip)
+		return NULL;
+	ip->i_flags = 0;
+	ip->i_gl = NULL;
+	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
+	memset(&ip->i_res, 0, sizeof(ip->i_res));
+	RB_CLEAR_NODE(&ip->i_res.rs_node);
+	ip->i_rahead = 0;
 	return &ip->i_inode;
 }
 
-static void gfs2_i_callback(struct rcu_head *head)
+static void gfs2_free_inode(struct inode *inode)
 {
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(gfs2_inode_cachep, inode);
+	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
 }
 
-static void gfs2_destroy_inode(struct inode *inode)
+extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
 {
-	call_rcu(&inode->i_rcu, gfs2_i_callback);
+	struct local_statfs_inode *lsi, *safe;
+
+	/* Run through the statfs inodes list to iput and free memory */
+	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
+		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
+			sdp->sd_sc_inode = NULL; /* belongs to this node */
+		if (lsi->si_sc_inode)
+			iput(lsi->si_sc_inode);
+		list_del(&lsi->si_list);
+		kfree(lsi);
+	}
+}
+
+extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+					     unsigned int index)
+{
+	struct local_statfs_inode *lsi;
+
+	/* Return the local (per node) statfs inode in the
+	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
+	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
+		if (lsi->si_jid == index)
+			return lsi->si_sc_inode;
+	}
+	return NULL;
 }
 
 const struct super_operations gfs2_super_ops = {
 	.alloc_inode		= gfs2_alloc_inode,
-	.destroy_inode		= gfs2_destroy_inode,
+	.free_inode		= gfs2_free_inode,
 	.write_inode		= gfs2_write_inode,
 	.dirty_inode		= gfs2_dirty_inode,
 	.evict_inode		= gfs2_evict_inode,
@@ -1761,7 +1536,6 @@
 	.freeze_super		= gfs2_freeze,
 	.thaw_super		= gfs2_unfreeze,
 	.statfs			= gfs2_statfs,
-	.remount_fs		= gfs2_remount_fs,
 	.drop_inode		= gfs2_drop_inode,
 	.show_options		= gfs2_show_options,
 };