forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 958e46acc8e900e8569dd467c1af9b8d2d019394
kernel/fs/gfs2/super.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -45,258 +42,13 @@
 #include "util.h"
 #include "sys.h"
 #include "xattr.h"
+#include "lops.h"

-#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
-
-enum {
-	Opt_lockproto,
-	Opt_locktable,
-	Opt_hostdata,
-	Opt_spectator,
-	Opt_ignore_local_fs,
-	Opt_localflocks,
-	Opt_localcaching,
-	Opt_debug,
-	Opt_nodebug,
-	Opt_upgrade,
-	Opt_acl,
-	Opt_noacl,
-	Opt_quota_off,
-	Opt_quota_account,
-	Opt_quota_on,
-	Opt_quota,
-	Opt_noquota,
-	Opt_suiddir,
-	Opt_nosuiddir,
-	Opt_data_writeback,
-	Opt_data_ordered,
-	Opt_meta,
-	Opt_discard,
-	Opt_nodiscard,
-	Opt_commit,
-	Opt_err_withdraw,
-	Opt_err_panic,
-	Opt_statfs_quantum,
-	Opt_statfs_percent,
-	Opt_quota_quantum,
-	Opt_barrier,
-	Opt_nobarrier,
-	Opt_rgrplvb,
-	Opt_norgrplvb,
-	Opt_loccookie,
-	Opt_noloccookie,
-	Opt_error,
+enum dinode_demise {
+	SHOULD_DELETE_DINODE,
+	SHOULD_NOT_DELETE_DINODE,
+	SHOULD_DEFER_EVICTION,
 };
-
-static const match_table_t tokens = {
-	{Opt_lockproto, "lockproto=%s"},
-	{Opt_locktable, "locktable=%s"},
-	{Opt_hostdata, "hostdata=%s"},
-	{Opt_spectator, "spectator"},
-	{Opt_spectator, "norecovery"},
-	{Opt_ignore_local_fs, "ignore_local_fs"},
-	{Opt_localflocks, "localflocks"},
-	{Opt_localcaching, "localcaching"},
-	{Opt_debug, "debug"},
-	{Opt_nodebug, "nodebug"},
-	{Opt_upgrade, "upgrade"},
-	{Opt_acl, "acl"},
-	{Opt_noacl, "noacl"},
-	{Opt_quota_off, "quota=off"},
-	{Opt_quota_account, "quota=account"},
-	{Opt_quota_on, "quota=on"},
-	{Opt_quota, "quota"},
-	{Opt_noquota, "noquota"},
-	{Opt_suiddir, "suiddir"},
-	{Opt_nosuiddir, "nosuiddir"},
-	{Opt_data_writeback, "data=writeback"},
-	{Opt_data_ordered, "data=ordered"},
-	{Opt_meta, "meta"},
-	{Opt_discard, "discard"},
-	{Opt_nodiscard, "nodiscard"},
-	{Opt_commit, "commit=%d"},
-	{Opt_err_withdraw, "errors=withdraw"},
-	{Opt_err_panic, "errors=panic"},
-	{Opt_statfs_quantum, "statfs_quantum=%d"},
-	{Opt_statfs_percent, "statfs_percent=%d"},
-	{Opt_quota_quantum, "quota_quantum=%d"},
-	{Opt_barrier, "barrier"},
-	{Opt_nobarrier, "nobarrier"},
-	{Opt_rgrplvb, "rgrplvb"},
-	{Opt_norgrplvb, "norgrplvb"},
-	{Opt_loccookie, "loccookie"},
-	{Opt_noloccookie, "noloccookie"},
-	{Opt_error, NULL}
-};
-
-/**
- * gfs2_mount_args - Parse mount options
- * @args: The structure into which the parsed options will be written
- * @options: The options to parse
- *
- * Return: errno
- */
-
-int gfs2_mount_args(struct gfs2_args *args, char *options)
-{
-	char *o;
-	int token;
-	substring_t tmp[MAX_OPT_ARGS];
-	int rv;
-
-	/* Split the options into tokens with the "," character and
-	   process them */
-
-	while (1) {
-		o = strsep(&options, ",");
-		if (o == NULL)
-			break;
-		if (*o == '\0')
-			continue;
-
-		token = match_token(o, tokens, tmp);
-		switch (token) {
-		case Opt_lockproto:
-			match_strlcpy(args->ar_lockproto, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_locktable:
-			match_strlcpy(args->ar_locktable, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_hostdata:
-			match_strlcpy(args->ar_hostdata, &tmp[0],
-				      GFS2_LOCKNAME_LEN);
-			break;
-		case Opt_spectator:
-			args->ar_spectator = 1;
-			break;
-		case Opt_ignore_local_fs:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_localflocks:
-			args->ar_localflocks = 1;
-			break;
-		case Opt_localcaching:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_debug:
-			if (args->ar_errors == GFS2_ERRORS_PANIC) {
-				pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
-				return -EINVAL;
-			}
-			args->ar_debug = 1;
-			break;
-		case Opt_nodebug:
-			args->ar_debug = 0;
-			break;
-		case Opt_upgrade:
-			/* Retained for backwards compat only */
-			break;
-		case Opt_acl:
-			args->ar_posix_acl = 1;
-			break;
-		case Opt_noacl:
-			args->ar_posix_acl = 0;
-			break;
-		case Opt_quota_off:
-		case Opt_noquota:
-			args->ar_quota = GFS2_QUOTA_OFF;
-			break;
-		case Opt_quota_account:
-			args->ar_quota = GFS2_QUOTA_ACCOUNT;
-			break;
-		case Opt_quota_on:
-		case Opt_quota:
-			args->ar_quota = GFS2_QUOTA_ON;
-			break;
-		case Opt_suiddir:
-			args->ar_suiddir = 1;
-			break;
-		case Opt_nosuiddir:
-			args->ar_suiddir = 0;
-			break;
-		case Opt_data_writeback:
-			args->ar_data = GFS2_DATA_WRITEBACK;
-			break;
-		case Opt_data_ordered:
-			args->ar_data = GFS2_DATA_ORDERED;
-			break;
-		case Opt_meta:
-			args->ar_meta = 1;
-			break;
-		case Opt_discard:
-			args->ar_discard = 1;
-			break;
-		case Opt_nodiscard:
-			args->ar_discard = 0;
-			break;
-		case Opt_commit:
-			rv = match_int(&tmp[0], &args->ar_commit);
-			if (rv || args->ar_commit <= 0) {
-				pr_warn("commit mount option requires a positive numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_statfs_quantum:
-			rv = match_int(&tmp[0], &args->ar_statfs_quantum);
-			if (rv || args->ar_statfs_quantum < 0) {
-				pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_quota_quantum:
-			rv = match_int(&tmp[0], &args->ar_quota_quantum);
-			if (rv || args->ar_quota_quantum <= 0) {
-				pr_warn("quota_quantum mount option requires a positive numeric argument\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_statfs_percent:
-			rv = match_int(&tmp[0], &args->ar_statfs_percent);
-			if (rv || args->ar_statfs_percent < 0 ||
-			    args->ar_statfs_percent > 100) {
-				pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
-				return rv ? rv : -EINVAL;
-			}
-			break;
-		case Opt_err_withdraw:
-			args->ar_errors = GFS2_ERRORS_WITHDRAW;
-			break;
-		case Opt_err_panic:
-			if (args->ar_debug) {
-				pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
-				return -EINVAL;
-			}
-			args->ar_errors = GFS2_ERRORS_PANIC;
-			break;
-		case Opt_barrier:
-			args->ar_nobarrier = 0;
-			break;
-		case Opt_nobarrier:
-			args->ar_nobarrier = 1;
-			break;
-		case Opt_rgrplvb:
-			args->ar_rgrplvb = 1;
-			break;
-		case Opt_norgrplvb:
-			args->ar_rgrplvb = 0;
-			break;
-		case Opt_loccookie:
-			args->ar_loccookie = 1;
-			break;
-		case Opt_noloccookie:
-			args->ar_loccookie = 0;
-			break;
-		case Opt_error:
-		default:
-			pr_warn("invalid mount option: %s\n", o);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}

 /**
  * gfs2_jindex_free - Clear all the journal index information
@@ -315,11 +67,13 @@
 	sdp->sd_journals = 0;
 	spin_unlock(&sdp->sd_jindex_spin);

+	sdp->sd_jdesc = NULL;
 	while (!list_empty(&list)) {
-		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
+		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
 		gfs2_free_journal_extents(jd);
 		list_del(&jd->jd_list);
 		iput(jd->jd_inode);
+		jd->jd_inode = NULL;
 		kfree(jd);
 	}
 }
@@ -372,33 +126,6 @@
 	return 0;
 }

-static int init_threads(struct gfs2_sbd *sdp)
-{
-	struct task_struct *p;
-	int error = 0;
-
-	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start logd thread: %d\n", error);
-		return error;
-	}
-	sdp->sd_logd_process = p;
-
-	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start quotad thread: %d\n", error);
-		goto fail;
-	}
-	sdp->sd_quotad_process = p;
-	return 0;
-
-fail:
-	kthread_stop(sdp->sd_logd_process);
-	return error;
-}
-
 /**
  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
  * @sdp: the filesystem
@@ -410,29 +137,20 @@
 {
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
-	struct gfs2_holder freeze_gh;
 	struct gfs2_log_header_host head;
 	int error;

-	error = init_threads(sdp);
-	if (error)
-		return error;
-
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-				   &freeze_gh);
-	if (error)
-		goto fail_threads;
-
 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+	if (gfs2_withdrawn(sdp))
+		return -EIO;

-	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
-	if (error)
-		goto fail;
+	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+	if (error || gfs2_withdrawn(sdp))
+		return error;

 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 		gfs2_consist(sdp);
-		error = -EIO;
-		goto fail;
+		return -EIO;
 	}

 	/* Initialize some head of the log stuff */
@@ -440,21 +158,8 @@
 	gfs2_log_pointers_init(sdp, head.lh_blkno);

 	error = gfs2_quota_init(sdp);
-	if (error)
-		goto fail;
-
-	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-
-	gfs2_glock_dq_uninit(&freeze_gh);
-
-	return 0;
-
-fail:
-	freeze_gh.gh_flags |= GL_NOCACHE;
-	gfs2_glock_dq_uninit(&freeze_gh);
-fail_threads:
-	kthread_stop(sdp->sd_quotad_process);
-	kthread_stop(sdp->sd_logd_process);
+	if (!error && !gfs2_withdrawn(sdp))
+		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 	return error;
 }

@@ -467,7 +172,7 @@
 	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
 }

-static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
 {
 	struct gfs2_statfs_change *str = buf;

@@ -648,8 +353,7 @@
  * Returns: errno
  */

-static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
-				    struct gfs2_holder *freeze_gh)
+static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *ip;
 	struct gfs2_jdesc *jd;
@@ -674,13 +378,15 @@
 	}

 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
-				   GL_NOCACHE, freeze_gh);
+				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
+	if (error)
+		goto out;

 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		error = gfs2_jdesc_check(jd);
 		if (error)
 			break;
-		error = gfs2_find_jhead(jd, &lh);
+		error = gfs2_find_jhead(jd, &lh, false);
 		if (error)
 			break;
 		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
@@ -690,11 +396,11 @@
 	}

 	if (error)
-		gfs2_glock_dq_uninit(freeze_gh);
+		gfs2_freeze_unlock(&sdp->sd_freeze_gh);

 out:
 	while (!list_empty(&list)) {
-		lfcc = list_entry(list.next, struct lfcc, list);
+		lfcc = list_first_entry(&list, struct lfcc, list);
 		list_del(&lfcc->list);
 		gfs2_glock_dq_uninit(&lfcc->gh);
 		kfree(lfcc);
@@ -802,12 +508,13 @@

 	if (!(flags & I_DIRTY_INODE))
 		return;
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (unlikely(gfs2_withdrawn(sdp)))
 		return;
 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 		if (ret) {
 			fs_err(sdp, "dirty_inode: glock %d\n", ret);
+			gfs2_dump_glock(NULL, ip->i_gl, true);
 			return;
 		}
 		need_unlock = 1;
@@ -844,32 +551,43 @@
  * Returns: errno
  */

-static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 {
-	struct gfs2_holder freeze_gh;
-	int error;
+	int error = 0;
+	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
-				   &freeze_gh);
-	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-		return error;
+	gfs2_flush_delete_work(sdp);
+	if (!log_write_allowed && current == sdp->sd_quotad_process)
+		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
+	else if (sdp->sd_quotad_process)
+		kthread_stop(sdp->sd_quotad_process);
+	sdp->sd_quotad_process = NULL;

-	flush_workqueue(gfs2_delete_workqueue);
-	kthread_stop(sdp->sd_quotad_process);
-	kthread_stop(sdp->sd_logd_process);
+	if (!log_write_allowed && current == sdp->sd_logd_process)
+		fs_warn(sdp, "The logd daemon is withdrawing.\n");
+	else if (sdp->sd_logd_process)
+		kthread_stop(sdp->sd_logd_process);
+	sdp->sd_logd_process = NULL;

-	gfs2_quota_sync(sdp->sd_vfs, 0);
-	gfs2_statfs_sync(sdp->sd_vfs, 0);
+	if (log_write_allowed) {
+		gfs2_quota_sync(sdp->sd_vfs, 0);
+		gfs2_statfs_sync(sdp->sd_vfs, 0);

-	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
-		       GFS2_LFC_MAKE_FS_RO);
-	wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
-	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
-
-	if (gfs2_holder_initialized(&freeze_gh))
-		gfs2_glock_dq_uninit(&freeze_gh);
-
+		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+			       GFS2_LFC_MAKE_FS_RO);
+		wait_event(sdp->sd_reserving_log_wait,
+			   atomic_read(&sdp->sd_reserving_log) == 0);
+		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
+				 sdp->sd_jdesc->jd_blocks);
+	} else {
+		wait_event_timeout(sdp->sd_reserving_log_wait,
+				   atomic_read(&sdp->sd_reserving_log) == 0,
+				   HZ * 5);
+	}
 	gfs2_quota_cleanup(sdp);
+
+	if (!log_write_allowed)
+		sdp->sd_vfs->s_flags |= SB_RDONLY;

 	return error;
 }
@@ -908,6 +626,8 @@
 		if (error)
 			gfs2_io_error(sdp);
 	}
+	WARN_ON(gfs2_withdrawing(sdp));
+
 	/* At this point, we're through modifying the disk */

 	/* Release stuff */
@@ -921,11 +641,13 @@
 	gfs2_glock_put(sdp->sd_freeze_gl);

 	if (!sdp->sd_args.ar_spectator) {
-		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
-		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
+		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
+			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
+		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
+			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
-		iput(sdp->sd_sc_inode);
+		free_local_statfs_inodes(sdp);
 		iput(sdp->sd_qc_inode);
 	}

@@ -941,6 +663,7 @@

 	/* At this point, we're through participating in the lockspace */
 	gfs2_sys_fs_del(sdp);
+	free_sbd(sdp);
 }

 /**
@@ -969,25 +692,22 @@
 	struct super_block *sb = sdp->sd_vfs;

 	atomic_inc(&sb->s_active);
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-				   &freeze_gh);
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
 	if (error) {
-		printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error);
 		gfs2_assert_withdraw(sdp, 0);
-	}
-	else {
+	} else {
 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 		error = thaw_super(sb);
 		if (error) {
-			printk(KERN_INFO "GFS2: couldn't thaw filesystem: %d\n",
-			       error);
+			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
+				error);
 			gfs2_assert_withdraw(sdp, 0);
 		}
-		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-			freeze_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_uninit(&freeze_gh);
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	deactivate_super(sb);
+	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
+	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
 	return;
 }

@@ -1008,30 +728,29 @@
 		goto out;
 	}

-	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
-		error = -EINVAL;
-		goto out;
-	}
-
 	for (;;) {
-		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
+		if (gfs2_withdrawn(sdp)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		error = gfs2_lock_fs_check_clean(sdp);
 		if (!error)
 			break;

-		switch (error) {
-		case -EBUSY:
+		if (error == -EBUSY)
 			fs_err(sdp, "waiting for recovery before freeze\n");
-			break;
-
-		default:
+		else if (error == -EIO) {
+			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
+			       "to recovery error.\n");
+			goto out;
+		} else {
 			fs_err(sdp, "error freezing FS: %d\n", error);
-			break;
 		}
-
 		fs_err(sdp, "retrying...\n");
 		msleep(1000);
 	}
-	error = 0;
+	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
 out:
 	mutex_unlock(&sdp->sd_freeze_mutex);
 	return error;
@@ -1054,9 +773,9 @@
 		return -EINVAL;
 	}

-	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 	mutex_unlock(&sdp->sd_freeze_mutex);
-	return 0;
+	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
 }

 /**
@@ -1227,86 +946,6 @@
 }

 /**
- * gfs2_remount_fs - called when the FS is remounted
- * @sb: the filesystem
- * @flags: the remount flags
- * @data: extra data passed in (not used right now)
- *
- * Returns: errno
- */
-
-static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
-{
-	struct gfs2_sbd *sdp = sb->s_fs_info;
-	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
-	struct gfs2_tune *gt = &sdp->sd_tune;
-	int error;
-
-	sync_filesystem(sb);
-
-	spin_lock(&gt->gt_spin);
-	args.ar_commit = gt->gt_logd_secs;
-	args.ar_quota_quantum = gt->gt_quota_quantum;
-	if (gt->gt_statfs_slow)
-		args.ar_statfs_quantum = 0;
-	else
-		args.ar_statfs_quantum = gt->gt_statfs_quantum;
-	spin_unlock(&gt->gt_spin);
-	error = gfs2_mount_args(&args, data);
-	if (error)
-		return error;
-
-	/* Not allowed to change locking details */
-	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
-	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
-	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
-		return -EINVAL;
-
-	/* Some flags must not be changed */
-	if (args_neq(&args, &sdp->sd_args, spectator) ||
-	    args_neq(&args, &sdp->sd_args, localflocks) ||
-	    args_neq(&args, &sdp->sd_args, meta))
-		return -EINVAL;
-
-	if (sdp->sd_args.ar_spectator)
-		*flags |= SB_RDONLY;
-
-	if ((sb->s_flags ^ *flags) & SB_RDONLY) {
-		if (*flags & SB_RDONLY)
-			error = gfs2_make_fs_ro(sdp);
-		else
-			error = gfs2_make_fs_rw(sdp);
-		if (error)
-			return error;
-	}
-
-	sdp->sd_args = args;
-	if (sdp->sd_args.ar_posix_acl)
-		sb->s_flags |= SB_POSIXACL;
-	else
-		sb->s_flags &= ~SB_POSIXACL;
-	if (sdp->sd_args.ar_nobarrier)
-		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
-	else
-		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
-	spin_lock(&gt->gt_spin);
-	gt->gt_logd_secs = args.ar_commit;
-	gt->gt_quota_quantum = args.ar_quota_quantum;
-	if (args.ar_statfs_quantum) {
-		gt->gt_statfs_slow = 0;
-		gt->gt_statfs_quantum = args.ar_statfs_quantum;
-	}
-	else {
-		gt->gt_statfs_slow = 1;
-		gt->gt_statfs_quantum = 30;
-	}
-	spin_unlock(&gt->gt_spin);
-
-	gfs2_online_uevent(sdp);
-	return 0;
-}
-
-/**
  * gfs2_drop_inode - Drop an inode (test for remote unlink)
  * @inode: The inode to drop
  *
@@ -1344,7 +983,7 @@
 	struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

 	gfs2_glock_hold(gl);
-	if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+	if (!gfs2_queue_delete_work(gl, 0))
 		gfs2_glock_queue_put(gl);
 	return false;
 }
@@ -1548,6 +1187,194 @@
 	gfs2_glock_put(gl);
 }

+static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_holder *gh = &ip->i_iopen_gh;
+	long timeout = 5 * HZ;
+	int error;
+
+	gh->gh_flags |= GL_NOCACHE;
+	gfs2_glock_dq_wait(gh);
+
+	/*
+	 * If there are no other lock holders, we'll get the lock immediately.
+	 * Otherwise, the other nodes holding the lock will be notified about
+	 * our locking request. If they don't have the inode open, they'll
+	 * evict the cached inode and release the lock. Otherwise, if they
+	 * poke the inode glock, we'll take this as an indication that they
+	 * still need the iopen glock and that they'll take care of deleting
+	 * the inode when they're done. As a last resort, if another node
+	 * keeps holding the iopen glock without showing any activity on the
+	 * inode glock, we'll eventually time out.
+	 *
+	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
+	 * locking request as an optimization to notify lock holders as soon as
+	 * possible. Without that flag, they'd be notified implicitly by the
+	 * second locking request.
+	 */
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error != GLR_TRYFAILED)
+		return !error;
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error)
+		return false;
+
+	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
+		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+		timeout);
+	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+		gfs2_glock_dq(gh);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * evict_should_delete - determine whether the inode is eligible for deletion
+ * @inode: The inode to evict
+ *
+ * This function determines whether the evicted inode is eligible to be deleted
+ * and locks the inode glock.
+ *
+ * Returns: the fate of the dinode
+ */
+static enum dinode_demise evict_should_delete(struct inode *inode,
+					      struct gfs2_holder *gh)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	int ret;
+
+	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
+		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
+		goto should_delete;
+	}
+
+	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+		return SHOULD_DEFER_EVICTION;
+
+	/* Deletes should never happen under memory pressure anymore. */
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+		return SHOULD_DEFER_EVICTION;
+
+	/* Must not read inode block until block type has been verified */
+	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
+	if (unlikely(ret)) {
+		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		return SHOULD_DEFER_EVICTION;
+	}
+
+	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+		return SHOULD_NOT_DELETE_DINODE;
+	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
+	if (ret)
+		return SHOULD_NOT_DELETE_DINODE;
+
+	if (test_bit(GIF_INVALID, &ip->i_flags)) {
+		ret = gfs2_inode_refresh(ip);
+		if (ret)
+			return SHOULD_NOT_DELETE_DINODE;
+	}
+
+	/*
+	 * The inode may have been recreated in the meantime.
+	 */
+	if (inode->i_nlink)
+		return SHOULD_NOT_DELETE_DINODE;
+
+should_delete:
+	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
+	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+		if (!gfs2_upgrade_iopen_glock(inode)) {
+			gfs2_holder_uninit(&ip->i_iopen_gh);
+			return SHOULD_NOT_DELETE_DINODE;
+		}
+	}
+	return SHOULD_DELETE_DINODE;
+}
+
+/**
+ * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
+ * @inode: The inode to evict
+ */
+static int evict_unlinked_inode(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	int ret;
+
+	if (S_ISDIR(inode->i_mode) &&
+	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
+		ret = gfs2_dir_exhash_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	if (ip->i_eattr) {
+		ret = gfs2_ea_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	if (!gfs2_is_stuffed(ip)) {
+		ret = gfs2_file_dealloc(ip);
+		if (ret)
+			goto out;
+	}
+
+	/* We're about to clear the bitmap for the dinode, but as soon as we
+	   do, gfs2_create_inode can create another inode at the same block
+	   location and try to set gl_object again. We clear gl_object here so
+	   that subsequent inode creates don't see an old gl_object. */
+	glock_clear_object(ip->i_gl, ip);
+	ret = gfs2_dinode_dealloc(ip);
+	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+out:
+	return ret;
+}
+
+/*
+ * evict_linked_inode - evict an inode whose dinode has not been unlinked
+ * @inode: The inode to evict
+ */
+static int evict_linked_inode(struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct address_space *metamapping;
+	int ret;
+
+	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+		       GFS2_LFC_EVICT_INODE);
+	metamapping = gfs2_glock2aspace(ip->i_gl);
+	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
+		filemap_fdatawrite(metamapping);
+		filemap_fdatawait(metamapping);
+	}
+	write_inode_now(inode, 1);
+	gfs2_ail_flush(ip->i_gl, 0);
+
+	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
+	if (ret)
+		return ret;
+
+	/* Needs to be done before glock release & also in a transaction */
+	truncate_inode_pages(&inode->i_data, 0);
+	truncate_inode_pages(metamapping, 0);
+	gfs2_trans_end(sdp);
+	return 0;
+}
+
 /**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
@@ -1575,8 +1402,7 @@
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
-	struct address_space *metamapping;
-	int error;
+	int ret;

 	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
 		clear_inode(inode);
@@ -1586,141 +1412,50 @@
 	if (inode->i_nlink || sb_rdonly(sb))
 		goto out;

-	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
-		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
-		gfs2_holder_mark_uninitialized(&gh);
-		goto alloc_failed;
-	}
-
-	/* Deletes should never happen under memory pressure anymore. */
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+	gfs2_holder_mark_uninitialized(&gh);
+	ret = evict_should_delete(inode, &gh);
+	if (ret == SHOULD_DEFER_EVICTION)
 		goto out;
+	if (ret == SHOULD_DELETE_DINODE)
+		ret = evict_unlinked_inode(inode);
+	else
+		ret = evict_linked_inode(inode);

-	/* Must not read inode block until block type has been verified */
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
-	if (unlikely(error)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-		goto out;
-	}
-
-	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
-	if (error)
-		goto out_truncate;
-
-	if (test_bit(GIF_INVALID, &ip->i_flags)) {
-		error = gfs2_inode_refresh(ip);
-		if (error)
-			goto out_truncate;
-	}
-
-	/*
-	 * The inode may have been recreated in the meantime.
-	 */
-	if (inode->i_nlink)
-		goto out_truncate;
-
-alloc_failed:
-	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
-	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_wait(&ip->i_iopen_gh);
-		gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
-				   &ip->i_iopen_gh);
-		error = gfs2_glock_nq(&ip->i_iopen_gh);
-		if (error)
-			goto out_truncate;
-	}
-
-	/* Case 1 starts here */
-
-	if (S_ISDIR(inode->i_mode) &&
-	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
-		error = gfs2_dir_exhash_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	if (ip->i_eattr) {
-		error = gfs2_ea_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	if (!gfs2_is_stuffed(ip)) {
-		error = gfs2_file_dealloc(ip);
-		if (error)
-			goto out_unlock;
-	}
-
-	/* We're about to clear the bitmap for the dinode, but as soon as we
-	   do, gfs2_create_inode can create another inode at the same block
-	   location and try to set gl_object again. We clear gl_object here so
-	   that subsequent inode creates don't see an old gl_object. */
-	glock_clear_object(ip->i_gl, ip);
-	error = gfs2_dinode_dealloc(ip);
-	goto out_unlock;
-
-out_truncate:
-	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
-		       GFS2_LFC_EVICT_INODE);
-	metamapping = gfs2_glock2aspace(ip->i_gl);
-	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
-		filemap_fdatawrite(metamapping);
-		filemap_fdatawait(metamapping);
-	}
-	write_inode_now(inode, 1);
-	gfs2_ail_flush(ip->i_gl, 0);
-
-	/* Case 2 starts here */
-	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
-	if (error)
-		goto out_unlock;
-	/* Needs to be done before glock release & also in a transaction */
-	truncate_inode_pages(&inode->i_data, 0);
-	truncate_inode_pages(metamapping, 0);
-	gfs2_trans_end(sdp);
-
-out_unlock:
-	/* Error path for case 1 */
 	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_deltree(&ip->i_res);

-	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-			gfs2_glock_dq(&ip->i_iopen_gh);
-		}
-		gfs2_holder_uninit(&ip->i_iopen_gh);
-	}
 	if (gfs2_holder_initialized(&gh)) {
 		glock_clear_object(ip->i_gl, ip);
 		gfs2_glock_dq_uninit(&gh);
 	}
-	if (error && error != GLR_TRYFAILED && error != -EROFS)
-		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
+	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
+		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
 out:
-	/* Case 3 starts here */
 	truncate_inode_pages_final(&inode->i_data);
-	gfs2_rsqa_delete(ip, NULL);
+	if (ip->i_qadata)
+		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
+	gfs2_rs_deltree(&ip->i_res);
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	glock_clear_object(ip->i_gl, ip);
-	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-	gfs2_glock_add_to_lru(ip->i_gl);
-	gfs2_glock_put_eventually(ip->i_gl);
-	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

 		glock_clear_object(gl, ip);
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+			gfs2_glock_dq(&ip->i_iopen_gh);
+		}
 		gfs2_glock_hold(gl);
-		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		gfs2_holder_uninit(&ip->i_iopen_gh);
 		gfs2_glock_put_eventually(gl);
+	}
+	if (ip->i_gl) {
+		glock_clear_object(ip->i_gl, ip);
+		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+		gfs2_glock_add_to_lru(ip->i_gl);
+		gfs2_glock_put_eventually(ip->i_gl);
+		ip->i_gl = NULL;
 	}
 }

@@ -1729,30 +1464,54 @@
 	struct gfs2_inode *ip;

 	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
-	if (ip) {
-		ip->i_flags = 0;
-		ip->i_gl = NULL;
-		memset(&ip->i_res, 0, sizeof(ip->i_res));
-		RB_CLEAR_NODE(&ip->i_res.rs_node);
-		ip->i_rahead = 0;
-	}
+	if (!ip)
+		return NULL;
+	ip->i_flags = 0;
+	ip->i_gl = NULL;
+	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
+	memset(&ip->i_res, 0, sizeof(ip->i_res));
+	RB_CLEAR_NODE(&ip->i_res.rs_node);
+	ip->i_rahead = 0;
 	return &ip->i_inode;
 }

-static void gfs2_i_callback(struct rcu_head *head)
+static void gfs2_free_inode(struct inode *inode)
 {
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(gfs2_inode_cachep, inode);
+	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
 }

-static void gfs2_destroy_inode(struct inode *inode)
+extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
 {
-	call_rcu(&inode->i_rcu, gfs2_i_callback);
+	struct local_statfs_inode *lsi, *safe;
+
+	/* Run through the statfs inodes list to iput and free memory */
+	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
+		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
+			sdp->sd_sc_inode = NULL; /* belongs to this node */
+		if (lsi->si_sc_inode)
+			iput(lsi->si_sc_inode);
+		list_del(&lsi->si_list);
+		kfree(lsi);
+	}
+}
+
+extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+					     unsigned int index)
+{
+	struct local_statfs_inode *lsi;
+
+	/* Return the local (per node) statfs inode in the
+	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
+	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
+		if (lsi->si_jid == index)
+			return lsi->si_sc_inode;
+	}
+	return NULL;
 }

 const struct super_operations gfs2_super_ops = {
 	.alloc_inode = gfs2_alloc_inode,
-	.destroy_inode = gfs2_destroy_inode,
+	.free_inode = gfs2_free_inode,
 	.write_inode = gfs2_write_inode,
 	.dirty_inode = gfs2_dirty_inode,
 	.evict_inode = gfs2_evict_inode,
@@ -1761,7 +1520,6 @@
 	.freeze_super = gfs2_freeze,
 	.thaw_super = gfs2_unfreeze,
 	.statfs = gfs2_statfs,
-	.remount_fs = gfs2_remount_fs,
 	.drop_inode = gfs2_drop_inode,
 	.show_options = gfs2_show_options,
 };