forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/fs/ext4/extents_status.c
@@ -142,13 +142,16 @@
  */
 
 static struct kmem_cache *ext4_es_cachep;
+static struct kmem_cache *ext4_pending_cachep;
 
 static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-			      ext4_lblk_t end);
+			      ext4_lblk_t end, int *reserved);
 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 		       struct ext4_inode_info *locked_ei);
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+			     ext4_lblk_t len);
 
 int __init ext4_init_es(void)
 {
@@ -233,30 +236,38 @@
 }
 
 /*
- * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
- * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_extent_range - find extent with specified status within block
+ *                             range or next extent following block range in
+ *                             extents status tree
  *
- * @inode: the inode which owns delayed extents
- * @lblk: the offset where we start to search
- * @end: the offset where we stop to search
- * @es: delayed extent that we found
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ * @es - extent found, if any
+ *
+ * Find the first extent within the block range specified by @lblk and @end
+ * in the extents status tree that satisfies @matching_fn. If a match
+ * is found, it's returned in @es. If not, and a matching extent is found
+ * beyond the block range, it's returned in @es. If no match is found, an
+ * extent is returned in @es whose es_lblk, es_len, and es_pblk components
+ * are 0.
  */
-void ext4_es_find_delayed_extent_range(struct inode *inode,
-				       ext4_lblk_t lblk, ext4_lblk_t end,
-				       struct extent_status *es)
+static void __es_find_extent_range(struct inode *inode,
+				   int (*matching_fn)(struct extent_status *es),
+				   ext4_lblk_t lblk, ext4_lblk_t end,
+				   struct extent_status *es)
 {
 	struct ext4_es_tree *tree = NULL;
 	struct extent_status *es1 = NULL;
 	struct rb_node *node;
 
-	BUG_ON(es == NULL);
-	BUG_ON(end < lblk);
-	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
+	WARN_ON(es == NULL);
+	WARN_ON(end < lblk);
 
-	read_lock(&EXT4_I(inode)->i_es_lock);
 	tree = &EXT4_I(inode)->i_es_tree;
 
-	/* find extent in cache firstly */
+	/* see if the extent has been cached */
 	es->es_lblk = es->es_len = es->es_pblk = 0;
 	if (tree->cache_es) {
 		es1 = tree->cache_es;
@@ -271,28 +282,142 @@
 	es1 = __es_tree_search(&tree->root, lblk);
 
 out:
-	if (es1 && !ext4_es_is_delayed(es1)) {
+	if (es1 && !matching_fn(es1)) {
 		while ((node = rb_next(&es1->rb_node)) != NULL) {
 			es1 = rb_entry(node, struct extent_status, rb_node);
 			if (es1->es_lblk > end) {
 				es1 = NULL;
 				break;
 			}
-			if (ext4_es_is_delayed(es1))
+			if (matching_fn(es1))
 				break;
 		}
 	}
 
-	if (es1 && ext4_es_is_delayed(es1)) {
+	if (es1 && matching_fn(es1)) {
 		tree->cache_es = es1;
 		es->es_lblk = es1->es_lblk;
 		es->es_len = es1->es_len;
 		es->es_pblk = es1->es_pblk;
 	}
 
+}
+
+/*
+ * Locking for __es_find_extent_range() for external use
+ */
+void ext4_es_find_extent_range(struct inode *inode,
+			       int (*matching_fn)(struct extent_status *es),
+			       ext4_lblk_t lblk, ext4_lblk_t end,
+			       struct extent_status *es)
+{
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return;
+
+	trace_ext4_es_find_extent_range_enter(inode, lblk);
+
+	read_lock(&EXT4_I(inode)->i_es_lock);
+	__es_find_extent_range(inode, matching_fn, lblk, end, es);
 	read_unlock(&EXT4_I(inode)->i_es_lock);
 
-	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
+	trace_ext4_es_find_extent_range_exit(inode, es);
+}
+
+/*
+ * __es_scan_range - search block range for block with specified status
+ *                   in extents status tree
+ *
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns true if at least one block in the specified block range satisfies
+ * the criterion specified by @matching_fn, and false if not. If at least
+ * one extent has the specified status, then there is at least one block
+ * in the cluster with that status. Should only be called by code that has
+ * taken i_es_lock.
+ */
+static bool __es_scan_range(struct inode *inode,
+			    int (*matching_fn)(struct extent_status *es),
+			    ext4_lblk_t start, ext4_lblk_t end)
+{
+	struct extent_status es;
+
+	__es_find_extent_range(inode, matching_fn, start, end, &es);
+	if (es.es_len == 0)
+		return false; /* no matching extent in the tree */
+	else if (es.es_lblk <= start &&
+		 start < es.es_lblk + es.es_len)
+		return true;
+	else if (start <= es.es_lblk && es.es_lblk <= end)
+		return true;
+	else
+		return false;
+}
+/*
+ * Locking for __es_scan_range() for external use
+ */
+bool ext4_es_scan_range(struct inode *inode,
+			int (*matching_fn)(struct extent_status *es),
+			ext4_lblk_t lblk, ext4_lblk_t end)
+{
+	bool ret;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return false;
+
+	read_lock(&EXT4_I(inode)->i_es_lock);
+	ret = __es_scan_range(inode, matching_fn, lblk, end);
+	read_unlock(&EXT4_I(inode)->i_es_lock);
+
+	return ret;
+}
+
+/*
+ * __es_scan_clu - search cluster for block with specified status in
+ *                 extents status tree
+ *
+ * @inode - file containing the cluster
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block in cluster to be searched
+ *
+ * Returns true if at least one extent in the cluster containing @lblk
+ * satisfies the criterion specified by @matching_fn, and false if not. If at
+ * least one extent has the specified status, then there is at least one block
+ * in the cluster with that status. Should only be called by code that has
+ * taken i_es_lock.
+ */
+static bool __es_scan_clu(struct inode *inode,
+			  int (*matching_fn)(struct extent_status *es),
+			  ext4_lblk_t lblk)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	ext4_lblk_t lblk_start, lblk_end;
+
+	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
+}
+
+/*
+ * Locking for __es_scan_clu() for external use
+ */
+bool ext4_es_scan_clu(struct inode *inode,
+		      int (*matching_fn)(struct extent_status *es),
+		      ext4_lblk_t lblk)
+{
+	bool ret;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return false;
+
+	read_lock(&EXT4_I(inode)->i_es_lock);
+	ret = __es_scan_clu(inode, matching_fn, lblk);
+	read_unlock(&EXT4_I(inode)->i_es_lock);
+
+	return ret;
 }
 
 static void ext4_es_list_add(struct inode *inode)
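The point of threading @matching_fn through the helpers above is that a single search body now serves every extent status. A minimal calling sketch, assuming the ext4_es_is_delonly() predicate that fs/ext4/extents_status.h provides alongside the other ext4_es_is_*() helpers (any predicate with the int (*)(struct extent_status *) shape would do; range_has_delonly() itself is hypothetical):

/* Sketch only: probe a block range for delayed-and-not-unwritten
 * (delonly) blocks through the generalized, internally locked search. */
static bool range_has_delonly(struct inode *inode,
			      ext4_lblk_t start, ext4_lblk_t end)
{
	/* ext4_es_scan_range() takes i_es_lock around __es_scan_range() */
	return ext4_es_scan_range(inode, &ext4_es_is_delonly, start, end);
}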
@@ -595,7 +720,7 @@
 			 * We don't need to check unwritten extent because
 			 * indirect-based file doesn't have it.
 			 */
-			BUG_ON(1);
+			BUG();
 		}
 	} else if (retval == 0) {
 		if (ext4_es_is_written(es)) {
@@ -664,7 +789,7 @@
 			}
 			p = &(*p)->rb_right;
 		} else {
-			BUG_ON(1);
+			BUG();
 			return -EINVAL;
 		}
 	}
@@ -694,6 +819,10 @@
 	struct extent_status newes;
 	ext4_lblk_t end = lblk + len - 1;
 	int err = 0;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return 0;
 
 	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
 		 lblk, len, pblk, status, inode->i_ino);
@@ -719,7 +848,7 @@
 	ext4_es_insert_extent_check(inode, &newes);
 
 	write_lock(&EXT4_I(inode)->i_es_lock);
-	err = __es_remove_extent(inode, lblk, end);
+	err = __es_remove_extent(inode, lblk, end, NULL);
 	if (err != 0)
 		goto error;
 retry:
@@ -729,6 +858,11 @@
 		goto retry;
 	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
 		err = 0;
+
+	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+	    (status & EXTENT_STATUS_WRITTEN ||
+	     status & EXTENT_STATUS_UNWRITTEN))
+		__revise_pending(inode, lblk, len);
 
 error:
 	write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -750,6 +884,9 @@
 	struct extent_status *es;
 	struct extent_status newes;
 	ext4_lblk_t end = lblk + len - 1;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return;
 
 	newes.es_lblk = lblk;
 	newes.es_len = len;
@@ -777,6 +914,7 @@
  * Return: 1 on found, 0 on not
  */
 int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+			  ext4_lblk_t *next_lblk,
 			  struct extent_status *es)
 {
 	struct ext4_es_tree *tree;
@@ -784,6 +922,9 @@
 	struct extent_status *es1 = NULL;
 	struct rb_node *node;
 	int found = 0;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return 0;
 
 	trace_ext4_es_lookup_extent_enter(inode, lblk);
 	es_debug("lookup extent in block %u\n", lblk);
@@ -825,9 +966,18 @@
 		es->es_pblk = es1->es_pblk;
 		if (!ext4_es_is_referenced(es1))
 			ext4_es_set_referenced(es1);
-		stats->es_stats_cache_hits++;
+		percpu_counter_inc(&stats->es_stats_cache_hits);
+		if (next_lblk) {
+			node = rb_next(&es1->rb_node);
+			if (node) {
+				es1 = rb_entry(node, struct extent_status,
+					       rb_node);
+				*next_lblk = es1->es_lblk;
+			} else
+				*next_lblk = 0;
+		}
 	} else {
-		stats->es_stats_cache_misses++;
+		percpu_counter_inc(&stats->es_stats_cache_misses);
 	}
 
 	read_unlock(&EXT4_I(inode)->i_es_lock);
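Two independent changes land here: the hit/miss tallies become percpu counters, so a cache lookup no longer dirties a shared cache line on every increment, and the lookup gains an optional @next_lblk out-parameter that reports where the next cached extent starts (0 if nothing follows). A hedged sketch of a caller using the new signature; walk_cached() is hypothetical, not part of the patch:

/* Hypothetical caller: visit consecutive cached extents from 'lblk'. */
static void walk_cached(struct inode *inode, ext4_lblk_t lblk)
{
	struct extent_status es;
	ext4_lblk_t next = 0;

	while (ext4_es_lookup_extent(inode, lblk, &next, &es)) {
		/* [es.es_lblk, es.es_lblk + es.es_len) is in the cache */
		if (next == 0)
			break;		/* nothing follows this extent */
		lblk = next;		/* start of the next cached extent */
	}
}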
@@ -836,8 +986,322 @@
 	return found;
 }
 
+struct rsvd_count {
+	int ndelonly;
+	bool first_do_lblk_found;
+	ext4_lblk_t first_do_lblk;
+	ext4_lblk_t last_do_lblk;
+	struct extent_status *left_es;
+	bool partial;
+	ext4_lblk_t lclu;
+};
+
+/*
+ * init_rsvd - initialize reserved count data before removing block range
+ *             in file from extent status tree
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @es - pointer to first extent in range
+ * @rc - pointer to reserved count data
+ *
+ * Assumes es is not NULL
+ */
+static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
+		      struct extent_status *es, struct rsvd_count *rc)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct rb_node *node;
+
+	rc->ndelonly = 0;
+
+	/*
+	 * for bigalloc, note the first delonly block in the range has not
+	 * been found, record the extent containing the block to the left of
+	 * the region to be removed, if any, and note that there's no partial
+	 * cluster to track
+	 */
+	if (sbi->s_cluster_ratio > 1) {
+		rc->first_do_lblk_found = false;
+		if (lblk > es->es_lblk) {
+			rc->left_es = es;
+		} else {
+			node = rb_prev(&es->rb_node);
+			rc->left_es = node ? rb_entry(node,
+						      struct extent_status,
+						      rb_node) : NULL;
+		}
+		rc->partial = false;
+	}
+}
+
+/*
+ * count_rsvd - count the clusters containing delayed and not unwritten
+ *              (delonly) blocks in a range within an extent and add to
+ *              the running tally in rsvd_count
+ *
+ * @inode - file containing extent
+ * @lblk - first block in range
+ * @len - length of range in blocks
+ * @es - pointer to extent containing clusters to be counted
+ * @rc - pointer to reserved count data
+ *
+ * Tracks partial clusters found at the beginning and end of extents so
+ * they aren't overcounted when they span adjacent extents
+ */
+static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
+		       struct extent_status *es, struct rsvd_count *rc)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	ext4_lblk_t i, end, nclu;
+
+	if (!ext4_es_is_delonly(es))
+		return;
+
+	WARN_ON(len <= 0);
+
+	if (sbi->s_cluster_ratio == 1) {
+		rc->ndelonly += (int) len;
+		return;
+	}
+
+	/* bigalloc */
+
+	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
+	end = lblk + (ext4_lblk_t) len - 1;
+	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
+
+	/* record the first block of the first delonly extent seen */
+	if (!rc->first_do_lblk_found) {
+		rc->first_do_lblk = i;
+		rc->first_do_lblk_found = true;
+	}
+
+	/* update the last lblk in the region seen so far */
+	rc->last_do_lblk = end;
+
+	/*
+	 * if we're tracking a partial cluster and the current extent
+	 * doesn't start with it, count it and stop tracking
+	 */
+	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
+		rc->ndelonly++;
+		rc->partial = false;
+	}
+
+	/*
+	 * if the first cluster doesn't start on a cluster boundary but
+	 * ends on one, count it
+	 */
+	if (EXT4_LBLK_COFF(sbi, i) != 0) {
+		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
+			rc->ndelonly++;
+			rc->partial = false;
+			i = EXT4_LBLK_CFILL(sbi, i) + 1;
+		}
+	}
+
+	/*
+	 * if the current cluster starts on a cluster boundary, count the
+	 * number of whole delonly clusters in the extent
+	 */
+	if ((i + sbi->s_cluster_ratio - 1) <= end) {
+		nclu = (end - i + 1) >> sbi->s_cluster_bits;
+		rc->ndelonly += nclu;
+		i += nclu << sbi->s_cluster_bits;
+	}
+
+	/*
+	 * start tracking a partial cluster if there's a partial at the end
+	 * of the current extent and we're not already tracking one
+	 */
+	if (!rc->partial && i <= end) {
+		rc->partial = true;
+		rc->lclu = EXT4_B2C(sbi, i);
+	}
+}
+
+/*
+ * __pr_tree_search - search for a pending cluster reservation
+ *
+ * @root - root of pending reservation tree
+ * @lclu - logical cluster to search for
+ *
+ * Returns the pending reservation for the cluster identified by @lclu
+ * if found. If not, returns a reservation for the next cluster if any,
+ * and if not, returns NULL.
+ */
+static struct pending_reservation *__pr_tree_search(struct rb_root *root,
+						    ext4_lblk_t lclu)
+{
+	struct rb_node *node = root->rb_node;
+	struct pending_reservation *pr = NULL;
+
+	while (node) {
+		pr = rb_entry(node, struct pending_reservation, rb_node);
+		if (lclu < pr->lclu)
+			node = node->rb_left;
+		else if (lclu > pr->lclu)
+			node = node->rb_right;
+		else
+			return pr;
+	}
+	if (pr && lclu < pr->lclu)
+		return pr;
+	if (pr && lclu > pr->lclu) {
+		node = rb_next(&pr->rb_node);
+		return node ? rb_entry(node, struct pending_reservation,
+				       rb_node) : NULL;
+	}
+	return NULL;
+}
+
+/*
+ * get_rsvd - calculates and returns the number of cluster reservations to be
+ *            released when removing a block range from the extent status tree
+ *            and releases any pending reservations within the range
+ *
+ * @inode - file containing block range
+ * @end - last block in range
+ * @right_es - pointer to extent containing next block beyond end or NULL
+ * @rc - pointer to reserved count data
+ *
+ * The number of reservations to be released is equal to the number of
+ * clusters containing delayed and not unwritten (delonly) blocks within
+ * the range, minus the number of clusters still containing delonly blocks
+ * at the ends of the range, and minus the number of pending reservations
+ * within the range.
+ */
+static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+			     struct extent_status *right_es,
+			     struct rsvd_count *rc)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct pending_reservation *pr;
+	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+	struct rb_node *node;
+	ext4_lblk_t first_lclu, last_lclu;
+	bool left_delonly, right_delonly, count_pending;
+	struct extent_status *es;
+
+	if (sbi->s_cluster_ratio > 1) {
+		/* count any remaining partial cluster */
+		if (rc->partial)
+			rc->ndelonly++;
+
+		if (rc->ndelonly == 0)
+			return 0;
+
+		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
+		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
+
+		/*
+		 * decrease the delonly count by the number of clusters at the
+		 * ends of the range that still contain delonly blocks -
+		 * these clusters still need to be reserved
+		 */
+		left_delonly = right_delonly = false;
+
+		es = rc->left_es;
+		while (es && ext4_es_end(es) >=
+		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
+			if (ext4_es_is_delonly(es)) {
+				rc->ndelonly--;
+				left_delonly = true;
+				break;
+			}
+			node = rb_prev(&es->rb_node);
+			if (!node)
+				break;
+			es = rb_entry(node, struct extent_status, rb_node);
+		}
+		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+			if (end < ext4_es_end(right_es)) {
+				es = right_es;
+			} else {
+				node = rb_next(&right_es->rb_node);
+				es = node ? rb_entry(node, struct extent_status,
+						     rb_node) : NULL;
+			}
+			while (es && es->es_lblk <=
+			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
+				if (ext4_es_is_delonly(es)) {
+					rc->ndelonly--;
+					right_delonly = true;
+					break;
+				}
+				node = rb_next(&es->rb_node);
+				if (!node)
+					break;
+				es = rb_entry(node, struct extent_status,
+					      rb_node);
+			}
+		}
+
+		/*
+		 * Determine the block range that should be searched for
+		 * pending reservations, if any. Clusters on the ends of the
+		 * original removed range containing delonly blocks are
+		 * excluded. They've already been accounted for and it's not
+		 * possible to determine if an associated pending reservation
+		 * should be released with the information available in the
+		 * extents status tree.
+		 */
+		if (first_lclu == last_lclu) {
+			if (left_delonly | right_delonly)
+				count_pending = false;
+			else
+				count_pending = true;
+		} else {
+			if (left_delonly)
+				first_lclu++;
+			if (right_delonly)
+				last_lclu--;
+			if (first_lclu <= last_lclu)
+				count_pending = true;
+			else
+				count_pending = false;
+		}
+
+		/*
+		 * a pending reservation found between first_lclu and last_lclu
+		 * represents an allocated cluster that contained at least one
+		 * delonly block, so the delonly total must be reduced by one
+		 * for each pending reservation found and released
+		 */
+		if (count_pending) {
+			pr = __pr_tree_search(&tree->root, first_lclu);
+			while (pr && pr->lclu <= last_lclu) {
+				rc->ndelonly--;
+				node = rb_next(&pr->rb_node);
+				rb_erase(&pr->rb_node, &tree->root);
+				kmem_cache_free(ext4_pending_cachep, pr);
+				if (!node)
+					break;
+				pr = rb_entry(node, struct pending_reservation,
+					      rb_node);
+			}
+		}
+	}
+	return rc->ndelonly;
+}
+
+
+/*
+ * __es_remove_extent - removes block range from extent status tree
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @end - last block in range
+ * @reserved - number of cluster reservations released
+ *
+ * If @reserved is not NULL and delayed allocation is enabled, counts
+ * block/cluster reservations freed by removing range and if bigalloc
+ * enabled cancels pending reservations as needed. Returns 0 on success,
+ * error code on failure.
+ */
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-			      ext4_lblk_t end)
+			      ext4_lblk_t end, int *reserved)
 {
 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
 	struct rb_node *node;
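The bigalloc accounting above leans on three conversions that are easy to misread: EXT4_B2C() maps a block to its cluster number, EXT4_LBLK_CMASK() rounds a logical block down to the first block of its cluster, and EXT4_LBLK_CFILL() rounds up to the last. A self-contained userspace sketch of the same bit arithmetic, assuming the power-of-two cluster geometry ext4 guarantees (16 blocks per cluster here):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ext4_lblk_t;

/* Userspace mirrors of the ext4.h conversion macros, assuming
 * cluster_ratio = 1 << cluster_bits. */
static const unsigned cluster_bits = 4;		/* 16 blocks per cluster */
static const ext4_lblk_t cluster_ratio = 1u << 4;

static ext4_lblk_t b2c(ext4_lblk_t b)   { return b >> cluster_bits; }
static ext4_lblk_t cmask(ext4_lblk_t b) { return b & ~(cluster_ratio - 1); }
static ext4_lblk_t cfill(ext4_lblk_t b) { return b | (cluster_ratio - 1); }

int main(void)
{
	ext4_lblk_t lblk = 37;

	/* block 37 sits in cluster 2, which spans blocks 32..47 */
	printf("cluster %u, first block %u, last block %u\n",
	       b2c(lblk), cmask(lblk), cfill(lblk));
	return 0;
}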
@@ -846,9 +1310,14 @@
 	ext4_lblk_t len1, len2;
 	ext4_fsblk_t block;
 	int err;
+	bool count_reserved = true;
+	struct rsvd_count rc;
 
+	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
+		count_reserved = false;
 retry:
 	err = 0;
+
 	es = __es_tree_search(&tree->root, lblk);
 	if (!es)
 		goto out;
@@ -898,10 +1369,16 @@
 				ext4_es_store_pblock(es, block);
 			}
 		}
+		if (count_reserved)
+			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+				   &orig_es, &rc);
 		goto out;
 	}
 
 	if (len1 > 0) {
+		if (count_reserved)
+			count_rsvd(inode, lblk, orig_es.es_len - len1,
+				   &orig_es, &rc);
 		node = rb_next(&es->rb_node);
 		if (node)
 			es = rb_entry(node, struct extent_status, rb_node);
@@ -910,6 +1387,8 @@
 	}
 
 	while (es && ext4_es_end(es) <= end) {
+		if (count_reserved)
+			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
 		node = rb_next(&es->rb_node);
 		rb_erase(&es->rb_node, &tree->root);
 		ext4_es_free_extent(inode, es);
@@ -924,6 +1403,9 @@
 		ext4_lblk_t orig_len = es->es_len;
 
 		len1 = ext4_es_end(es) - end;
+		if (count_reserved)
+			count_rsvd(inode, es->es_lblk, orig_len - len1,
+				   es, &rc);
 		es->es_lblk = end + 1;
 		es->es_len = len1;
 		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
@@ -932,20 +1414,31 @@
 		}
 	}
 
+	if (count_reserved)
+		*reserved = get_rsvd(inode, end, es, &rc);
 out:
 	return err;
 }
 
 /*
- * ext4_es_remove_extent() removes a space from a extent status tree.
+ * ext4_es_remove_extent - removes block range from extent status tree
  *
- * Return 0 on success, error code on failure.
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @len - number of blocks to remove
+ *
+ * Reduces block/cluster reservation count and for bigalloc cancels pending
+ * reservations as needed. Returns 0 on success, error code on failure.
 */
 int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 			  ext4_lblk_t len)
 {
 	ext4_lblk_t end;
 	int err = 0;
+	int reserved = 0;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return 0;
 
 	trace_ext4_es_remove_extent(inode, lblk, len);
 	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
@@ -963,9 +1456,10 @@
 	 * is reclaimed.
 	 */
 	write_lock(&EXT4_I(inode)->i_es_lock);
-	err = __es_remove_extent(inode, lblk, end);
+	err = __es_remove_extent(inode, lblk, end, &reserved);
 	write_unlock(&EXT4_I(inode)->i_es_lock);
 	ext4_es_print_tree(inode);
+	ext4_da_release_space(inode, reserved);
 	return err;
 }
 
@@ -1111,9 +1605,9 @@
 	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
 		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
 		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
-	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
-		   es_stats->es_stats_cache_hits,
-		   es_stats->es_stats_cache_misses);
+	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
+		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
+		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
 	if (inode_cnt)
 		seq_printf(seq, "  %d inodes on list\n", inode_cnt);
 
@@ -1140,35 +1634,46 @@
 	sbi->s_es_nr_inode = 0;
 	spin_lock_init(&sbi->s_es_lock);
 	sbi->s_es_stats.es_stats_shrunk = 0;
-	sbi->s_es_stats.es_stats_cache_hits = 0;
-	sbi->s_es_stats.es_stats_cache_misses = 0;
+	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
+				  GFP_KERNEL);
+	if (err)
+		return err;
+	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
+				  GFP_KERNEL);
+	if (err)
+		goto err1;
 	sbi->s_es_stats.es_stats_scan_time = 0;
 	sbi->s_es_stats.es_stats_max_scan_time = 0;
 	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
 	if (err)
-		return err;
+		goto err2;
 	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
 	if (err)
-		goto err1;
+		goto err3;
 
 	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
 	sbi->s_es_shrinker.count_objects = ext4_es_count;
 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
 	err = register_shrinker(&sbi->s_es_shrinker);
 	if (err)
-		goto err2;
+		goto err4;
 
 	return 0;
-
-err2:
+err4:
 	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
-err1:
+err3:
 	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
+err2:
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
+err1:
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
 	return err;
 }
 
 void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
 {
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
 	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
 	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
 	unregister_shrinker(&sbi->s_es_shrinker);
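With two more counters to initialize, the unwind ladder is renumbered so that each failure path destroys exactly what was set up before it, in reverse order. The idiom in isolation, as a sketch with placeholder counters:

#include <linux/percpu_counter.h>

/* Sketch of the init/unwind ladder used above: every successful
 * percpu_counter_init() gets a matching destroy on a later failure. */
static int counters_init(struct percpu_counter *a, struct percpu_counter *b,
			 struct percpu_counter *c)
{
	int err;

	err = percpu_counter_init(a, 0, GFP_KERNEL);
	if (err)
		return err;		/* nothing to undo yet */
	err = percpu_counter_init(b, 0, GFP_KERNEL);
	if (err)
		goto err1;
	err = percpu_counter_init(c, 0, GFP_KERNEL);
	if (err)
		goto err2;
	return 0;
err2:
	percpu_counter_destroy(b);
err1:
	percpu_counter_destroy(a);
	return err;
}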
@@ -1193,7 +1698,7 @@
 	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
 	if (!es)
 		goto out_wrap;
-	node = &es->rb_node;
+
 	while (*nr_to_scan > 0) {
 		if (es->es_lblk > end) {
 			ei->i_es_shrink_lblk = end + 1;
@@ -1250,3 +1755,440 @@
 	ei->i_es_tree.cache_es = NULL;
 	return nr_shrunk;
 }
+
+/*
+ * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove
+ * discretionary entries from the extent status cache. (Some entries
+ * must be present for proper operations.)
+ */
+void ext4_clear_inode_es(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct extent_status *es;
+	struct ext4_es_tree *tree;
+	struct rb_node *node;
+
+	write_lock(&ei->i_es_lock);
+	tree = &EXT4_I(inode)->i_es_tree;
+	tree->cache_es = NULL;
+	node = rb_first(&tree->root);
+	while (node) {
+		es = rb_entry(node, struct extent_status, rb_node);
+		node = rb_next(node);
+		if (!ext4_es_is_delayed(es)) {
+			rb_erase(&es->rb_node, &tree->root);
+			ext4_es_free_extent(inode, es);
+		}
+	}
+	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
+	write_unlock(&ei->i_es_lock);
+}
+
+#ifdef ES_DEBUG__
+static void ext4_print_pending_tree(struct inode *inode)
+{
+	struct ext4_pending_tree *tree;
+	struct rb_node *node;
+	struct pending_reservation *pr;
+
+	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
+	tree = &EXT4_I(inode)->i_pending_tree;
+	node = rb_first(&tree->root);
+	while (node) {
+		pr = rb_entry(node, struct pending_reservation, rb_node);
+		printk(KERN_DEBUG " %u", pr->lclu);
+		node = rb_next(node);
+	}
+	printk(KERN_DEBUG "\n");
+}
+#else
+#define ext4_print_pending_tree(inode)
+#endif
+
+int __init ext4_init_pending(void)
+{
+	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
+					   sizeof(struct pending_reservation),
+					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
+	if (ext4_pending_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void ext4_exit_pending(void)
+{
+	kmem_cache_destroy(ext4_pending_cachep);
+}
+
+void ext4_init_pending_tree(struct ext4_pending_tree *tree)
+{
+	tree->root = RB_ROOT;
+}
+
+/*
+ * __get_pending - retrieve a pointer to a pending reservation
+ *
+ * @inode - file containing the pending cluster reservation
+ * @lclu - logical cluster of interest
+ *
+ * Returns a pointer to a pending reservation if it's a member of
+ * the set, and NULL if not. Must be called holding i_es_lock.
+ */
+static struct pending_reservation *__get_pending(struct inode *inode,
+						 ext4_lblk_t lclu)
+{
+	struct ext4_pending_tree *tree;
+	struct rb_node *node;
+	struct pending_reservation *pr = NULL;
+
+	tree = &EXT4_I(inode)->i_pending_tree;
+	node = (&tree->root)->rb_node;
+
+	while (node) {
+		pr = rb_entry(node, struct pending_reservation, rb_node);
+		if (lclu < pr->lclu)
+			node = node->rb_left;
+		else if (lclu > pr->lclu)
+			node = node->rb_right;
+		else if (lclu == pr->lclu)
+			return pr;
+	}
+	return NULL;
+}
+
+/*
+ * __insert_pending - adds a pending cluster reservation to the set of
+ *                    pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+	struct rb_node **p = &tree->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct pending_reservation *pr;
+	ext4_lblk_t lclu;
+	int ret = 0;
+
+	lclu = EXT4_B2C(sbi, lblk);
+	/* search to find parent for insertion */
+	while (*p) {
+		parent = *p;
+		pr = rb_entry(parent, struct pending_reservation, rb_node);
+
+		if (lclu < pr->lclu) {
+			p = &(*p)->rb_left;
+		} else if (lclu > pr->lclu) {
+			p = &(*p)->rb_right;
+		} else {
+			/* pending reservation already inserted */
+			goto out;
+		}
+	}
+
+	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+	if (pr == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	pr->lclu = lclu;
+
+	rb_link_node(&pr->rb_node, parent, p);
+	rb_insert_color(&pr->rb_node, &tree->root);
+
+out:
+	return ret;
+}
+
+/*
+ * __remove_pending - removes a pending cluster reservation from the set
+ *                    of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Returns successfully if pending reservation is not a member of the set.
+ */
+static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct pending_reservation *pr;
+	struct ext4_pending_tree *tree;
+
+	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
+	if (pr != NULL) {
+		tree = &EXT4_I(inode)->i_pending_tree;
+		rb_erase(&pr->rb_node, &tree->root);
+		kmem_cache_free(ext4_pending_cachep, pr);
+	}
+}
+
+/*
+ * ext4_remove_pending - removes a pending cluster reservation from the set
+ *                       of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Locking for external use of __remove_pending.
+ */
+void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	write_lock(&ei->i_es_lock);
+	__remove_pending(inode, lblk);
+	write_unlock(&ei->i_es_lock);
+}
+
+/*
+ * ext4_is_pending - determine whether a cluster has a pending reservation
+ *                   on it
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster
+ *
+ * Returns true if there's a pending reservation for the cluster in the
+ * set of pending reservations, and false if not.
+ */
+bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	bool ret;
+
+	read_lock(&ei->i_es_lock);
+	ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
+	read_unlock(&ei->i_es_lock);
+
+	return ret;
+}
+
+/*
+ * ext4_es_insert_delayed_block - adds a delayed block to the extents status
+ *                                tree, adding a pending reservation where
+ *                                needed
+ *
+ * @inode - file containing the newly added block
+ * @lblk - logical block to be added
+ * @allocated - indicates whether a physical cluster has been allocated for
+ *              the logical cluster that contains the block
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+				 bool allocated)
+{
+	struct extent_status newes;
+	int err = 0;
+
+	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+		return 0;
+
+	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
+		 lblk, inode->i_ino);
+
+	newes.es_lblk = lblk;
+	newes.es_len = 1;
+	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
+	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
+
+	ext4_es_insert_extent_check(inode, &newes);
+
+	write_lock(&EXT4_I(inode)->i_es_lock);
+
+	err = __es_remove_extent(inode, lblk, lblk, NULL);
+	if (err != 0)
+		goto error;
+retry:
+	err = __es_insert_extent(inode, &newes);
+	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+					  128, EXT4_I(inode)))
+		goto retry;
+	if (err != 0)
+		goto error;
+
+	if (allocated)
+		__insert_pending(inode, lblk);
+
+error:
+	write_unlock(&EXT4_I(inode)->i_es_lock);
+
+	ext4_es_print_tree(inode);
+	ext4_print_pending_tree(inode);
+
+	return err;
+}
+
+/*
+ * __es_delayed_clu - count number of clusters containing blocks that
+ *                    are delayed only
+ *
+ * @inode - file containing block range
+ * @start - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns the number of clusters containing only delayed (not delayed
+ * and unwritten) blocks in the range specified by @start and @end. Any
+ * cluster or part of a cluster within the range and containing a delayed
+ * and not unwritten block within the range is counted as a whole cluster.
+ */
+static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
+				     ext4_lblk_t end)
+{
+	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+	struct extent_status *es;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct rb_node *node;
+	ext4_lblk_t first_lclu, last_lclu;
+	unsigned long long last_counted_lclu;
+	unsigned int n = 0;
+
+	/* guaranteed to be unequal to any ext4_lblk_t value */
+	last_counted_lclu = ~0ULL;
+
+	es = __es_tree_search(&tree->root, start);
+
+	while (es && (es->es_lblk <= end)) {
+		if (ext4_es_is_delonly(es)) {
+			if (es->es_lblk <= start)
+				first_lclu = EXT4_B2C(sbi, start);
+			else
+				first_lclu = EXT4_B2C(sbi, es->es_lblk);
+
+			if (ext4_es_end(es) >= end)
+				last_lclu = EXT4_B2C(sbi, end);
+			else
+				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
+
+			if (first_lclu == last_counted_lclu)
+				n += last_lclu - first_lclu;
+			else
+				n += last_lclu - first_lclu + 1;
+			last_counted_lclu = last_lclu;
+		}
+		node = rb_next(&es->rb_node);
+		if (!node)
+			break;
+		es = rb_entry(node, struct extent_status, rb_node);
+	}
+
+	return n;
+}
+
+/*
+ * ext4_es_delayed_clu - count number of clusters containing blocks that
+ *                       are both delayed and unwritten
+ *
+ * @inode - file containing block range
+ * @lblk - logical block defining start of range
+ * @len - number of blocks in range
+ *
+ * Locking for external use of __es_delayed_clu().
+ */
+unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+				 ext4_lblk_t len)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	ext4_lblk_t end;
+	unsigned int n;
+
+	if (len == 0)
+		return 0;
+
+	end = lblk + len - 1;
+	WARN_ON(end < lblk);
+
+	read_lock(&ei->i_es_lock);
+
+	n = __es_delayed_clu(inode, lblk, end);
+
+	read_unlock(&ei->i_es_lock);
+
+	return n;
+}
+
+/*
+ * __revise_pending - makes, cancels, or leaves unchanged pending cluster
+ *                    reservations for a specified block range depending
+ *                    upon the presence or absence of delayed blocks
+ *                    outside the range within clusters at the ends of the
+ *                    range
+ *
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len  - length of range in blocks
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+			     ext4_lblk_t len)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	ext4_lblk_t end = lblk + len - 1;
+	ext4_lblk_t first, last;
+	bool f_del = false, l_del = false;
+
+	if (len == 0)
+		return;
+
+	/*
+	 * Two cases - block range within single cluster and block range
+	 * spanning two or more clusters. Note that a cluster belonging
+	 * to a range starting and/or ending on a cluster boundary is treated
+	 * as if it does not contain a delayed extent. The new range may
+	 * have allocated space for previously delayed blocks out to the
+	 * cluster boundary, requiring that any pre-existing pending
+	 * reservation be canceled. Because this code only looks at blocks
+	 * outside the range, it should revise pending reservations
+	 * correctly even if the extent represented by the range can't be
+	 * inserted in the extents status tree due to ENOSPC.
+	 */
+
+	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
+		first = EXT4_LBLK_CMASK(sbi, lblk);
+		if (first != lblk)
+			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+						first, lblk - 1);
+		if (f_del) {
+			__insert_pending(inode, first);
+		} else {
+			last = EXT4_LBLK_CMASK(sbi, end) +
+			       sbi->s_cluster_ratio - 1;
+			if (last != end)
+				l_del = __es_scan_range(inode,
+							&ext4_es_is_delonly,
+							end + 1, last);
+			if (l_del)
+				__insert_pending(inode, last);
+			else
+				__remove_pending(inode, last);
+		}
+	} else {
+		first = EXT4_LBLK_CMASK(sbi, lblk);
+		if (first != lblk)
+			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+						first, lblk - 1);
+		if (f_del)
+			__insert_pending(inode, first);
+		else
+			__remove_pending(inode, first);
+
+		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+		if (last != end)
+			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+						end + 1, last);
+		if (l_del)
+			__insert_pending(inode, last);
+		else
+			__remove_pending(inode, last);
+	}
+}
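A worked example makes the single-cluster case of __revise_pending() concrete. Assume bigalloc with s_cluster_ratio = 16 and a newly written extent covering blocks 36..43: the head (32..35) and tail (44..47) of cluster 2 fall outside the range, and those are the spans scanned for delonly blocks before a pending reservation is inserted or removed (the tail scan is skipped when the head scan already found one). A small userspace check of the boundary math:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ext4_lblk_t;

#define RATIO 16u			/* stand-in for sbi->s_cluster_ratio */
#define CMASK(b) ((b) & ~(RATIO - 1))	/* EXT4_LBLK_CMASK() equivalent */

int main(void)
{
	ext4_lblk_t lblk = 36, len = 8;
	ext4_lblk_t end = lblk + len - 1;		/* 43 */
	ext4_lblk_t first = CMASK(lblk);		/* 32 */
	ext4_lblk_t last = CMASK(end) + RATIO - 1;	/* 47 */

	if (first != lblk)	/* cluster head outside the written range */
		printf("scan [%u, %u] for delonly blocks\n", first, lblk - 1);
	if (last != end)	/* cluster tail outside the written range */
		printf("scan [%u, %u] for delonly blocks\n", end + 1, last);
	return 0;
}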