2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/fs/f2fs/file.c
@@ -21,6 +21,7 @@
 #include <linux/uuid.h>
 #include <linux/file.h>
 #include <linux/nls.h>
+#include <linux/sched/signal.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -28,18 +29,17 @@
 #include "xattr.h"
 #include "acl.h"
 #include "gc.h"
-#include "trace.h"
 #include <trace/events/f2fs.h>
-#include <trace/events/android_fs.h>
+#include <uapi/linux/f2fs.h>
 
 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 {
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	vm_fault_t ret;
 
-	down_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 	ret = filemap_fault(vmf);
-	up_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
 
 	if (!ret)
 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
@@ -59,6 +59,12 @@
 	bool need_alloc = true;
 	int err = 0;
 
+	if (unlikely(IS_IMMUTABLE(inode)))
+		return VM_FAULT_SIGBUS;
+
+	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+		return VM_FAULT_SIGBUS;
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		err = -EIO;
 		goto err;
@@ -69,6 +75,10 @@
 		goto err;
 	}
 
+	err = f2fs_convert_inline_inode(inode);
+	if (err)
+		goto err;
+
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	if (f2fs_compressed_file(inode)) {
 		int ret = f2fs_is_compressed_cluster(inode, page->index);
@@ -77,10 +87,6 @@
 			err = ret;
 			goto err;
 		} else if (ret) {
-			if (ret < F2FS_I(inode)->i_cluster_size) {
-				err = -EAGAIN;
-				goto err;
-			}
 			need_alloc = false;
 		}
 	}
@@ -94,7 +100,7 @@
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
 	file_update_time(vmf->vma->vm_file);
-	down_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 	lock_page(page);
 	if (unlikely(page->mapping != inode->i_mapping ||
 			page_offset(page) > i_size_read(inode) ||
@@ -106,11 +112,10 @@
 
 	if (need_alloc) {
 		/* block allocation */
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = f2fs_get_block(&dn, page->index);
-		f2fs_put_dnode(&dn);
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
 	}
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -153,7 +158,7 @@
 
 	trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-	up_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
 
 	sb_end_pagefault(inode->i_sb);
 err:
@@ -164,6 +169,9 @@
 	.fault		= f2fs_filemap_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite	= f2fs_vm_page_mkwrite,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	.allow_speculation = filemap_allow_speculation,
+#endif
 };
 
 static int get_parent_ino(struct inode *inode, nid_t *pino)
@@ -231,13 +239,13 @@
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	nid_t pino;
 
-	down_write(&fi->i_sem);
+	f2fs_down_write(&fi->i_sem);
 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 			get_parent_ino(inode, &pino)) {
 		f2fs_i_pino_write(inode, pino);
 		file_got_pino(inode);
 	}
-	up_write(&fi->i_sem);
+	f2fs_up_write(&fi->i_sem);
 }
 
 static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
@@ -275,20 +283,10 @@
 	};
 	unsigned int seq_id = 0;
 
-	if (unlikely(f2fs_readonly(inode->i_sb) ||
-				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+	if (unlikely(f2fs_readonly(inode->i_sb)))
 		return 0;
 
 	trace_f2fs_sync_file_enter(inode);
-
-	if (trace_android_fs_fsync_start_enabled()) {
-		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
-
-		path = android_fstrace_get_pathname(pathbuf,
-				MAX_TRACE_PATHBUF_LEN, inode);
-		trace_android_fs_fsync_start(inode,
-				current->pid, path, current->comm);
-	}
 
 	if (S_ISDIR(inode->i_mode))
 		goto go_write;
@@ -300,7 +298,7 @@
 	ret = file_write_and_wait_range(file, start, end);
 	clear_inode_flag(inode, FI_NEED_IPU);
 
-	if (ret) {
+	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
 		return ret;
 	}
@@ -331,9 +329,9 @@
 	 * Both of fdatasync() and fsync() are able to be recovered from
 	 * sudden-power-off.
 	 */
-	down_read(&F2FS_I(inode)->i_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_sem);
 	cp_reason = need_do_checkpoint(inode);
-	up_read(&F2FS_I(inode)->i_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_sem);
 
 	if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
 		/* all the dirty node pages should be flushed for POR */
@@ -395,9 +393,6 @@
 	f2fs_update_time(sbi, REQ_TIME);
 out:
 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
-	f2fs_trace_ios(NULL, 1);
-	trace_android_fs_fsync_end(inode, start, end - start);
-
 	return ret;
 }
 
@@ -408,32 +403,15 @@
 	return f2fs_do_sync_file(file, start, end, datasync, false);
 }
 
-static pgoff_t __get_first_dirty_index(struct address_space *mapping,
-						pgoff_t pgofs, int whence)
-{
-	struct page *page;
-	int nr_pages;
-
-	if (whence != SEEK_DATA)
-		return 0;
-
-	/* find first dirty page index */
-	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
-					1, &page);
-	if (!nr_pages)
-		return ULONG_MAX;
-	pgofs = page->index;
-	put_page(page);
-	return pgofs;
-}
-
-static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
-				pgoff_t dirty, pgoff_t pgofs, int whence)
+static bool __found_offset(struct address_space *mapping, block_t blkaddr,
+				pgoff_t index, int whence)
 {
 	switch (whence) {
 	case SEEK_DATA:
-		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-			__is_valid_data_blkaddr(blkaddr))
+		if (__is_valid_data_blkaddr(blkaddr))
+			return true;
+		if (blkaddr == NEW_ADDR &&
+		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
 			return true;
 		break;
 	case SEEK_HOLE:
@@ -449,7 +427,7 @@
 	struct inode *inode = file->f_mapping->host;
 	loff_t maxbytes = inode->i_sb->s_maxbytes;
 	struct dnode_of_data dn;
-	pgoff_t pgofs, end_offset, dirty;
+	pgoff_t pgofs, end_offset;
 	loff_t data_ofs = offset;
 	loff_t isize;
 	int err = 0;
@@ -461,15 +439,17 @@
 		goto fail;
 
 	/* handle inline data case */
-	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
-		if (whence == SEEK_HOLE)
+	if (f2fs_has_inline_data(inode)) {
+		if (whence == SEEK_HOLE) {
 			data_ofs = isize;
-		goto found;
+			goto found;
+		} else if (whence == SEEK_DATA) {
+			data_ofs = offset;
+			goto found;
+		}
 	}
 
 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
-
-	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -503,7 +483,7 @@
 				goto fail;
 			}
 
-			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+			if (__found_offset(file->f_mapping, blkaddr,
 							pgofs, whence)) {
 				f2fs_put_dnode(&dn);
 				goto found;
@@ -529,6 +509,9 @@
 	struct inode *inode = file->f_mapping->host;
 	loff_t maxbytes = inode->i_sb->s_maxbytes;
 
+	if (f2fs_compressed_file(inode))
+		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+
 	switch (whence) {
 	case SEEK_SET:
 	case SEEK_CUR:
@@ -548,18 +531,12 @@
 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(file);
-	int err;
 
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 		return -EIO;
 
 	if (!f2fs_is_compress_backend_ready(inode))
 		return -EOPNOTSUPP;
-
-	/* we don't need to use inline_data strictly */
-	err = f2fs_convert_inline_inode(inode);
-	if (err)
-		return err;
 
 	file_accessed(file);
 	vma->vm_ops = &f2fs_file_vm_ops;
@@ -596,7 +573,7 @@
 	bool compressed_cluster = false;
 	int cluster_index = 0, valid_blocks = 0;
 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
-	bool released = !F2FS_I(dn->inode)->i_compr_blocks;
+	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
 
 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
 		base = get_extra_isize(dn->inode);
@@ -651,7 +628,8 @@
 		 */
 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
 							dn->inode) + ofs;
-		f2fs_update_extent_cache_range(dn, fofs, 0, len);
+		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+		f2fs_update_age_extent_cache_range(dn, fofs, len);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
 	}
 	dn->ofs_in_node = ofs;
@@ -713,7 +691,7 @@
 
 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
 
-	if (free_from >= sbi->max_file_blocks)
+	if (free_from >= max_file_blocks(inode))
 		goto free_partial;
 
 	if (lock)
@@ -785,11 +763,14 @@
 		return err;
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
-	if (from != free_from)
+	if (from != free_from) {
 		err = f2fs_truncate_partial_cluster(inode, from, lock);
+		if (err)
+			return err;
+	}
 #endif
 
-	return err;
+	return 0;
 }
 
 int f2fs_truncate(struct inode *inode)
....@@ -887,14 +868,11 @@
887868 if (ia_valid & ATTR_GID)
888869 inode->i_gid = attr->ia_gid;
889870 if (ia_valid & ATTR_ATIME)
890
- inode->i_atime = timespec64_trunc(attr->ia_atime,
891
- inode->i_sb->s_time_gran);
871
+ inode->i_atime = attr->ia_atime;
892872 if (ia_valid & ATTR_MTIME)
893
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
894
- inode->i_sb->s_time_gran);
873
+ inode->i_mtime = attr->ia_mtime;
895874 if (ia_valid & ATTR_CTIME)
896
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
897
- inode->i_sb->s_time_gran);
875
+ inode->i_ctime = attr->ia_ctime;
898876 if (ia_valid & ATTR_MODE) {
899877 umode_t mode = attr->ia_mode;
900878
....@@ -915,6 +893,14 @@
915893
916894 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
917895 return -EIO;
896
+
897
+ if (unlikely(IS_IMMUTABLE(inode)))
898
+ return -EPERM;
899
+
900
+ if (unlikely(IS_APPEND(inode) &&
901
+ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
902
+ ATTR_GID | ATTR_TIMES_SET))))
903
+ return -EPERM;
918904
919905 if ((attr->ia_valid & ATTR_SIZE) &&
920906 !f2fs_is_compress_backend_ready(inode))
....@@ -974,8 +960,8 @@
974960 return err;
975961 }
976962
977
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
978
- down_write(&F2FS_I(inode)->i_mmap_sem);
963
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
964
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
979965
980966 truncate_setsize(inode, attr->ia_size);
981967
....@@ -985,8 +971,8 @@
985971 * do not trim all blocks after i_size if target size is
986972 * larger than i_size.
987973 */
988
- up_write(&F2FS_I(inode)->i_mmap_sem);
989
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
974
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
975
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
990976 if (err)
991977 return err;
992978
....@@ -1000,8 +986,10 @@
1000986
1001987 if (attr->ia_valid & ATTR_MODE) {
1002988 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
1003
- if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
1004
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
989
+
990
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
991
+ if (!err)
992
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
1005993 clear_inode_flag(inode, FI_ACL_MODE);
1006994 }
1007995 }
....@@ -1124,8 +1112,8 @@
11241112 blk_start = (loff_t)pg_start << PAGE_SHIFT;
11251113 blk_end = (loff_t)pg_end << PAGE_SHIFT;
11261114
1127
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1128
- down_write(&F2FS_I(inode)->i_mmap_sem);
1115
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1116
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
11291117
11301118 truncate_pagecache_range(inode, blk_start, blk_end - 1);
11311119
....@@ -1133,8 +1121,8 @@
11331121 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
11341122 f2fs_unlock_op(sbi);
11351123
1136
- up_write(&F2FS_I(inode)->i_mmap_sem);
1137
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1124
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1125
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
11381126 }
11391127 }
11401128
....@@ -1245,7 +1233,7 @@
12451233 if (ret)
12461234 return ret;
12471235
1248
- ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1236
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
12491237 if (ret) {
12501238 f2fs_put_dnode(&dn);
12511239 return ret;
....@@ -1367,8 +1355,8 @@
13671355 f2fs_balance_fs(sbi, true);
13681356
13691357 /* avoid gc operation during block exchange */
1370
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1371
- down_write(&F2FS_I(inode)->i_mmap_sem);
1358
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1359
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
13721360
13731361 f2fs_lock_op(sbi);
13741362 f2fs_drop_extent_tree(inode);
....@@ -1376,8 +1364,8 @@
13761364 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
13771365 f2fs_unlock_op(sbi);
13781366
1379
- up_write(&F2FS_I(inode)->i_mmap_sem);
1380
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1367
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1368
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
13811369 return ret;
13821370 }
13831371
....@@ -1407,15 +1395,13 @@
14071395 return ret;
14081396
14091397 /* write out all moved pages, if possible */
1410
- down_write(&F2FS_I(inode)->i_mmap_sem);
1398
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
14111399 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
14121400 truncate_pagecache(inode, offset);
14131401
14141402 new_size = i_size_read(inode) - len;
1415
- truncate_pagecache(inode, new_size);
1416
-
14171403 ret = f2fs_truncate_blocks(inode, new_size, true);
1418
- up_write(&F2FS_I(inode)->i_mmap_sem);
1404
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
14191405 if (!ret)
14201406 f2fs_i_size_write(inode, new_size);
14211407 return ret;
....@@ -1451,14 +1437,23 @@
14511437 ret = -ENOSPC;
14521438 break;
14531439 }
1454
- if (dn->data_blkaddr != NEW_ADDR) {
1455
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1456
- dn->data_blkaddr = NEW_ADDR;
1457
- f2fs_set_data_blkaddr(dn);
1440
+
1441
+ if (dn->data_blkaddr == NEW_ADDR)
1442
+ continue;
1443
+
1444
+ if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1445
+ DATA_GENERIC_ENHANCE)) {
1446
+ ret = -EFSCORRUPTED;
1447
+ break;
14581448 }
1449
+
1450
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1451
+ dn->data_blkaddr = NEW_ADDR;
1452
+ f2fs_set_data_blkaddr(dn);
14591453 }
14601454
1461
- f2fs_update_extent_cache_range(dn, start, 0, index - start);
1455
+ f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1456
+ f2fs_update_age_extent_cache_range(dn, start, index - start);
14621457
14631458 return ret;
14641459 }
....@@ -1514,8 +1509,8 @@
15141509 unsigned int end_offset;
15151510 pgoff_t end;
15161511
1517
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1518
- down_write(&F2FS_I(inode)->i_mmap_sem);
1512
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1513
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
15191514
15201515 truncate_pagecache_range(inode,
15211516 (loff_t)index << PAGE_SHIFT,
....@@ -1527,8 +1522,8 @@
15271522 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
15281523 if (ret) {
15291524 f2fs_unlock_op(sbi);
1530
- up_write(&F2FS_I(inode)->i_mmap_sem);
1531
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1525
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1526
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
15321527 goto out;
15331528 }
15341529
....@@ -1539,8 +1534,8 @@
15391534 f2fs_put_dnode(&dn);
15401535
15411536 f2fs_unlock_op(sbi);
1542
- up_write(&F2FS_I(inode)->i_mmap_sem);
1543
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1537
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1538
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
15441539
15451540 f2fs_balance_fs(sbi, dn.node_changed);
15461541
....@@ -1596,9 +1591,9 @@
15961591
15971592 f2fs_balance_fs(sbi, true);
15981593
1599
- down_write(&F2FS_I(inode)->i_mmap_sem);
1594
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16001595 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1601
- up_write(&F2FS_I(inode)->i_mmap_sem);
1596
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
16021597 if (ret)
16031598 return ret;
16041599
....@@ -1613,8 +1608,8 @@
16131608 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
16141609
16151610 /* avoid gc operation during block exchange */
1616
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1617
- down_write(&F2FS_I(inode)->i_mmap_sem);
1611
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1612
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16181613 truncate_pagecache(inode, offset);
16191614
16201615 while (!ret && idx > pg_start) {
....@@ -1630,14 +1625,14 @@
16301625 idx + delta, nr, false);
16311626 f2fs_unlock_op(sbi);
16321627 }
1633
- up_write(&F2FS_I(inode)->i_mmap_sem);
1634
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1628
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1629
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
16351630
16361631 /* write out all moved pages, if possible */
1637
- down_write(&F2FS_I(inode)->i_mmap_sem);
1632
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16381633 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
16391634 truncate_pagecache(inode, offset);
1640
- up_write(&F2FS_I(inode)->i_mmap_sem);
1635
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
16411636
16421637 if (!ret)
16431638 f2fs_i_size_write(inode, new_size);
....@@ -1651,9 +1646,10 @@
16511646 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
16521647 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
16531648 .m_may_create = true };
1654
- pgoff_t pg_end;
1649
+ pgoff_t pg_start, pg_end;
16551650 loff_t new_size = i_size_read(inode);
16561651 loff_t off_end;
1652
+ block_t expanded = 0;
16571653 int err;
16581654
16591655 err = inode_newsize_ok(inode, (len + offset));
....@@ -1666,11 +1662,12 @@
16661662
16671663 f2fs_balance_fs(sbi, true);
16681664
1665
+ pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
16691666 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
16701667 off_end = (offset + len) & (PAGE_SIZE - 1);
16711668
1672
- map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1673
- map.m_len = pg_end - map.m_lblk;
1669
+ map.m_lblk = pg_start;
1670
+ map.m_len = pg_end - pg_start;
16741671 if (off_end)
16751672 map.m_len++;
16761673
....@@ -1678,51 +1675,49 @@
16781675 return 0;
16791676
16801677 if (f2fs_is_pinned_file(inode)) {
1681
- block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1682
- sbi->log_blocks_per_seg;
1683
- block_t done = 0;
1678
+ block_t sec_blks = BLKS_PER_SEC(sbi);
1679
+ block_t sec_len = roundup(map.m_len, sec_blks);
16841680
1685
- if (map.m_len % sbi->blocks_per_seg)
1686
- len += sbi->blocks_per_seg;
1687
-
1688
- map.m_len = sbi->blocks_per_seg;
1681
+ map.m_len = sec_blks;
16891682 next_alloc:
16901683 if (has_not_enough_free_secs(sbi, 0,
16911684 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1692
- down_write(&sbi->gc_lock);
1693
- err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1685
+ f2fs_down_write(&sbi->gc_lock);
1686
+ err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
16941687 if (err && err != -ENODATA && err != -EAGAIN)
16951688 goto out_err;
16961689 }
16971690
1698
- down_write(&sbi->pin_sem);
1699
- map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1691
+ f2fs_down_write(&sbi->pin_sem);
17001692
17011693 f2fs_lock_op(sbi);
1702
- f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
1694
+ f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
17031695 f2fs_unlock_op(sbi);
17041696
1697
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
17051698 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1706
- up_write(&sbi->pin_sem);
17071699
1708
- done += map.m_len;
1709
- len -= map.m_len;
1700
+ f2fs_up_write(&sbi->pin_sem);
1701
+
1702
+ expanded += map.m_len;
1703
+ sec_len -= map.m_len;
17101704 map.m_lblk += map.m_len;
1711
- if (!err && len)
1705
+ if (!err && sec_len)
17121706 goto next_alloc;
17131707
1714
- map.m_len = done;
1708
+ map.m_len = expanded;
17151709 } else {
17161710 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1711
+ expanded = map.m_len;
17171712 }
17181713 out_err:
17191714 if (err) {
17201715 pgoff_t last_off;
17211716
1722
- if (!map.m_len)
1717
+ if (!expanded)
17231718 return err;
17241719
1725
- last_off = map.m_lblk + map.m_len - 1;
1720
+ last_off = pg_start + expanded - 1;
17261721
17271722 /* update new size to the failed position */
17281723 new_size = (last_off == pg_end) ? offset + len :
....@@ -1773,6 +1768,10 @@
17731768 return -EOPNOTSUPP;
17741769
17751770 inode_lock(inode);
1771
+
1772
+ ret = file_modified(file);
1773
+ if (ret)
1774
+ goto out;
17761775
17771776 if (mode & FALLOC_FL_PUNCH_HOLE) {
17781777 if (offset >= inode->i_size)
....@@ -1846,7 +1845,8 @@
18461845 struct f2fs_inode_info *fi = F2FS_I(inode);
18471846 u32 masked_flags = fi->i_flags & mask;
18481847
1849
- f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1848
+ /* mask can be shrunk by flags_valid selector */
1849
+ iflags &= mask;
18501850
18511851 /* Is it quota file? Do not allow user to mess with it */
18521852 if (IS_NOQUOTA(inode))
....@@ -1868,21 +1868,16 @@
18681868
18691869 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
18701870 if (masked_flags & F2FS_COMPR_FL) {
1871
- if (f2fs_disable_compressed_file(inode))
1871
+ if (!f2fs_disable_compressed_file(inode))
18721872 return -EINVAL;
1873
- }
1874
- if (iflags & F2FS_NOCOMP_FL)
1875
- return -EINVAL;
1876
- if (iflags & F2FS_COMPR_FL) {
1873
+ } else {
18771874 if (!f2fs_may_compress(inode))
18781875 return -EINVAL;
1879
-
1880
- set_compress_context(inode);
1876
+ if (S_ISREG(inode->i_mode) && inode->i_size)
1877
+ return -EINVAL;
1878
+ if (set_compress_context(inode))
1879
+ return -EOPNOTSUPP;
18811880 }
1882
- }
1883
- if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1884
- if (masked_flags & F2FS_COMPR_FL)
1885
- return -EINVAL;
18861881 }
18871882
18881883 fi->i_flags = iflags | (fi->i_flags & ~mask);
....@@ -2071,7 +2066,10 @@
20712066
20722067 inode_lock(inode);
20732068
2074
- f2fs_disable_compressed_file(inode);
2069
+ if (!f2fs_disable_compressed_file(inode)) {
2070
+ ret = -EINVAL;
2071
+ goto out;
2072
+ }
20752073
20762074 if (f2fs_is_atomic_file(inode)) {
20772075 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
....@@ -2083,7 +2081,7 @@
20832081 if (ret)
20842082 goto out;
20852083
2086
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2084
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
20872085
20882086 /*
20892087 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
....@@ -2094,7 +2092,7 @@
20942092 inode->i_ino, get_dirty_pages(inode));
20952093 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
20962094 if (ret) {
2097
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2095
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
20982096 goto out;
20992097 }
21002098
....@@ -2107,7 +2105,7 @@
21072105 /* add inode in inmem_list first and set atomic_file */
21082106 set_inode_flag(inode, FI_ATOMIC_FILE);
21092107 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2110
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2108
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
21112109
21122110 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
21132111 F2FS_I(inode)->inmem_task = current;
....@@ -2274,7 +2272,8 @@
22742272 if (ret) {
22752273 if (ret == -EROFS) {
22762274 ret = 0;
2277
- f2fs_stop_checkpoint(sbi, false);
2275
+ f2fs_stop_checkpoint(sbi, false,
2276
+ STOP_CP_REASON_SHUTDOWN);
22782277 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
22792278 trace_f2fs_shutdown(sbi, in, ret);
22802279 }
....@@ -2284,32 +2283,28 @@
22842283
22852284 switch (in) {
22862285 case F2FS_GOING_DOWN_FULLSYNC:
2287
- sb = freeze_bdev(sb->s_bdev);
2288
- if (IS_ERR(sb)) {
2289
- ret = PTR_ERR(sb);
2286
+ ret = freeze_bdev(sb->s_bdev);
2287
+ if (ret)
22902288 goto out;
2291
- }
2292
- if (sb) {
2293
- f2fs_stop_checkpoint(sbi, false);
2294
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2295
- thaw_bdev(sb->s_bdev, sb);
2296
- }
2289
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2290
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2291
+ thaw_bdev(sb->s_bdev);
22972292 break;
22982293 case F2FS_GOING_DOWN_METASYNC:
22992294 /* do checkpoint only */
23002295 ret = f2fs_sync_fs(sb, 1);
23012296 if (ret)
23022297 goto out;
2303
- f2fs_stop_checkpoint(sbi, false);
2298
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23042299 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23052300 break;
23062301 case F2FS_GOING_DOWN_NOSYNC:
2307
- f2fs_stop_checkpoint(sbi, false);
2302
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23082303 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23092304 break;
23102305 case F2FS_GOING_DOWN_METAFLUSH:
23112306 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2312
- f2fs_stop_checkpoint(sbi, false);
2307
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23132308 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23142309 break;
23152310 case F2FS_GOING_DOWN_NEED_FSCK:
....@@ -2418,7 +2413,7 @@
24182413 if (err)
24192414 return err;
24202415
2421
- down_write(&sbi->sb_lock);
2416
+ f2fs_down_write(&sbi->sb_lock);
24222417
24232418 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
24242419 goto got_it;
....@@ -2437,7 +2432,7 @@
24372432 16))
24382433 err = -EFAULT;
24392434 out_err:
2440
- up_write(&sbi->sb_lock);
2435
+ f2fs_up_write(&sbi->sb_lock);
24412436 mnt_drop_write_file(filp);
24422437 return err;
24432438 }
....@@ -2514,40 +2509,33 @@
25142509 return ret;
25152510
25162511 if (!sync) {
2517
- if (!down_write_trylock(&sbi->gc_lock)) {
2512
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
25182513 ret = -EBUSY;
25192514 goto out;
25202515 }
25212516 } else {
2522
- down_write(&sbi->gc_lock);
2517
+ f2fs_down_write(&sbi->gc_lock);
25232518 }
25242519
2525
- ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2520
+ ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
25262521 out:
25272522 mnt_drop_write_file(filp);
25282523 return ret;
25292524 }
25302525
2531
-static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2526
+static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
25322527 {
2533
- struct inode *inode = file_inode(filp);
2534
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2535
- struct f2fs_gc_range range;
2528
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
25362529 u64 end;
25372530 int ret;
25382531
25392532 if (!capable(CAP_SYS_ADMIN))
25402533 return -EPERM;
2541
-
2542
- if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2543
- sizeof(range)))
2544
- return -EFAULT;
2545
-
25462534 if (f2fs_readonly(sbi->sb))
25472535 return -EROFS;
25482536
2549
- end = range.start + range.len;
2550
- if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
2537
+ end = range->start + range->len;
2538
+ if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
25512539 end >= MAX_BLKADDR(sbi))
25522540 return -EINVAL;
25532541
....@@ -2556,22 +2544,38 @@
25562544 return ret;
25572545
25582546 do_more:
2559
- if (!range.sync) {
2560
- if (!down_write_trylock(&sbi->gc_lock)) {
2547
+ if (!range->sync) {
2548
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
25612549 ret = -EBUSY;
25622550 goto out;
25632551 }
25642552 } else {
2565
- down_write(&sbi->gc_lock);
2553
+ f2fs_down_write(&sbi->gc_lock);
25662554 }
25672555
2568
- ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2569
- range.start += BLKS_PER_SEC(sbi);
2570
- if (range.start <= end)
2556
+ ret = f2fs_gc(sbi, range->sync, true, false,
2557
+ GET_SEGNO(sbi, range->start));
2558
+ if (ret) {
2559
+ if (ret == -EBUSY)
2560
+ ret = -EAGAIN;
2561
+ goto out;
2562
+ }
2563
+ range->start += BLKS_PER_SEC(sbi);
2564
+ if (range->start <= end)
25712565 goto do_more;
25722566 out:
25732567 mnt_drop_write_file(filp);
25742568 return ret;
2569
+}
2570
+
2571
+static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2572
+{
2573
+ struct f2fs_gc_range range;
2574
+
2575
+ if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2576
+ sizeof(range)))
2577
+ return -EFAULT;
2578
+ return __f2fs_ioc_gc_range(filp, &range);
25752579 }
25762580
25772581 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
....@@ -2607,9 +2611,9 @@
26072611 {
26082612 struct inode *inode = file_inode(filp);
26092613 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2610
- .m_seg_type = NO_CHECK_TYPE ,
2614
+ .m_seg_type = NO_CHECK_TYPE,
26112615 .m_may_create = false };
2612
- struct extent_info ei = {0, 0, 0};
2616
+ struct extent_info ei = {};
26132617 pgoff_t pg_start, pg_end, next_pgofs;
26142618 unsigned int blk_per_seg = sbi->blocks_per_seg;
26152619 unsigned int total = 0, sec_num;
....@@ -2617,16 +2621,19 @@
26172621 bool fragmented = false;
26182622 int err;
26192623
2620
- /* if in-place-update policy is enabled, don't waste time here */
2621
- if (f2fs_should_update_inplace(inode, NULL))
2622
- return -EINVAL;
2623
-
26242624 pg_start = range->start >> PAGE_SHIFT;
26252625 pg_end = (range->start + range->len) >> PAGE_SHIFT;
26262626
26272627 f2fs_balance_fs(sbi, true);
26282628
26292629 inode_lock(inode);
2630
+
2631
+ /* if in-place-update policy is enabled, don't waste time here */
2632
+ set_inode_flag(inode, FI_OPU_WRITE);
2633
+ if (f2fs_should_update_inplace(inode, NULL)) {
2634
+ err = -EINVAL;
2635
+ goto out;
2636
+ }
26302637
26312638 /* writeback all dirty pages in the range */
26322639 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
....@@ -2638,7 +2645,7 @@
26382645 * lookup mapping info in extent cache, skip defragmenting if physical
26392646 * block addresses are continuous.
26402647 */
2641
- if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2648
+ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
26422649 if (ei.fofs + ei.len >= pg_end)
26432650 goto out;
26442651 }
....@@ -2709,7 +2716,7 @@
27092716 goto check;
27102717 }
27112718
2712
- set_inode_flag(inode, FI_DO_DEFRAG);
2719
+ set_inode_flag(inode, FI_SKIP_WRITES);
27132720
27142721 idx = map.m_lblk;
27152722 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
....@@ -2734,15 +2741,16 @@
27342741 if (map.m_lblk < pg_end && cnt < blk_per_seg)
27352742 goto do_map;
27362743
2737
- clear_inode_flag(inode, FI_DO_DEFRAG);
2744
+ clear_inode_flag(inode, FI_SKIP_WRITES);
27382745
27392746 err = filemap_fdatawrite(inode->i_mapping);
27402747 if (err)
27412748 goto out;
27422749 }
27432750 clear_out:
2744
- clear_inode_flag(inode, FI_DO_DEFRAG);
2751
+ clear_inode_flag(inode, FI_SKIP_WRITES);
27452752 out:
2753
+ clear_inode_flag(inode, FI_OPU_WRITE);
27462754 inode_unlock(inode);
27472755 if (!err)
27482756 range->len = (u64)total << PAGE_SHIFT;
....@@ -2774,7 +2782,7 @@
27742782 return -EINVAL;
27752783
27762784 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2777
- sbi->max_file_blocks))
2785
+ max_file_blocks(inode)))
27782786 return -EINVAL;
27792787
27802788 err = mnt_want_write_file(filp);
....@@ -2817,6 +2825,9 @@
28172825
28182826 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
28192827 return -EOPNOTSUPP;
2828
+
2829
+ if (pos_out < 0 || pos_in < 0)
2830
+ return -EINVAL;
28202831
28212832 if (src == dst) {
28222833 if (pos_in == pos_out)
....@@ -2875,10 +2886,10 @@
28752886
28762887 f2fs_balance_fs(sbi, true);
28772888
2878
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2889
+ f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
28792890 if (src != dst) {
28802891 ret = -EBUSY;
2881
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2892
+ if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
28822893 goto out_src;
28832894 }
28842895
....@@ -2896,9 +2907,9 @@
28962907 f2fs_unlock_op(sbi);
28972908
28982909 if (src != dst)
2899
- up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2910
+ f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
29002911 out_src:
2901
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2912
+ f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
29022913 out_unlock:
29032914 if (src != dst)
29042915 inode_unlock(dst);
....@@ -2907,9 +2918,9 @@
29072918 return ret;
29082919 }
29092920
2910
-static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2921
+static int __f2fs_ioc_move_range(struct file *filp,
2922
+ struct f2fs_move_range *range)
29112923 {
2912
- struct f2fs_move_range range;
29132924 struct fd dst;
29142925 int err;
29152926
....@@ -2917,11 +2928,7 @@
29172928 !(filp->f_mode & FMODE_WRITE))
29182929 return -EBADF;
29192930
2920
- if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2921
- sizeof(range)))
2922
- return -EFAULT;
2923
-
2924
- dst = fdget(range.dst_fd);
2931
+ dst = fdget(range->dst_fd);
29252932 if (!dst.file)
29262933 return -EBADF;
29272934
....@@ -2934,19 +2941,23 @@
29342941 if (err)
29352942 goto err_out;
29362943
2937
- err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2938
- range.pos_out, range.len);
2944
+ err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2945
+ range->pos_out, range->len);
29392946
29402947 mnt_drop_write_file(filp);
2941
- if (err)
2942
- goto err_out;
2943
-
2944
- if (copy_to_user((struct f2fs_move_range __user *)arg,
2945
- &range, sizeof(range)))
2946
- err = -EFAULT;
29472948 err_out:
29482949 fdput(dst);
29492950 return err;
2951
+}
2952
+
2953
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2954
+{
2955
+ struct f2fs_move_range range;
2956
+
2957
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2958
+ sizeof(range)))
2959
+ return -EFAULT;
2960
+ return __f2fs_ioc_move_range(filp, &range);
29502961 }
29512962
29522963 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
....@@ -2993,14 +3004,14 @@
29933004 end_segno = min(start_segno + range.segments, dev_end_segno);
29943005
29953006 while (start_segno < end_segno) {
2996
- if (!down_write_trylock(&sbi->gc_lock)) {
3007
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
29973008 ret = -EBUSY;
29983009 goto out;
29993010 }
30003011 sm->last_victim[GC_CB] = end_segno + 1;
30013012 sm->last_victim[GC_GREEDY] = end_segno + 1;
30023013 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3003
- ret = f2fs_gc(sbi, true, true, start_segno);
3014
+ ret = f2fs_gc(sbi, true, true, true, start_segno);
30043015 if (ret == -EAGAIN)
30053016 ret = 0;
30063017 else if (ret < 0)
....@@ -3029,15 +3040,16 @@
30293040 struct dquot *transfer_to[MAXQUOTAS] = {};
30303041 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
30313042 struct super_block *sb = sbi->sb;
3032
- int err = 0;
3043
+ int err;
30333044
30343045 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3035
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
3036
- err = __dquot_transfer(inode, transfer_to);
3037
- if (err)
3038
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3039
- dqput(transfer_to[PRJQUOTA]);
3040
- }
3046
+ if (IS_ERR(transfer_to[PRJQUOTA]))
3047
+ return PTR_ERR(transfer_to[PRJQUOTA]);
3048
+
3049
+ err = __dquot_transfer(inode, transfer_to);
3050
+ if (err)
3051
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3052
+ dqput(transfer_to[PRJQUOTA]);
30413053 return err;
30423054 }
30433055
....@@ -3293,7 +3305,7 @@
32933305 if (ret)
32943306 goto out;
32953307
3296
- if (f2fs_disable_compressed_file(inode)) {
3308
+ if (!f2fs_disable_compressed_file(inode)) {
32973309 ret = -EOPNOTSUPP;
32983310 goto out;
32993311 }
....@@ -3334,21 +3346,21 @@
33343346 map.m_next_extent = &m_next_extent;
33353347 map.m_seg_type = NO_CHECK_TYPE;
33363348 map.m_may_create = false;
3337
- end = F2FS_I_SB(inode)->max_file_blocks;
3349
+ end = max_file_blocks(inode);
33383350
33393351 while (map.m_lblk < end) {
33403352 map.m_len = end - map.m_lblk;
33413353
3342
- down_write(&fi->i_gc_rwsem[WRITE]);
3354
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
33433355 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3344
- up_write(&fi->i_gc_rwsem[WRITE]);
3356
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
33453357 if (err)
33463358 return err;
33473359
33483360 map.m_lblk = m_next_extent;
33493361 }
33503362
3351
- return err;
3363
+ return 0;
33523364 }
33533365
33543366 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
....@@ -3371,7 +3383,7 @@
33713383 sizeof(block_count)))
33723384 return -EFAULT;
33733385
3374
- return f2fs_resize_fs(sbi, block_count);
3386
+ return f2fs_resize_fs(filp, block_count);
33753387 }
33763388
33773389 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
....@@ -3382,7 +3394,7 @@
33823394
33833395 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
33843396 f2fs_warn(F2FS_I_SB(inode),
3385
- "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3397
+ "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
33863398 inode->i_ino);
33873399 return -EOPNOTSUPP;
33883400 }
....@@ -3398,7 +3410,15 @@
33983410 return fsverity_ioctl_measure(filp, (void __user *)arg);
33993411 }
34003412
3401
-static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
3413
+static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3414
+{
3415
+ if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3416
+ return -EOPNOTSUPP;
3417
+
3418
+ return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3419
+}
3420
+
3421
+static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
34023422 {
34033423 struct inode *inode = file_inode(filp);
34043424 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
....@@ -3410,21 +3430,21 @@
34103430 if (!vbuf)
34113431 return -ENOMEM;
34123432
3413
- down_read(&sbi->sb_lock);
3433
+ f2fs_down_read(&sbi->sb_lock);
34143434 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
34153435 ARRAY_SIZE(sbi->raw_super->volume_name),
34163436 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3417
- up_read(&sbi->sb_lock);
3437
+ f2fs_up_read(&sbi->sb_lock);
34183438
34193439 if (copy_to_user((char __user *)arg, vbuf,
34203440 min(FSLABEL_MAX, count)))
34213441 err = -EFAULT;
34223442
3423
- kvfree(vbuf);
3443
+ kfree(vbuf);
34243444 return err;
34253445 }
34263446
3427
-static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
3447
+static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
34283448 {
34293449 struct inode *inode = file_inode(filp);
34303450 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
....@@ -3442,7 +3462,7 @@
34423462 if (err)
34433463 goto out;
34443464
3445
- down_write(&sbi->sb_lock);
3465
+ f2fs_down_write(&sbi->sb_lock);
34463466
34473467 memset(sbi->raw_super->volume_name, 0,
34483468 sizeof(sbi->raw_super->volume_name));
....@@ -3452,7 +3472,7 @@
34523472
34533473 err = f2fs_commit_super(sbi, false);
34543474
3455
- up_write(&sbi->sb_lock);
3475
+ f2fs_up_write(&sbi->sb_lock);
34563476
34573477 mnt_drop_write_file(filp);
34583478 out:
....@@ -3471,7 +3491,7 @@
34713491 if (!f2fs_compressed_file(inode))
34723492 return -EINVAL;
34733493
3474
- blocks = F2FS_I(inode)->i_compr_blocks;
3494
+ blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
34753495 return put_user(blocks, (u64 __user *)arg);
34763496 }
34773497
....@@ -3556,12 +3576,13 @@
35563576 inode_lock(inode);
35573577
35583578 writecount = atomic_read(&inode->i_writecount);
3559
- if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) {
3579
+ if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3580
+ (!(filp->f_mode & FMODE_WRITE) && writecount)) {
35603581 ret = -EBUSY;
35613582 goto out;
35623583 }
35633584
3564
- if (IS_IMMUTABLE(inode)) {
3585
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
35653586 ret = -EINVAL;
35663587 goto out;
35673588 }
....@@ -3570,16 +3591,15 @@
35703591 if (ret)
35713592 goto out;
35723593
3573
- if (!F2FS_I(inode)->i_compr_blocks)
3574
- goto out;
3575
-
3576
- F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3577
- f2fs_set_inode_flags(inode);
3594
+ set_inode_flag(inode, FI_COMPRESS_RELEASED);
35783595 inode->i_ctime = current_time(inode);
35793596 f2fs_mark_inode_dirty_sync(inode, true);
35803597
3581
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3582
- down_write(&F2FS_I(inode)->i_mmap_sem);
3598
+ if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3599
+ goto out;
3600
+
3601
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3602
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
35833603
35843604 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
35853605
....@@ -3614,8 +3634,8 @@
36143634 released_blocks += ret;
36153635 }
36163636
3617
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3618
- up_write(&F2FS_I(inode)->i_mmap_sem);
3637
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3638
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
36193639 out:
36203640 inode_unlock(inode);
36213641
....@@ -3623,14 +3643,15 @@
36233643
36243644 if (ret >= 0) {
36253645 ret = put_user(released_blocks, (u64 __user *)arg);
3626
- } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
3646
+ } else if (released_blocks &&
3647
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
36273648 set_sbi_flag(sbi, SBI_NEED_FSCK);
36283649 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3629
- "iblocks=%llu, released=%u, compr_blocks=%llu, "
3650
+ "iblocks=%llu, released=%u, compr_blocks=%u, "
36303651 "run fsck to fix.",
3631
- __func__, inode->i_ino, (u64)inode->i_blocks,
3652
+ __func__, inode->i_ino, inode->i_blocks,
36323653 released_blocks,
3633
- F2FS_I(inode)->i_compr_blocks);
3654
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
36343655 }
36353656
36363657 return ret;
....@@ -3718,20 +3739,20 @@
37183739 if (ret)
37193740 return ret;
37203741
3721
- if (F2FS_I(inode)->i_compr_blocks)
3742
+ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
37223743 goto out;
37233744
37243745 f2fs_balance_fs(F2FS_I_SB(inode), true);
37253746
37263747 inode_lock(inode);
37273748
3728
- if (!IS_IMMUTABLE(inode)) {
3749
+ if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
37293750 ret = -EINVAL;
37303751 goto unlock_inode;
37313752 }
37323753
3733
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3734
- down_write(&F2FS_I(inode)->i_mmap_sem);
3754
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3755
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
37353756
37363757 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
37373758
....@@ -3766,12 +3787,11 @@
37663787 reserved_blocks += ret;
37673788 }
37683789
3769
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3770
- up_write(&F2FS_I(inode)->i_mmap_sem);
3790
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3791
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
37713792
37723793 if (ret >= 0) {
3773
- F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3774
- f2fs_set_inode_flags(inode);
3794
+ clear_inode_flag(inode, FI_COMPRESS_RELEASED);
37753795 inode->i_ctime = current_time(inode);
37763796 f2fs_mark_inode_dirty_sync(inode, true);
37773797 }
....@@ -3782,32 +3802,472 @@
37823802
37833803 if (ret >= 0) {
37843804 ret = put_user(reserved_blocks, (u64 __user *)arg);
3785
- } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
3805
+ } else if (reserved_blocks &&
3806
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
37863807 set_sbi_flag(sbi, SBI_NEED_FSCK);
37873808 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3788
- "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
3809
+ "iblocks=%llu, reserved=%u, compr_blocks=%u, "
37893810 "run fsck to fix.",
3790
- __func__, inode->i_ino, (u64)inode->i_blocks,
3811
+ __func__, inode->i_ino, inode->i_blocks,
37913812 reserved_blocks,
3792
- F2FS_I(inode)->i_compr_blocks);
3813
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
37933814 }
37943815
37953816 return ret;
37963817 }
37973818
3798
-long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3819
+static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3820
+ pgoff_t off, block_t block, block_t len, u32 flags)
37993821 {
3800
- if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3801
- return -EIO;
3802
- if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3803
- return -ENOSPC;
3822
+ struct request_queue *q = bdev_get_queue(bdev);
3823
+ sector_t sector = SECTOR_FROM_BLOCK(block);
3824
+ sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3825
+ int ret = 0;
38043826
3827
+ if (!q)
3828
+ return -ENXIO;
3829
+
3830
+ if (flags & F2FS_TRIM_FILE_DISCARD)
3831
+ ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3832
+ blk_queue_secure_erase(q) ?
3833
+ BLKDEV_DISCARD_SECURE : 0);
3834
+
3835
+ if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3836
+ if (IS_ENCRYPTED(inode))
3837
+ ret = fscrypt_zeroout_range(inode, off, block, len);
3838
+ else
3839
+ ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3840
+ GFP_NOFS, 0);
3841
+ }
3842
+
3843
+ return ret;
3844
+}
3845
+
3846
+static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3847
+{
3848
+ struct inode *inode = file_inode(filp);
3849
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3850
+ struct address_space *mapping = inode->i_mapping;
3851
+ struct block_device *prev_bdev = NULL;
3852
+ struct f2fs_sectrim_range range;
3853
+ pgoff_t index, pg_end, prev_index = 0;
3854
+ block_t prev_block = 0, len = 0;
3855
+ loff_t end_addr;
3856
+ bool to_end = false;
3857
+ int ret = 0;
3858
+
3859
+ if (!(filp->f_mode & FMODE_WRITE))
3860
+ return -EBADF;
3861
+
3862
+ if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3863
+ sizeof(range)))
3864
+ return -EFAULT;
3865
+
3866
+ if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3867
+ !S_ISREG(inode->i_mode))
3868
+ return -EINVAL;
3869
+
3870
+ if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3871
+ !f2fs_hw_support_discard(sbi)) ||
3872
+ ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3873
+ IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3874
+ return -EOPNOTSUPP;
3875
+
3876
+ file_start_write(filp);
3877
+ inode_lock(inode);
3878
+
3879
+ if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3880
+ range.start >= inode->i_size) {
3881
+ ret = -EINVAL;
3882
+ goto err;
3883
+ }
3884
+
3885
+ if (range.len == 0)
3886
+ goto err;
3887
+
3888
+ if (inode->i_size - range.start > range.len) {
3889
+ end_addr = range.start + range.len;
3890
+ } else {
3891
+ end_addr = range.len == (u64)-1 ?
3892
+ sbi->sb->s_maxbytes : inode->i_size;
3893
+ to_end = true;
3894
+ }
3895
+
3896
+ if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3897
+ (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3898
+ ret = -EINVAL;
3899
+ goto err;
3900
+ }
3901
+
3902
+ index = F2FS_BYTES_TO_BLK(range.start);
3903
+ pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3904
+
3905
+ ret = f2fs_convert_inline_inode(inode);
3906
+ if (ret)
3907
+ goto err;
3908
+
3909
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3910
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3911
+
3912
+ ret = filemap_write_and_wait_range(mapping, range.start,
3913
+ to_end ? LLONG_MAX : end_addr - 1);
3914
+ if (ret)
3915
+ goto out;
3916
+
3917
+ truncate_inode_pages_range(mapping, range.start,
3918
+ to_end ? -1 : end_addr - 1);
3919
+
3920
+ while (index < pg_end) {
3921
+ struct dnode_of_data dn;
3922
+ pgoff_t end_offset, count;
3923
+ int i;
3924
+
3925
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
3926
+ ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3927
+ if (ret) {
3928
+ if (ret == -ENOENT) {
3929
+ index = f2fs_get_next_page_offset(&dn, index);
3930
+ continue;
3931
+ }
3932
+ goto out;
3933
+ }
3934
+
3935
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3936
+ count = min(end_offset - dn.ofs_in_node, pg_end - index);
3937
+ for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3938
+ struct block_device *cur_bdev;
3939
+ block_t blkaddr = f2fs_data_blkaddr(&dn);
3940
+
3941
+ if (!__is_valid_data_blkaddr(blkaddr))
3942
+ continue;
3943
+
3944
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3945
+ DATA_GENERIC_ENHANCE)) {
3946
+ ret = -EFSCORRUPTED;
3947
+ f2fs_put_dnode(&dn);
3948
+ goto out;
3949
+ }
3950
+
3951
+ cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3952
+ if (f2fs_is_multi_device(sbi)) {
3953
+ int di = f2fs_target_device_index(sbi, blkaddr);
3954
+
3955
+ blkaddr -= FDEV(di).start_blk;
3956
+ }
3957
+
3958
+ if (len) {
3959
+ if (prev_bdev == cur_bdev &&
3960
+ index == prev_index + len &&
3961
+ blkaddr == prev_block + len) {
3962
+ len++;
3963
+ } else {
3964
+ ret = f2fs_secure_erase(prev_bdev,
3965
+ inode, prev_index, prev_block,
3966
+ len, range.flags);
3967
+ if (ret) {
3968
+ f2fs_put_dnode(&dn);
3969
+ goto out;
3970
+ }
3971
+
3972
+ len = 0;
3973
+ }
3974
+ }
3975
+
3976
+ if (!len) {
3977
+ prev_bdev = cur_bdev;
3978
+ prev_index = index;
3979
+ prev_block = blkaddr;
3980
+ len = 1;
3981
+ }
3982
+ }
3983
+
3984
+ f2fs_put_dnode(&dn);
3985
+
3986
+ if (fatal_signal_pending(current)) {
3987
+ ret = -EINTR;
3988
+ goto out;
3989
+ }
3990
+ cond_resched();
3991
+ }
3992
+
3993
+ if (len)
3994
+ ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3995
+ prev_block, len, range.flags);
3996
+out:
3997
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3998
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3999
+err:
4000
+ inode_unlock(inode);
4001
+ file_end_write(filp);
4002
+
4003
+ return ret;
4004
+}
4005
+
4006
+static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4007
+{
4008
+ struct inode *inode = file_inode(filp);
4009
+ struct f2fs_comp_option option;
4010
+
4011
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4012
+ return -EOPNOTSUPP;
4013
+
4014
+ inode_lock_shared(inode);
4015
+
4016
+ if (!f2fs_compressed_file(inode)) {
4017
+ inode_unlock_shared(inode);
4018
+ return -ENODATA;
4019
+ }
4020
+
4021
+ option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4022
+ option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4023
+
4024
+ inode_unlock_shared(inode);
4025
+
4026
+ if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4027
+ sizeof(option)))
4028
+ return -EFAULT;
4029
+
4030
+ return 0;
4031
+}
4032
+
4033
+static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4034
+{
4035
+ struct inode *inode = file_inode(filp);
4036
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4037
+ struct f2fs_comp_option option;
4038
+ int ret = 0;
4039
+
4040
+ if (!f2fs_sb_has_compression(sbi))
4041
+ return -EOPNOTSUPP;
4042
+
4043
+ if (!(filp->f_mode & FMODE_WRITE))
4044
+ return -EBADF;
4045
+
4046
+ if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4047
+ sizeof(option)))
4048
+ return -EFAULT;
4049
+
4050
+ if (!f2fs_compressed_file(inode) ||
4051
+ option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4052
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4053
+ option.algorithm >= COMPRESS_MAX)
4054
+ return -EINVAL;
4055
+
4056
+ file_start_write(filp);
4057
+ inode_lock(inode);
4058
+
4059
+ if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4060
+ ret = -EBUSY;
4061
+ goto out;
4062
+ }
4063
+
4064
+ if (inode->i_size != 0) {
4065
+ ret = -EFBIG;
4066
+ goto out;
4067
+ }
4068
+
4069
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4070
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4071
+ F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4072
+ f2fs_mark_inode_dirty_sync(inode, true);
4073
+
4074
+ if (!f2fs_is_compress_backend_ready(inode))
4075
+ f2fs_warn(sbi, "compression algorithm is successfully set, "
4076
+ "but current kernel doesn't support this algorithm.");
4077
+out:
4078
+ inode_unlock(inode);
4079
+ file_end_write(filp);
4080
+
4081
+ return ret;
4082
+}
4083
+
4084
+static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4085
+{
4086
+ DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4087
+ struct address_space *mapping = inode->i_mapping;
4088
+ struct page *page;
4089
+ pgoff_t redirty_idx = page_idx;
4090
+ int i, page_len = 0, ret = 0;
4091
+
4092
+ page_cache_ra_unbounded(&ractl, len, 0);
4093
+
4094
+ for (i = 0; i < len; i++, page_idx++) {
4095
+ page = read_cache_page(mapping, page_idx, NULL, NULL);
4096
+ if (IS_ERR(page)) {
4097
+ ret = PTR_ERR(page);
4098
+ break;
4099
+ }
4100
+ page_len++;
4101
+ }
4102
+
4103
+ for (i = 0; i < page_len; i++, redirty_idx++) {
4104
+ page = find_lock_page(mapping, redirty_idx);
4105
+ if (!page) {
4106
+ ret = -ENOMEM;
4107
+ break;
4108
+ }
4109
+ set_page_dirty(page);
4110
+ f2fs_put_page(page, 1);
4111
+ f2fs_put_page(page, 0);
4112
+ }
4113
+
4114
+ return ret;
4115
+}
4116
+
4117
+static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4118
+{
4119
+ struct inode *inode = file_inode(filp);
4120
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4121
+ struct f2fs_inode_info *fi = F2FS_I(inode);
4122
+ pgoff_t page_idx = 0, last_idx;
4123
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
4124
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
4125
+ int count, ret;
4126
+
4127
+ if (!f2fs_sb_has_compression(sbi) ||
4128
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4129
+ return -EOPNOTSUPP;
4130
+
4131
+ if (!(filp->f_mode & FMODE_WRITE))
4132
+ return -EBADF;
4133
+
4134
+ if (!f2fs_compressed_file(inode))
4135
+ return -EINVAL;
4136
+
4137
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
4138
+
4139
+ file_start_write(filp);
4140
+ inode_lock(inode);
4141
+
4142
+ if (!f2fs_is_compress_backend_ready(inode)) {
4143
+ ret = -EOPNOTSUPP;
4144
+ goto out;
4145
+ }
4146
+
4147
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4148
+ ret = -EINVAL;
4149
+ goto out;
4150
+ }
4151
+
4152
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4153
+ if (ret)
4154
+ goto out;
4155
+
4156
+ if (!atomic_read(&fi->i_compr_blocks))
4157
+ goto out;
4158
+
4159
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4160
+
4161
+ count = last_idx - page_idx;
4162
+ while (count) {
4163
+ int len = min(cluster_size, count);
4164
+
4165
+ ret = redirty_blocks(inode, page_idx, len);
4166
+ if (ret < 0)
4167
+ break;
4168
+
4169
+ if (get_dirty_pages(inode) >= blk_per_seg)
4170
+ filemap_fdatawrite(inode->i_mapping);
4171
+
4172
+ count -= len;
4173
+ page_idx += len;
4174
+ }
4175
+
4176
+ if (!ret)
4177
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4178
+ LLONG_MAX);
4179
+
4180
+ if (ret)
4181
+ f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4182
+ __func__, ret);
4183
+out:
4184
+ inode_unlock(inode);
4185
+ file_end_write(filp);
4186
+
4187
+ return ret;
4188
+}
4189
+
+static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	pgoff_t page_idx = 0, last_idx;
+	unsigned int blk_per_seg = sbi->blocks_per_seg;
+	int cluster_size = F2FS_I(inode)->i_cluster_size;
+	int count, ret;
+
+	if (!f2fs_sb_has_compression(sbi) ||
+			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
+		return -EOPNOTSUPP;
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	if (!f2fs_compressed_file(inode))
+		return -EINVAL;
+
+	f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+	file_start_write(filp);
+	inode_lock(inode);
+
+	if (!f2fs_is_compress_backend_ready(inode)) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+	if (ret)
+		goto out;
+
+	set_inode_flag(inode, FI_ENABLE_COMPRESS);
+
+	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+	count = last_idx - page_idx;
+	while (count) {
+		int len = min(cluster_size, count);
+
+		ret = redirty_blocks(inode, page_idx, len);
+		if (ret < 0)
+			break;
+
+		if (get_dirty_pages(inode) >= blk_per_seg)
+			filemap_fdatawrite(inode->i_mapping);
+
+		count -= len;
+		page_idx += len;
+	}
+
+	if (!ret)
+		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
+							LLONG_MAX);
+
+	clear_inode_flag(inode, FI_ENABLE_COMPRESS);
+
+	if (ret)
+		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
+			  __func__, ret);
+out:
+	inode_unlock(inode);
+	file_end_write(filp);
+
+	return ret;
+}
+
+static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
 	switch (cmd) {
-	case F2FS_IOC_GETFLAGS:
+	case FS_IOC_GETFLAGS:
 		return f2fs_ioc_getflags(filp, arg);
-	case F2FS_IOC_SETFLAGS:
+	case FS_IOC_SETFLAGS:
 		return f2fs_ioc_setflags(filp, arg);
-	case F2FS_IOC_GETVERSION:
+	case FS_IOC_GETVERSION:
 		return f2fs_ioc_getversion(filp, arg);
 	case F2FS_IOC_START_ATOMIC_WRITE:
 		return f2fs_ioc_start_atomic_write(filp);
@@ -3823,11 +4283,11 @@
 		return f2fs_ioc_shutdown(filp, arg);
 	case FITRIM:
 		return f2fs_ioc_fitrim(filp, arg);
-	case F2FS_IOC_SET_ENCRYPTION_POLICY:
+	case FS_IOC_SET_ENCRYPTION_POLICY:
 		return f2fs_ioc_set_encryption_policy(filp, arg);
-	case F2FS_IOC_GET_ENCRYPTION_POLICY:
+	case FS_IOC_GET_ENCRYPTION_POLICY:
 		return f2fs_ioc_get_encryption_policy(filp, arg);
-	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+	case FS_IOC_GET_ENCRYPTION_PWSALT:
 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
 		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
@@ -3855,9 +4315,9 @@
 		return f2fs_ioc_flush_device(filp, arg);
 	case F2FS_IOC_GET_FEATURES:
 		return f2fs_ioc_get_features(filp, arg);
-	case F2FS_IOC_FSGETXATTR:
+	case FS_IOC_FSGETXATTR:
 		return f2fs_ioc_fsgetxattr(filp, arg);
-	case F2FS_IOC_FSSETXATTR:
+	case FS_IOC_FSSETXATTR:
 		return f2fs_ioc_fssetxattr(filp, arg);
 	case F2FS_IOC_GET_PIN_FILE:
 		return f2fs_ioc_get_pin_file(filp, arg);
@@ -3871,19 +4331,41 @@
 		return f2fs_ioc_enable_verity(filp, arg);
 	case FS_IOC_MEASURE_VERITY:
 		return f2fs_ioc_measure_verity(filp, arg);
-	case F2FS_IOC_GET_VOLUME_NAME:
-		return f2fs_get_volume_name(filp, arg);
-	case F2FS_IOC_SET_VOLUME_NAME:
-		return f2fs_set_volume_name(filp, arg);
+	case FS_IOC_READ_VERITY_METADATA:
+		return f2fs_ioc_read_verity_metadata(filp, arg);
+	case FS_IOC_GETFSLABEL:
+		return f2fs_ioc_getfslabel(filp, arg);
+	case FS_IOC_SETFSLABEL:
+		return f2fs_ioc_setfslabel(filp, arg);
 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
 		return f2fs_get_compress_blocks(filp, arg);
 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
 		return f2fs_release_compress_blocks(filp, arg);
 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
 		return f2fs_reserve_compress_blocks(filp, arg);
+	case F2FS_IOC_SEC_TRIM_FILE:
+		return f2fs_sec_trim_file(filp, arg);
+	case F2FS_IOC_GET_COMPRESS_OPTION:
+		return f2fs_ioc_get_compress_option(filp, arg);
+	case F2FS_IOC_SET_COMPRESS_OPTION:
+		return f2fs_ioc_set_compress_option(filp, arg);
+	case F2FS_IOC_DECOMPRESS_FILE:
+		return f2fs_ioc_decompress_file(filp, arg);
+	case F2FS_IOC_COMPRESS_FILE:
+		return f2fs_ioc_compress_file(filp, arg);
 	default:
 		return -ENOTTY;
 	}
+}
+
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
+		return -EIO;
+	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
+		return -ENOSPC;
+
+	return __f2fs_ioctl(filp, cmd, arg);
 }
 
 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -3926,6 +4408,16 @@
 		}
 	} else {
 		inode_lock(inode);
+	}
+
+	if (unlikely(IS_IMMUTABLE(inode))) {
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+		ret = -EPERM;
+		goto unlock;
 	}
 
 	ret = generic_write_checks(iocb, from);
@@ -3986,12 +4478,18 @@
 		clear_inode_flag(inode, FI_NO_PREALLOC);
 
 		/* if we couldn't write data, we should deallocate blocks. */
-		if (preallocated && i_size_read(inode) < target_size)
+		if (preallocated && i_size_read(inode) < target_size) {
+			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 			f2fs_truncate(inode);
+			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		}
 
 		if (ret > 0)
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
 	}
+unlock:
 	inode_unlock(inode);
 out:
 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
@@ -4002,27 +4500,87 @@
 }
 
 #ifdef CONFIG_COMPAT
+struct compat_f2fs_gc_range {
+	u32 sync;
+	compat_u64 start;
+	compat_u64 len;
+};
+#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
+						struct compat_f2fs_gc_range)
+
+static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
+{
+	struct compat_f2fs_gc_range __user *urange;
+	struct f2fs_gc_range range;
+	int err;
+
+	urange = compat_ptr(arg);
+	err = get_user(range.sync, &urange->sync);
+	err |= get_user(range.start, &urange->start);
+	err |= get_user(range.len, &urange->len);
+	if (err)
+		return -EFAULT;
+
+	return __f2fs_ioc_gc_range(file, &range);
+}
+
+struct compat_f2fs_move_range {
+	u32 dst_fd;
+	compat_u64 pos_in;
+	compat_u64 pos_out;
+	compat_u64 len;
+};
+#define F2FS_IOC32_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, \
+					struct compat_f2fs_move_range)
+
+static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
+{
+	struct compat_f2fs_move_range __user *urange;
+	struct f2fs_move_range range;
+	int err;
+
+	urange = compat_ptr(arg);
+	err = get_user(range.dst_fd, &urange->dst_fd);
+	err |= get_user(range.pos_in, &urange->pos_in);
+	err |= get_user(range.pos_out, &urange->pos_out);
+	err |= get_user(range.len, &urange->len);
+	if (err)
+		return -EFAULT;
+
+	return __f2fs_ioc_move_range(file, &range);
+}
+
 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
+		return -EIO;
+	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
+		return -ENOSPC;
+
 	switch (cmd) {
-	case F2FS_IOC32_GETFLAGS:
-		cmd = F2FS_IOC_GETFLAGS;
+	case FS_IOC32_GETFLAGS:
+		cmd = FS_IOC_GETFLAGS;
 		break;
-	case F2FS_IOC32_SETFLAGS:
-		cmd = F2FS_IOC_SETFLAGS;
+	case FS_IOC32_SETFLAGS:
+		cmd = FS_IOC_SETFLAGS;
 		break;
-	case F2FS_IOC32_GETVERSION:
-		cmd = F2FS_IOC_GETVERSION;
+	case FS_IOC32_GETVERSION:
+		cmd = FS_IOC_GETVERSION;
 		break;
+	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
+		return f2fs_compat_ioc_gc_range(file, arg);
+	case F2FS_IOC32_MOVE_RANGE:
+		return f2fs_compat_ioc_move_range(file, arg);
 	case F2FS_IOC_START_ATOMIC_WRITE:
 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
 	case F2FS_IOC_START_VOLATILE_WRITE:
 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
 	case F2FS_IOC_SHUTDOWN:
-	case F2FS_IOC_SET_ENCRYPTION_POLICY:
-	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
-	case F2FS_IOC_GET_ENCRYPTION_POLICY:
+	case FITRIM:
+	case FS_IOC_SET_ENCRYPTION_POLICY:
+	case FS_IOC_GET_ENCRYPTION_PWSALT:
+	case FS_IOC_GET_ENCRYPTION_POLICY:
 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
 	case FS_IOC_ADD_ENCRYPTION_KEY:
 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
@@ -4030,30 +4588,34 @@
 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
 	case FS_IOC_GET_ENCRYPTION_NONCE:
 	case F2FS_IOC_GARBAGE_COLLECT:
-	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
 	case F2FS_IOC_WRITE_CHECKPOINT:
 	case F2FS_IOC_DEFRAGMENT:
-	case F2FS_IOC_MOVE_RANGE:
 	case F2FS_IOC_FLUSH_DEVICE:
 	case F2FS_IOC_GET_FEATURES:
-	case F2FS_IOC_FSGETXATTR:
-	case F2FS_IOC_FSSETXATTR:
+	case FS_IOC_FSGETXATTR:
+	case FS_IOC_FSSETXATTR:
 	case F2FS_IOC_GET_PIN_FILE:
 	case F2FS_IOC_SET_PIN_FILE:
 	case F2FS_IOC_PRECACHE_EXTENTS:
 	case F2FS_IOC_RESIZE_FS:
 	case FS_IOC_ENABLE_VERITY:
 	case FS_IOC_MEASURE_VERITY:
-	case F2FS_IOC_GET_VOLUME_NAME:
-	case F2FS_IOC_SET_VOLUME_NAME:
+	case FS_IOC_READ_VERITY_METADATA:
+	case FS_IOC_GETFSLABEL:
+	case FS_IOC_SETFSLABEL:
 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+	case F2FS_IOC_SEC_TRIM_FILE:
+	case F2FS_IOC_GET_COMPRESS_OPTION:
+	case F2FS_IOC_SET_COMPRESS_OPTION:
+	case F2FS_IOC_DECOMPRESS_FILE:
+	case F2FS_IOC_COMPRESS_FILE:
 		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
-	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
 }
 #endif
 