forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/fs/f2fs/file.c
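This commit updates fs/f2fs/file.c, largely tracking later mainline f2fs: the android_fs trace hooks and "trace.h" are dropped, the per-inode and per-sb locks (i_sem, i_mmap_sem, i_gc_rwsem, sb_lock, gc_lock, pin_sem) move from bare down_read()/up_read()/down_write()/up_write() to f2fs_down_*/f2fs_up_* helpers, compressed-file release/reserve is tracked via FI_COMPRESS_RELEASED and an atomic i_compr_blocks counter, and new ioctls (F2FS_IOC_SEC_TRIM_FILE, F2FS_IOC_GET/SET_COMPRESS_OPTION, F2FS_IOC_DECOMPRESS_FILE/COMPRESS_FILE, FS_IOC_READ_VERITY_METADATA, FS_IOC_GETFSLABEL/SETFSLABEL) are wired up. The f2fs_down_*/f2fs_up_* helpers live in f2fs.h, which is outside this diff; as a rough orientation only — the struct layout and the CONFIG_F2FS_UNFAIR_RWSEM branch below are an assumption based on upstream f2fs, not a quote of this kernel tree — they wrap a plain rw_semaphore along these lines:

/*
 * Assumed shape of the locking helpers used throughout this patch.
 * Defined in f2fs.h (not part of this diff); names follow the upstream
 * f2fs_rwsem work and are an assumption, not taken from this patch.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	/* lets readers slip past queued writers to cut read tail latency */
	wait_queue_head_t read_waiters;
#endif
};

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
#else
	down_read(&sem->internal_rwsem);
#endif
}

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wake_up_all(&sem->read_waiters);
#endif
}

With CONFIG_F2FS_UNFAIR_RWSEM disabled the helpers collapse to the original rwsem calls, so the down_*/up_* substitution in the hunks below is behavior-neutral under that assumption; the functional changes in this patch come from the other items listed above.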
....@@ -21,6 +21,7 @@
2121 #include <linux/uuid.h>
2222 #include <linux/file.h>
2323 #include <linux/nls.h>
24
+#include <linux/sched/signal.h>
2425
2526 #include "f2fs.h"
2627 #include "node.h"
....@@ -28,18 +29,17 @@
2829 #include "xattr.h"
2930 #include "acl.h"
3031 #include "gc.h"
31
-#include "trace.h"
3232 #include <trace/events/f2fs.h>
33
-#include <trace/events/android_fs.h>
33
+#include <uapi/linux/f2fs.h>
3434
3535 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
3636 {
3737 struct inode *inode = file_inode(vmf->vma->vm_file);
3838 vm_fault_t ret;
3939
40
- down_read(&F2FS_I(inode)->i_mmap_sem);
40
+ f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
4141 ret = filemap_fault(vmf);
42
- up_read(&F2FS_I(inode)->i_mmap_sem);
42
+ f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
4343
4444 if (!ret)
4545 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
....@@ -59,6 +59,12 @@
5959 bool need_alloc = true;
6060 int err = 0;
6161
62
+ if (unlikely(IS_IMMUTABLE(inode)))
63
+ return VM_FAULT_SIGBUS;
64
+
65
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
66
+ return VM_FAULT_SIGBUS;
67
+
6268 if (unlikely(f2fs_cp_error(sbi))) {
6369 err = -EIO;
6470 goto err;
....@@ -69,6 +75,10 @@
6975 goto err;
7076 }
7177
78
+ err = f2fs_convert_inline_inode(inode);
79
+ if (err)
80
+ goto err;
81
+
7282 #ifdef CONFIG_F2FS_FS_COMPRESSION
7383 if (f2fs_compressed_file(inode)) {
7484 int ret = f2fs_is_compressed_cluster(inode, page->index);
....@@ -77,10 +87,6 @@
7787 err = ret;
7888 goto err;
7989 } else if (ret) {
80
- if (ret < F2FS_I(inode)->i_cluster_size) {
81
- err = -EAGAIN;
82
- goto err;
83
- }
8490 need_alloc = false;
8591 }
8692 }
....@@ -94,7 +100,7 @@
94100 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
95101
96102 file_update_time(vmf->vma->vm_file);
97
- down_read(&F2FS_I(inode)->i_mmap_sem);
103
+ f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
98104 lock_page(page);
99105 if (unlikely(page->mapping != inode->i_mapping ||
100106 page_offset(page) > i_size_read(inode) ||
....@@ -106,11 +112,10 @@
106112
107113 if (need_alloc) {
108114 /* block allocation */
109
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
115
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
110116 set_new_dnode(&dn, inode, NULL, NULL, 0);
111117 err = f2fs_get_block(&dn, page->index);
112
- f2fs_put_dnode(&dn);
113
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
118
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
114119 }
115120
116121 #ifdef CONFIG_F2FS_FS_COMPRESSION
....@@ -153,7 +158,7 @@
153158
154159 trace_f2fs_vm_page_mkwrite(page, DATA);
155160 out_sem:
156
- up_read(&F2FS_I(inode)->i_mmap_sem);
161
+ f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
157162
158163 sb_end_pagefault(inode->i_sb);
159164 err:
....@@ -164,6 +169,9 @@
164169 .fault = f2fs_filemap_fault,
165170 .map_pages = filemap_map_pages,
166171 .page_mkwrite = f2fs_vm_page_mkwrite,
172
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
173
+ .allow_speculation = filemap_allow_speculation,
174
+#endif
167175 };
168176
169177 static int get_parent_ino(struct inode *inode, nid_t *pino)
....@@ -231,13 +239,13 @@
231239 struct f2fs_inode_info *fi = F2FS_I(inode);
232240 nid_t pino;
233241
234
- down_write(&fi->i_sem);
242
+ f2fs_down_write(&fi->i_sem);
235243 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
236244 get_parent_ino(inode, &pino)) {
237245 f2fs_i_pino_write(inode, pino);
238246 file_got_pino(inode);
239247 }
240
- up_write(&fi->i_sem);
248
+ f2fs_up_write(&fi->i_sem);
241249 }
242250
243251 static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
....@@ -275,20 +283,10 @@
275283 };
276284 unsigned int seq_id = 0;
277285
278
- if (unlikely(f2fs_readonly(inode->i_sb) ||
279
- is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
286
+ if (unlikely(f2fs_readonly(inode->i_sb)))
280287 return 0;
281288
282289 trace_f2fs_sync_file_enter(inode);
283
-
284
- if (trace_android_fs_fsync_start_enabled()) {
285
- char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
286
-
287
- path = android_fstrace_get_pathname(pathbuf,
288
- MAX_TRACE_PATHBUF_LEN, inode);
289
- trace_android_fs_fsync_start(inode,
290
- current->pid, path, current->comm);
291
- }
292290
293291 if (S_ISDIR(inode->i_mode))
294292 goto go_write;
....@@ -300,7 +298,7 @@
300298 ret = file_write_and_wait_range(file, start, end);
301299 clear_inode_flag(inode, FI_NEED_IPU);
302300
303
- if (ret) {
301
+ if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
304302 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
305303 return ret;
306304 }
....@@ -331,9 +329,9 @@
331329 * Both of fdatasync() and fsync() are able to be recovered from
332330 * sudden-power-off.
333331 */
334
- down_read(&F2FS_I(inode)->i_sem);
332
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
335333 cp_reason = need_do_checkpoint(inode);
336
- up_read(&F2FS_I(inode)->i_sem);
334
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
337335
338336 if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
339337 /* all the dirty node pages should be flushed for POR */
....@@ -395,9 +393,6 @@
395393 f2fs_update_time(sbi, REQ_TIME);
396394 out:
397395 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
398
- f2fs_trace_ios(NULL, 1);
399
- trace_android_fs_fsync_end(inode, start, end - start);
400
-
401396 return ret;
402397 }
403398
....@@ -408,32 +403,15 @@
408403 return f2fs_do_sync_file(file, start, end, datasync, false);
409404 }
410405
411
-static pgoff_t __get_first_dirty_index(struct address_space *mapping,
412
- pgoff_t pgofs, int whence)
413
-{
414
- struct page *page;
415
- int nr_pages;
416
-
417
- if (whence != SEEK_DATA)
418
- return 0;
419
-
420
- /* find first dirty page index */
421
- nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
422
- 1, &page);
423
- if (!nr_pages)
424
- return ULONG_MAX;
425
- pgofs = page->index;
426
- put_page(page);
427
- return pgofs;
428
-}
429
-
430
-static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
431
- pgoff_t dirty, pgoff_t pgofs, int whence)
406
+static bool __found_offset(struct address_space *mapping, block_t blkaddr,
407
+ pgoff_t index, int whence)
432408 {
433409 switch (whence) {
434410 case SEEK_DATA:
435
- if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
436
- __is_valid_data_blkaddr(blkaddr))
411
+ if (__is_valid_data_blkaddr(blkaddr))
412
+ return true;
413
+ if (blkaddr == NEW_ADDR &&
414
+ xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
437415 return true;
438416 break;
439417 case SEEK_HOLE:
....@@ -449,7 +427,7 @@
449427 struct inode *inode = file->f_mapping->host;
450428 loff_t maxbytes = inode->i_sb->s_maxbytes;
451429 struct dnode_of_data dn;
452
- pgoff_t pgofs, end_offset, dirty;
430
+ pgoff_t pgofs, end_offset;
453431 loff_t data_ofs = offset;
454432 loff_t isize;
455433 int err = 0;
....@@ -461,15 +439,17 @@
461439 goto fail;
462440
463441 /* handle inline data case */
464
- if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
465
- if (whence == SEEK_HOLE)
442
+ if (f2fs_has_inline_data(inode)) {
443
+ if (whence == SEEK_HOLE) {
466444 data_ofs = isize;
467
- goto found;
445
+ goto found;
446
+ } else if (whence == SEEK_DATA) {
447
+ data_ofs = offset;
448
+ goto found;
449
+ }
468450 }
469451
470452 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
471
-
472
- dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
473453
474454 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
475455 set_new_dnode(&dn, inode, NULL, NULL, 0);
....@@ -503,7 +483,7 @@
503483 goto fail;
504484 }
505485
506
- if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
486
+ if (__found_offset(file->f_mapping, blkaddr,
507487 pgofs, whence)) {
508488 f2fs_put_dnode(&dn);
509489 goto found;
....@@ -529,6 +509,9 @@
529509 struct inode *inode = file->f_mapping->host;
530510 loff_t maxbytes = inode->i_sb->s_maxbytes;
531511
512
+ if (f2fs_compressed_file(inode))
513
+ maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
514
+
532515 switch (whence) {
533516 case SEEK_SET:
534517 case SEEK_CUR:
....@@ -548,18 +531,12 @@
548531 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
549532 {
550533 struct inode *inode = file_inode(file);
551
- int err;
552534
553535 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
554536 return -EIO;
555537
556538 if (!f2fs_is_compress_backend_ready(inode))
557539 return -EOPNOTSUPP;
558
-
559
- /* we don't need to use inline_data strictly */
560
- err = f2fs_convert_inline_inode(inode);
561
- if (err)
562
- return err;
563540
564541 file_accessed(file);
565542 vma->vm_ops = &f2fs_file_vm_ops;
....@@ -596,7 +573,7 @@
596573 bool compressed_cluster = false;
597574 int cluster_index = 0, valid_blocks = 0;
598575 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
599
- bool released = !F2FS_I(dn->inode)->i_compr_blocks;
576
+ bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
600577
601578 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
602579 base = get_extra_isize(dn->inode);
....@@ -651,7 +628,8 @@
651628 */
652629 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
653630 dn->inode) + ofs;
654
- f2fs_update_extent_cache_range(dn, fofs, 0, len);
631
+ f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
632
+ f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
655633 dec_valid_block_count(sbi, dn->inode, nr_free);
656634 }
657635 dn->ofs_in_node = ofs;
....@@ -713,7 +691,7 @@
713691
714692 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
715693
716
- if (free_from >= sbi->max_file_blocks)
694
+ if (free_from >= max_file_blocks(inode))
717695 goto free_partial;
718696
719697 if (lock)
....@@ -785,11 +763,14 @@
785763 return err;
786764
787765 #ifdef CONFIG_F2FS_FS_COMPRESSION
788
- if (from != free_from)
766
+ if (from != free_from) {
789767 err = f2fs_truncate_partial_cluster(inode, from, lock);
768
+ if (err)
769
+ return err;
770
+ }
790771 #endif
791772
792
- return err;
773
+ return 0;
793774 }
794775
795776 int f2fs_truncate(struct inode *inode)
....@@ -887,14 +868,11 @@
887868 if (ia_valid & ATTR_GID)
888869 inode->i_gid = attr->ia_gid;
889870 if (ia_valid & ATTR_ATIME)
890
- inode->i_atime = timespec64_trunc(attr->ia_atime,
891
- inode->i_sb->s_time_gran);
871
+ inode->i_atime = attr->ia_atime;
892872 if (ia_valid & ATTR_MTIME)
893
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
894
- inode->i_sb->s_time_gran);
873
+ inode->i_mtime = attr->ia_mtime;
895874 if (ia_valid & ATTR_CTIME)
896
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
897
- inode->i_sb->s_time_gran);
875
+ inode->i_ctime = attr->ia_ctime;
898876 if (ia_valid & ATTR_MODE) {
899877 umode_t mode = attr->ia_mode;
900878
....@@ -915,6 +893,14 @@
915893
916894 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
917895 return -EIO;
896
+
897
+ if (unlikely(IS_IMMUTABLE(inode)))
898
+ return -EPERM;
899
+
900
+ if (unlikely(IS_APPEND(inode) &&
901
+ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
902
+ ATTR_GID | ATTR_TIMES_SET))))
903
+ return -EPERM;
918904
919905 if ((attr->ia_valid & ATTR_SIZE) &&
920906 !f2fs_is_compress_backend_ready(inode))
....@@ -974,8 +960,8 @@
974960 return err;
975961 }
976962
977
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
978
- down_write(&F2FS_I(inode)->i_mmap_sem);
963
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
964
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
979965
980966 truncate_setsize(inode, attr->ia_size);
981967
....@@ -985,8 +971,8 @@
985971 * do not trim all blocks after i_size if target size is
986972 * larger than i_size.
987973 */
988
- up_write(&F2FS_I(inode)->i_mmap_sem);
989
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
974
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
975
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
990976 if (err)
991977 return err;
992978
....@@ -1000,8 +986,10 @@
1000986
1001987 if (attr->ia_valid & ATTR_MODE) {
1002988 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
1003
- if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
1004
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
989
+
990
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
991
+ if (!err)
992
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
1005993 clear_inode_flag(inode, FI_ACL_MODE);
1006994 }
1007995 }
....@@ -1124,8 +1112,8 @@
11241112 blk_start = (loff_t)pg_start << PAGE_SHIFT;
11251113 blk_end = (loff_t)pg_end << PAGE_SHIFT;
11261114
1127
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1128
- down_write(&F2FS_I(inode)->i_mmap_sem);
1115
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1116
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
11291117
11301118 truncate_pagecache_range(inode, blk_start, blk_end - 1);
11311119
....@@ -1133,8 +1121,8 @@
11331121 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
11341122 f2fs_unlock_op(sbi);
11351123
1136
- up_write(&F2FS_I(inode)->i_mmap_sem);
1137
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1124
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1125
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
11381126 }
11391127 }
11401128
....@@ -1245,7 +1233,7 @@
12451233 if (ret)
12461234 return ret;
12471235
1248
- ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1236
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
12491237 if (ret) {
12501238 f2fs_put_dnode(&dn);
12511239 return ret;
....@@ -1367,8 +1355,8 @@
13671355 f2fs_balance_fs(sbi, true);
13681356
13691357 /* avoid gc operation during block exchange */
1370
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1371
- down_write(&F2FS_I(inode)->i_mmap_sem);
1358
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1359
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
13721360
13731361 f2fs_lock_op(sbi);
13741362 f2fs_drop_extent_tree(inode);
....@@ -1376,8 +1364,8 @@
13761364 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
13771365 f2fs_unlock_op(sbi);
13781366
1379
- up_write(&F2FS_I(inode)->i_mmap_sem);
1380
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1367
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1368
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
13811369 return ret;
13821370 }
13831371
....@@ -1407,15 +1395,13 @@
14071395 return ret;
14081396
14091397 /* write out all moved pages, if possible */
1410
- down_write(&F2FS_I(inode)->i_mmap_sem);
1398
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
14111399 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
14121400 truncate_pagecache(inode, offset);
14131401
14141402 new_size = i_size_read(inode) - len;
1415
- truncate_pagecache(inode, new_size);
1416
-
14171403 ret = f2fs_truncate_blocks(inode, new_size, true);
1418
- up_write(&F2FS_I(inode)->i_mmap_sem);
1404
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
14191405 if (!ret)
14201406 f2fs_i_size_write(inode, new_size);
14211407 return ret;
....@@ -1451,14 +1437,22 @@
14511437 ret = -ENOSPC;
14521438 break;
14531439 }
1454
- if (dn->data_blkaddr != NEW_ADDR) {
1455
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1456
- dn->data_blkaddr = NEW_ADDR;
1457
- f2fs_set_data_blkaddr(dn);
1440
+
1441
+ if (dn->data_blkaddr == NEW_ADDR)
1442
+ continue;
1443
+
1444
+ if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1445
+ DATA_GENERIC_ENHANCE)) {
1446
+ ret = -EFSCORRUPTED;
1447
+ break;
14581448 }
1449
+
1450
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1451
+ dn->data_blkaddr = NEW_ADDR;
1452
+ f2fs_set_data_blkaddr(dn);
14591453 }
14601454
1461
- f2fs_update_extent_cache_range(dn, start, 0, index - start);
1455
+ f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
14621456
14631457 return ret;
14641458 }
....@@ -1514,8 +1508,8 @@
15141508 unsigned int end_offset;
15151509 pgoff_t end;
15161510
1517
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1518
- down_write(&F2FS_I(inode)->i_mmap_sem);
1511
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1512
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
15191513
15201514 truncate_pagecache_range(inode,
15211515 (loff_t)index << PAGE_SHIFT,
....@@ -1527,8 +1521,8 @@
15271521 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
15281522 if (ret) {
15291523 f2fs_unlock_op(sbi);
1530
- up_write(&F2FS_I(inode)->i_mmap_sem);
1531
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1524
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1525
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
15321526 goto out;
15331527 }
15341528
....@@ -1539,8 +1533,8 @@
15391533 f2fs_put_dnode(&dn);
15401534
15411535 f2fs_unlock_op(sbi);
1542
- up_write(&F2FS_I(inode)->i_mmap_sem);
1543
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1536
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1537
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
15441538
15451539 f2fs_balance_fs(sbi, dn.node_changed);
15461540
....@@ -1596,9 +1590,9 @@
15961590
15971591 f2fs_balance_fs(sbi, true);
15981592
1599
- down_write(&F2FS_I(inode)->i_mmap_sem);
1593
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16001594 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1601
- up_write(&F2FS_I(inode)->i_mmap_sem);
1595
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
16021596 if (ret)
16031597 return ret;
16041598
....@@ -1613,8 +1607,8 @@
16131607 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
16141608
16151609 /* avoid gc operation during block exchange */
1616
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1617
- down_write(&F2FS_I(inode)->i_mmap_sem);
1610
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1611
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16181612 truncate_pagecache(inode, offset);
16191613
16201614 while (!ret && idx > pg_start) {
....@@ -1630,14 +1624,14 @@
16301624 idx + delta, nr, false);
16311625 f2fs_unlock_op(sbi);
16321626 }
1633
- up_write(&F2FS_I(inode)->i_mmap_sem);
1634
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1627
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1628
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
16351629
16361630 /* write out all moved pages, if possible */
1637
- down_write(&F2FS_I(inode)->i_mmap_sem);
1631
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
16381632 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
16391633 truncate_pagecache(inode, offset);
1640
- up_write(&F2FS_I(inode)->i_mmap_sem);
1634
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
16411635
16421636 if (!ret)
16431637 f2fs_i_size_write(inode, new_size);
....@@ -1651,9 +1645,10 @@
16511645 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
16521646 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
16531647 .m_may_create = true };
1654
- pgoff_t pg_end;
1648
+ pgoff_t pg_start, pg_end;
16551649 loff_t new_size = i_size_read(inode);
16561650 loff_t off_end;
1651
+ block_t expanded = 0;
16571652 int err;
16581653
16591654 err = inode_newsize_ok(inode, (len + offset));
....@@ -1666,11 +1661,12 @@
16661661
16671662 f2fs_balance_fs(sbi, true);
16681663
1664
+ pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
16691665 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
16701666 off_end = (offset + len) & (PAGE_SIZE - 1);
16711667
1672
- map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1673
- map.m_len = pg_end - map.m_lblk;
1668
+ map.m_lblk = pg_start;
1669
+ map.m_len = pg_end - pg_start;
16741670 if (off_end)
16751671 map.m_len++;
16761672
....@@ -1678,51 +1674,49 @@
16781674 return 0;
16791675
16801676 if (f2fs_is_pinned_file(inode)) {
1681
- block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1682
- sbi->log_blocks_per_seg;
1683
- block_t done = 0;
1677
+ block_t sec_blks = BLKS_PER_SEC(sbi);
1678
+ block_t sec_len = roundup(map.m_len, sec_blks);
16841679
1685
- if (map.m_len % sbi->blocks_per_seg)
1686
- len += sbi->blocks_per_seg;
1687
-
1688
- map.m_len = sbi->blocks_per_seg;
1680
+ map.m_len = sec_blks;
16891681 next_alloc:
16901682 if (has_not_enough_free_secs(sbi, 0,
16911683 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1692
- down_write(&sbi->gc_lock);
1693
- err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1684
+ f2fs_down_write(&sbi->gc_lock);
1685
+ err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
16941686 if (err && err != -ENODATA && err != -EAGAIN)
16951687 goto out_err;
16961688 }
16971689
1698
- down_write(&sbi->pin_sem);
1699
- map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1690
+ f2fs_down_write(&sbi->pin_sem);
17001691
17011692 f2fs_lock_op(sbi);
1702
- f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
1693
+ f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
17031694 f2fs_unlock_op(sbi);
17041695
1696
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
17051697 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1706
- up_write(&sbi->pin_sem);
17071698
1708
- done += map.m_len;
1709
- len -= map.m_len;
1699
+ f2fs_up_write(&sbi->pin_sem);
1700
+
1701
+ expanded += map.m_len;
1702
+ sec_len -= map.m_len;
17101703 map.m_lblk += map.m_len;
1711
- if (!err && len)
1704
+ if (!err && sec_len)
17121705 goto next_alloc;
17131706
1714
- map.m_len = done;
1707
+ map.m_len = expanded;
17151708 } else {
17161709 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1710
+ expanded = map.m_len;
17171711 }
17181712 out_err:
17191713 if (err) {
17201714 pgoff_t last_off;
17211715
1722
- if (!map.m_len)
1716
+ if (!expanded)
17231717 return err;
17241718
1725
- last_off = map.m_lblk + map.m_len - 1;
1719
+ last_off = pg_start + expanded - 1;
17261720
17271721 /* update new size to the failed position */
17281722 new_size = (last_off == pg_end) ? offset + len :
....@@ -1773,6 +1767,10 @@
17731767 return -EOPNOTSUPP;
17741768
17751769 inode_lock(inode);
1770
+
1771
+ ret = file_modified(file);
1772
+ if (ret)
1773
+ goto out;
17761774
17771775 if (mode & FALLOC_FL_PUNCH_HOLE) {
17781776 if (offset >= inode->i_size)
....@@ -1846,7 +1844,8 @@
18461844 struct f2fs_inode_info *fi = F2FS_I(inode);
18471845 u32 masked_flags = fi->i_flags & mask;
18481846
1849
- f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1847
+ /* mask can be shrunk by flags_valid selector */
1848
+ iflags &= mask;
18501849
18511850 /* Is it quota file? Do not allow user to mess with it */
18521851 if (IS_NOQUOTA(inode))
....@@ -1868,21 +1867,16 @@
18681867
18691868 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
18701869 if (masked_flags & F2FS_COMPR_FL) {
1871
- if (f2fs_disable_compressed_file(inode))
1870
+ if (!f2fs_disable_compressed_file(inode))
18721871 return -EINVAL;
1873
- }
1874
- if (iflags & F2FS_NOCOMP_FL)
1875
- return -EINVAL;
1876
- if (iflags & F2FS_COMPR_FL) {
1872
+ } else {
18771873 if (!f2fs_may_compress(inode))
18781874 return -EINVAL;
1879
-
1880
- set_compress_context(inode);
1875
+ if (S_ISREG(inode->i_mode) && inode->i_size)
1876
+ return -EINVAL;
1877
+ if (set_compress_context(inode))
1878
+ return -EOPNOTSUPP;
18811879 }
1882
- }
1883
- if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1884
- if (masked_flags & F2FS_COMPR_FL)
1885
- return -EINVAL;
18861880 }
18871881
18881882 fi->i_flags = iflags | (fi->i_flags & ~mask);
....@@ -2071,7 +2065,10 @@
20712065
20722066 inode_lock(inode);
20732067
2074
- f2fs_disable_compressed_file(inode);
2068
+ if (!f2fs_disable_compressed_file(inode)) {
2069
+ ret = -EINVAL;
2070
+ goto out;
2071
+ }
20752072
20762073 if (f2fs_is_atomic_file(inode)) {
20772074 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
....@@ -2083,7 +2080,7 @@
20832080 if (ret)
20842081 goto out;
20852082
2086
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2083
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
20872084
20882085 /*
20892086 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
....@@ -2094,7 +2091,7 @@
20942091 inode->i_ino, get_dirty_pages(inode));
20952092 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
20962093 if (ret) {
2097
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2094
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
20982095 goto out;
20992096 }
21002097
....@@ -2107,7 +2104,7 @@
21072104 /* add inode in inmem_list first and set atomic_file */
21082105 set_inode_flag(inode, FI_ATOMIC_FILE);
21092106 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2110
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2107
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
21112108
21122109 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
21132110 F2FS_I(inode)->inmem_task = current;
....@@ -2274,7 +2271,8 @@
22742271 if (ret) {
22752272 if (ret == -EROFS) {
22762273 ret = 0;
2277
- f2fs_stop_checkpoint(sbi, false);
2274
+ f2fs_stop_checkpoint(sbi, false,
2275
+ STOP_CP_REASON_SHUTDOWN);
22782276 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
22792277 trace_f2fs_shutdown(sbi, in, ret);
22802278 }
....@@ -2284,32 +2282,28 @@
22842282
22852283 switch (in) {
22862284 case F2FS_GOING_DOWN_FULLSYNC:
2287
- sb = freeze_bdev(sb->s_bdev);
2288
- if (IS_ERR(sb)) {
2289
- ret = PTR_ERR(sb);
2285
+ ret = freeze_bdev(sb->s_bdev);
2286
+ if (ret)
22902287 goto out;
2291
- }
2292
- if (sb) {
2293
- f2fs_stop_checkpoint(sbi, false);
2294
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2295
- thaw_bdev(sb->s_bdev, sb);
2296
- }
2288
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2289
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2290
+ thaw_bdev(sb->s_bdev);
22972291 break;
22982292 case F2FS_GOING_DOWN_METASYNC:
22992293 /* do checkpoint only */
23002294 ret = f2fs_sync_fs(sb, 1);
23012295 if (ret)
23022296 goto out;
2303
- f2fs_stop_checkpoint(sbi, false);
2297
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23042298 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23052299 break;
23062300 case F2FS_GOING_DOWN_NOSYNC:
2307
- f2fs_stop_checkpoint(sbi, false);
2301
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23082302 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23092303 break;
23102304 case F2FS_GOING_DOWN_METAFLUSH:
23112305 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2312
- f2fs_stop_checkpoint(sbi, false);
2306
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
23132307 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
23142308 break;
23152309 case F2FS_GOING_DOWN_NEED_FSCK:
....@@ -2418,7 +2412,7 @@
24182412 if (err)
24192413 return err;
24202414
2421
- down_write(&sbi->sb_lock);
2415
+ f2fs_down_write(&sbi->sb_lock);
24222416
24232417 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
24242418 goto got_it;
....@@ -2437,7 +2431,7 @@
24372431 16))
24382432 err = -EFAULT;
24392433 out_err:
2440
- up_write(&sbi->sb_lock);
2434
+ f2fs_up_write(&sbi->sb_lock);
24412435 mnt_drop_write_file(filp);
24422436 return err;
24432437 }
....@@ -2514,40 +2508,33 @@
25142508 return ret;
25152509
25162510 if (!sync) {
2517
- if (!down_write_trylock(&sbi->gc_lock)) {
2511
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
25182512 ret = -EBUSY;
25192513 goto out;
25202514 }
25212515 } else {
2522
- down_write(&sbi->gc_lock);
2516
+ f2fs_down_write(&sbi->gc_lock);
25232517 }
25242518
2525
- ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2519
+ ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
25262520 out:
25272521 mnt_drop_write_file(filp);
25282522 return ret;
25292523 }
25302524
2531
-static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2525
+static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
25322526 {
2533
- struct inode *inode = file_inode(filp);
2534
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2535
- struct f2fs_gc_range range;
2527
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
25362528 u64 end;
25372529 int ret;
25382530
25392531 if (!capable(CAP_SYS_ADMIN))
25402532 return -EPERM;
2541
-
2542
- if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2543
- sizeof(range)))
2544
- return -EFAULT;
2545
-
25462533 if (f2fs_readonly(sbi->sb))
25472534 return -EROFS;
25482535
2549
- end = range.start + range.len;
2550
- if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
2536
+ end = range->start + range->len;
2537
+ if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
25512538 end >= MAX_BLKADDR(sbi))
25522539 return -EINVAL;
25532540
....@@ -2556,22 +2543,38 @@
25562543 return ret;
25572544
25582545 do_more:
2559
- if (!range.sync) {
2560
- if (!down_write_trylock(&sbi->gc_lock)) {
2546
+ if (!range->sync) {
2547
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
25612548 ret = -EBUSY;
25622549 goto out;
25632550 }
25642551 } else {
2565
- down_write(&sbi->gc_lock);
2552
+ f2fs_down_write(&sbi->gc_lock);
25662553 }
25672554
2568
- ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2569
- range.start += BLKS_PER_SEC(sbi);
2570
- if (range.start <= end)
2555
+ ret = f2fs_gc(sbi, range->sync, true, false,
2556
+ GET_SEGNO(sbi, range->start));
2557
+ if (ret) {
2558
+ if (ret == -EBUSY)
2559
+ ret = -EAGAIN;
2560
+ goto out;
2561
+ }
2562
+ range->start += BLKS_PER_SEC(sbi);
2563
+ if (range->start <= end)
25712564 goto do_more;
25722565 out:
25732566 mnt_drop_write_file(filp);
25742567 return ret;
2568
+}
2569
+
2570
+static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2571
+{
2572
+ struct f2fs_gc_range range;
2573
+
2574
+ if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2575
+ sizeof(range)))
2576
+ return -EFAULT;
2577
+ return __f2fs_ioc_gc_range(filp, &range);
25752578 }
25762579
25772580 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
....@@ -2607,9 +2610,9 @@
26072610 {
26082611 struct inode *inode = file_inode(filp);
26092612 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2610
- .m_seg_type = NO_CHECK_TYPE ,
2613
+ .m_seg_type = NO_CHECK_TYPE,
26112614 .m_may_create = false };
2612
- struct extent_info ei = {0, 0, 0};
2615
+ struct extent_info ei = {};
26132616 pgoff_t pg_start, pg_end, next_pgofs;
26142617 unsigned int blk_per_seg = sbi->blocks_per_seg;
26152618 unsigned int total = 0, sec_num;
....@@ -2617,16 +2620,19 @@
26172620 bool fragmented = false;
26182621 int err;
26192622
2620
- /* if in-place-update policy is enabled, don't waste time here */
2621
- if (f2fs_should_update_inplace(inode, NULL))
2622
- return -EINVAL;
2623
-
26242623 pg_start = range->start >> PAGE_SHIFT;
26252624 pg_end = (range->start + range->len) >> PAGE_SHIFT;
26262625
26272626 f2fs_balance_fs(sbi, true);
26282627
26292628 inode_lock(inode);
2629
+
2630
+ /* if in-place-update policy is enabled, don't waste time here */
2631
+ set_inode_flag(inode, FI_OPU_WRITE);
2632
+ if (f2fs_should_update_inplace(inode, NULL)) {
2633
+ err = -EINVAL;
2634
+ goto out;
2635
+ }
26302636
26312637 /* writeback all dirty pages in the range */
26322638 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
....@@ -2638,7 +2644,7 @@
26382644 * lookup mapping info in extent cache, skip defragmenting if physical
26392645 * block addresses are continuous.
26402646 */
2641
- if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2647
+ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
26422648 if (ei.fofs + ei.len >= pg_end)
26432649 goto out;
26442650 }
....@@ -2709,7 +2715,7 @@
27092715 goto check;
27102716 }
27112717
2712
- set_inode_flag(inode, FI_DO_DEFRAG);
2718
+ set_inode_flag(inode, FI_SKIP_WRITES);
27132719
27142720 idx = map.m_lblk;
27152721 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
....@@ -2734,15 +2740,16 @@
27342740 if (map.m_lblk < pg_end && cnt < blk_per_seg)
27352741 goto do_map;
27362742
2737
- clear_inode_flag(inode, FI_DO_DEFRAG);
2743
+ clear_inode_flag(inode, FI_SKIP_WRITES);
27382744
27392745 err = filemap_fdatawrite(inode->i_mapping);
27402746 if (err)
27412747 goto out;
27422748 }
27432749 clear_out:
2744
- clear_inode_flag(inode, FI_DO_DEFRAG);
2750
+ clear_inode_flag(inode, FI_SKIP_WRITES);
27452751 out:
2752
+ clear_inode_flag(inode, FI_OPU_WRITE);
27462753 inode_unlock(inode);
27472754 if (!err)
27482755 range->len = (u64)total << PAGE_SHIFT;
....@@ -2774,7 +2781,7 @@
27742781 return -EINVAL;
27752782
27762783 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2777
- sbi->max_file_blocks))
2784
+ max_file_blocks(inode)))
27782785 return -EINVAL;
27792786
27802787 err = mnt_want_write_file(filp);
....@@ -2817,6 +2824,9 @@
28172824
28182825 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
28192826 return -EOPNOTSUPP;
2827
+
2828
+ if (pos_out < 0 || pos_in < 0)
2829
+ return -EINVAL;
28202830
28212831 if (src == dst) {
28222832 if (pos_in == pos_out)
....@@ -2875,10 +2885,10 @@
28752885
28762886 f2fs_balance_fs(sbi, true);
28772887
2878
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2888
+ f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
28792889 if (src != dst) {
28802890 ret = -EBUSY;
2881
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2891
+ if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
28822892 goto out_src;
28832893 }
28842894
....@@ -2896,9 +2906,9 @@
28962906 f2fs_unlock_op(sbi);
28972907
28982908 if (src != dst)
2899
- up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2909
+ f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
29002910 out_src:
2901
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2911
+ f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
29022912 out_unlock:
29032913 if (src != dst)
29042914 inode_unlock(dst);
....@@ -2907,9 +2917,9 @@
29072917 return ret;
29082918 }
29092919
2910
-static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2920
+static int __f2fs_ioc_move_range(struct file *filp,
2921
+ struct f2fs_move_range *range)
29112922 {
2912
- struct f2fs_move_range range;
29132923 struct fd dst;
29142924 int err;
29152925
....@@ -2917,11 +2927,7 @@
29172927 !(filp->f_mode & FMODE_WRITE))
29182928 return -EBADF;
29192929
2920
- if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2921
- sizeof(range)))
2922
- return -EFAULT;
2923
-
2924
- dst = fdget(range.dst_fd);
2930
+ dst = fdget(range->dst_fd);
29252931 if (!dst.file)
29262932 return -EBADF;
29272933
....@@ -2934,19 +2940,23 @@
29342940 if (err)
29352941 goto err_out;
29362942
2937
- err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2938
- range.pos_out, range.len);
2943
+ err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2944
+ range->pos_out, range->len);
29392945
29402946 mnt_drop_write_file(filp);
2941
- if (err)
2942
- goto err_out;
2943
-
2944
- if (copy_to_user((struct f2fs_move_range __user *)arg,
2945
- &range, sizeof(range)))
2946
- err = -EFAULT;
29472947 err_out:
29482948 fdput(dst);
29492949 return err;
2950
+}
2951
+
2952
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2953
+{
2954
+ struct f2fs_move_range range;
2955
+
2956
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2957
+ sizeof(range)))
2958
+ return -EFAULT;
2959
+ return __f2fs_ioc_move_range(filp, &range);
29502960 }
29512961
29522962 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
....@@ -2993,14 +3003,14 @@
29933003 end_segno = min(start_segno + range.segments, dev_end_segno);
29943004
29953005 while (start_segno < end_segno) {
2996
- if (!down_write_trylock(&sbi->gc_lock)) {
3006
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
29973007 ret = -EBUSY;
29983008 goto out;
29993009 }
30003010 sm->last_victim[GC_CB] = end_segno + 1;
30013011 sm->last_victim[GC_GREEDY] = end_segno + 1;
30023012 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3003
- ret = f2fs_gc(sbi, true, true, start_segno);
3013
+ ret = f2fs_gc(sbi, true, true, true, start_segno);
30043014 if (ret == -EAGAIN)
30053015 ret = 0;
30063016 else if (ret < 0)
....@@ -3293,7 +3303,7 @@
32933303 if (ret)
32943304 goto out;
32953305
3296
- if (f2fs_disable_compressed_file(inode)) {
3306
+ if (!f2fs_disable_compressed_file(inode)) {
32973307 ret = -EOPNOTSUPP;
32983308 goto out;
32993309 }
....@@ -3334,21 +3344,21 @@
33343344 map.m_next_extent = &m_next_extent;
33353345 map.m_seg_type = NO_CHECK_TYPE;
33363346 map.m_may_create = false;
3337
- end = F2FS_I_SB(inode)->max_file_blocks;
3347
+ end = max_file_blocks(inode);
33383348
33393349 while (map.m_lblk < end) {
33403350 map.m_len = end - map.m_lblk;
33413351
3342
- down_write(&fi->i_gc_rwsem[WRITE]);
3352
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
33433353 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3344
- up_write(&fi->i_gc_rwsem[WRITE]);
3354
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
33453355 if (err)
33463356 return err;
33473357
33483358 map.m_lblk = m_next_extent;
33493359 }
33503360
3351
- return err;
3361
+ return 0;
33523362 }
33533363
33543364 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
....@@ -3382,7 +3392,7 @@
33823392
33833393 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
33843394 f2fs_warn(F2FS_I_SB(inode),
3385
- "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3395
+ "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
33863396 inode->i_ino);
33873397 return -EOPNOTSUPP;
33883398 }
....@@ -3398,7 +3408,15 @@
33983408 return fsverity_ioctl_measure(filp, (void __user *)arg);
33993409 }
34003410
3401
-static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
3411
+static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3412
+{
3413
+ if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3414
+ return -EOPNOTSUPP;
3415
+
3416
+ return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3417
+}
3418
+
3419
+static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
34023420 {
34033421 struct inode *inode = file_inode(filp);
34043422 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
....@@ -3410,21 +3428,21 @@
34103428 if (!vbuf)
34113429 return -ENOMEM;
34123430
3413
- down_read(&sbi->sb_lock);
3431
+ f2fs_down_read(&sbi->sb_lock);
34143432 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
34153433 ARRAY_SIZE(sbi->raw_super->volume_name),
34163434 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3417
- up_read(&sbi->sb_lock);
3435
+ f2fs_up_read(&sbi->sb_lock);
34183436
34193437 if (copy_to_user((char __user *)arg, vbuf,
34203438 min(FSLABEL_MAX, count)))
34213439 err = -EFAULT;
34223440
3423
- kvfree(vbuf);
3441
+ kfree(vbuf);
34243442 return err;
34253443 }
34263444
3427
-static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
3445
+static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
34283446 {
34293447 struct inode *inode = file_inode(filp);
34303448 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
....@@ -3442,7 +3460,7 @@
34423460 if (err)
34433461 goto out;
34443462
3445
- down_write(&sbi->sb_lock);
3463
+ f2fs_down_write(&sbi->sb_lock);
34463464
34473465 memset(sbi->raw_super->volume_name, 0,
34483466 sizeof(sbi->raw_super->volume_name));
....@@ -3452,7 +3470,7 @@
34523470
34533471 err = f2fs_commit_super(sbi, false);
34543472
3455
- up_write(&sbi->sb_lock);
3473
+ f2fs_up_write(&sbi->sb_lock);
34563474
34573475 mnt_drop_write_file(filp);
34583476 out:
....@@ -3471,7 +3489,7 @@
34713489 if (!f2fs_compressed_file(inode))
34723490 return -EINVAL;
34733491
3474
- blocks = F2FS_I(inode)->i_compr_blocks;
3492
+ blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
34753493 return put_user(blocks, (u64 __user *)arg);
34763494 }
34773495
....@@ -3556,12 +3574,13 @@
35563574 inode_lock(inode);
35573575
35583576 writecount = atomic_read(&inode->i_writecount);
3559
- if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) {
3577
+ if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3578
+ (!(filp->f_mode & FMODE_WRITE) && writecount)) {
35603579 ret = -EBUSY;
35613580 goto out;
35623581 }
35633582
3564
- if (IS_IMMUTABLE(inode)) {
3583
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
35653584 ret = -EINVAL;
35663585 goto out;
35673586 }
....@@ -3570,16 +3589,15 @@
35703589 if (ret)
35713590 goto out;
35723591
3573
- if (!F2FS_I(inode)->i_compr_blocks)
3574
- goto out;
3575
-
3576
- F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3577
- f2fs_set_inode_flags(inode);
3592
+ set_inode_flag(inode, FI_COMPRESS_RELEASED);
35783593 inode->i_ctime = current_time(inode);
35793594 f2fs_mark_inode_dirty_sync(inode, true);
35803595
3581
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3582
- down_write(&F2FS_I(inode)->i_mmap_sem);
3596
+ if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3597
+ goto out;
3598
+
3599
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3600
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
35833601
35843602 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
35853603
....@@ -3614,8 +3632,8 @@
36143632 released_blocks += ret;
36153633 }
36163634
3617
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3618
- up_write(&F2FS_I(inode)->i_mmap_sem);
3635
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3636
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
36193637 out:
36203638 inode_unlock(inode);
36213639
....@@ -3623,14 +3641,15 @@
36233641
36243642 if (ret >= 0) {
36253643 ret = put_user(released_blocks, (u64 __user *)arg);
3626
- } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
3644
+ } else if (released_blocks &&
3645
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
36273646 set_sbi_flag(sbi, SBI_NEED_FSCK);
36283647 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3629
- "iblocks=%llu, released=%u, compr_blocks=%llu, "
3648
+ "iblocks=%llu, released=%u, compr_blocks=%u, "
36303649 "run fsck to fix.",
3631
- __func__, inode->i_ino, (u64)inode->i_blocks,
3650
+ __func__, inode->i_ino, inode->i_blocks,
36323651 released_blocks,
3633
- F2FS_I(inode)->i_compr_blocks);
3652
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
36343653 }
36353654
36363655 return ret;
....@@ -3718,20 +3737,20 @@
37183737 if (ret)
37193738 return ret;
37203739
3721
- if (F2FS_I(inode)->i_compr_blocks)
3740
+ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
37223741 goto out;
37233742
37243743 f2fs_balance_fs(F2FS_I_SB(inode), true);
37253744
37263745 inode_lock(inode);
37273746
3728
- if (!IS_IMMUTABLE(inode)) {
3747
+ if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
37293748 ret = -EINVAL;
37303749 goto unlock_inode;
37313750 }
37323751
3733
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3734
- down_write(&F2FS_I(inode)->i_mmap_sem);
3752
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3753
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
37353754
37363755 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
37373756
....@@ -3766,12 +3785,11 @@
37663785 reserved_blocks += ret;
37673786 }
37683787
3769
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3770
- up_write(&F2FS_I(inode)->i_mmap_sem);
3788
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3789
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
37713790
37723791 if (ret >= 0) {
3773
- F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3774
- f2fs_set_inode_flags(inode);
3792
+ clear_inode_flag(inode, FI_COMPRESS_RELEASED);
37753793 inode->i_ctime = current_time(inode);
37763794 f2fs_mark_inode_dirty_sync(inode, true);
37773795 }
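The hunk below adds the F2FS_IOC_SEC_TRIM_FILE, F2FS_IOC_GET/SET_COMPRESS_OPTION and F2FS_IOC_DECOMPRESS_FILE/COMPRESS_FILE handlers and splits the dispatch into __f2fs_ioctl(). For orientation, a hypothetical userspace invocation of the new secure-trim ioctl — the ioctl number, struct f2fs_sectrim_range and the F2FS_TRIM_FILE_* flags are assumed to come from the uapi header <linux/f2fs.h> that this patch starts including; only the field and flag names are taken from the hunk itself:

/* Hypothetical userspace sketch, not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

int main(int argc, char **argv)
{
	struct f2fs_sectrim_range range = {
		.start = 0,			/* must be F2FS_BLKSIZE-aligned */
		.len   = (__u64)-1,		/* (u64)-1 means "trim to end of file" */
		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
	};
	int fd;

	if (argc != 2)
		return 1;

	fd = open(argv[1], O_WRONLY);	/* handler rejects fds opened without write access */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range) < 0) {
		perror("F2FS_IOC_SEC_TRIM_FILE");
		return 1;
	}
	return 0;
}

Per f2fs_sec_trim_file() below, the target must be a regular, non-compressed, non-atomic file, range.start must lie below i_size, and at least one of the two trim flags must be set.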
....@@ -3782,32 +3800,472 @@
37823800
37833801 if (ret >= 0) {
37843802 ret = put_user(reserved_blocks, (u64 __user *)arg);
3785
- } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
3803
+ } else if (reserved_blocks &&
3804
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
37863805 set_sbi_flag(sbi, SBI_NEED_FSCK);
37873806 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3788
- "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
3807
+ "iblocks=%llu, reserved=%u, compr_blocks=%u, "
37893808 "run fsck to fix.",
3790
- __func__, inode->i_ino, (u64)inode->i_blocks,
3809
+ __func__, inode->i_ino, inode->i_blocks,
37913810 reserved_blocks,
3792
- F2FS_I(inode)->i_compr_blocks);
3811
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
37933812 }
37943813
37953814 return ret;
37963815 }
37973816
3798
-long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3817
+static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3818
+ pgoff_t off, block_t block, block_t len, u32 flags)
37993819 {
3800
- if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3801
- return -EIO;
3802
- if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
3803
- return -ENOSPC;
3820
+ struct request_queue *q = bdev_get_queue(bdev);
3821
+ sector_t sector = SECTOR_FROM_BLOCK(block);
3822
+ sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3823
+ int ret = 0;
38043824
3825
+ if (!q)
3826
+ return -ENXIO;
3827
+
3828
+ if (flags & F2FS_TRIM_FILE_DISCARD)
3829
+ ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3830
+ blk_queue_secure_erase(q) ?
3831
+ BLKDEV_DISCARD_SECURE : 0);
3832
+
3833
+ if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3834
+ if (IS_ENCRYPTED(inode))
3835
+ ret = fscrypt_zeroout_range(inode, off, block, len);
3836
+ else
3837
+ ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3838
+ GFP_NOFS, 0);
3839
+ }
3840
+
3841
+ return ret;
3842
+}
3843
+
3844
+static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3845
+{
3846
+ struct inode *inode = file_inode(filp);
3847
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3848
+ struct address_space *mapping = inode->i_mapping;
3849
+ struct block_device *prev_bdev = NULL;
3850
+ struct f2fs_sectrim_range range;
3851
+ pgoff_t index, pg_end, prev_index = 0;
3852
+ block_t prev_block = 0, len = 0;
3853
+ loff_t end_addr;
3854
+ bool to_end = false;
3855
+ int ret = 0;
3856
+
3857
+ if (!(filp->f_mode & FMODE_WRITE))
3858
+ return -EBADF;
3859
+
3860
+ if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3861
+ sizeof(range)))
3862
+ return -EFAULT;
3863
+
3864
+ if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3865
+ !S_ISREG(inode->i_mode))
3866
+ return -EINVAL;
3867
+
3868
+ if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3869
+ !f2fs_hw_support_discard(sbi)) ||
3870
+ ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3871
+ IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3872
+ return -EOPNOTSUPP;
3873
+
3874
+ file_start_write(filp);
3875
+ inode_lock(inode);
3876
+
3877
+ if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3878
+ range.start >= inode->i_size) {
3879
+ ret = -EINVAL;
3880
+ goto err;
3881
+ }
3882
+
3883
+ if (range.len == 0)
3884
+ goto err;
3885
+
3886
+ if (inode->i_size - range.start > range.len) {
3887
+ end_addr = range.start + range.len;
3888
+ } else {
3889
+ end_addr = range.len == (u64)-1 ?
3890
+ sbi->sb->s_maxbytes : inode->i_size;
3891
+ to_end = true;
3892
+ }
3893
+
3894
+ if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3895
+ (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3896
+ ret = -EINVAL;
3897
+ goto err;
3898
+ }
3899
+
3900
+ index = F2FS_BYTES_TO_BLK(range.start);
3901
+ pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3902
+
3903
+ ret = f2fs_convert_inline_inode(inode);
3904
+ if (ret)
3905
+ goto err;
3906
+
3907
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3908
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3909
+
3910
+ ret = filemap_write_and_wait_range(mapping, range.start,
3911
+ to_end ? LLONG_MAX : end_addr - 1);
3912
+ if (ret)
3913
+ goto out;
3914
+
3915
+ truncate_inode_pages_range(mapping, range.start,
3916
+ to_end ? -1 : end_addr - 1);
3917
+
3918
+ while (index < pg_end) {
3919
+ struct dnode_of_data dn;
3920
+ pgoff_t end_offset, count;
3921
+ int i;
3922
+
3923
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
3924
+ ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3925
+ if (ret) {
3926
+ if (ret == -ENOENT) {
3927
+ index = f2fs_get_next_page_offset(&dn, index);
3928
+ continue;
3929
+ }
3930
+ goto out;
3931
+ }
3932
+
3933
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3934
+ count = min(end_offset - dn.ofs_in_node, pg_end - index);
3935
+ for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3936
+ struct block_device *cur_bdev;
3937
+ block_t blkaddr = f2fs_data_blkaddr(&dn);
3938
+
3939
+ if (!__is_valid_data_blkaddr(blkaddr))
3940
+ continue;
3941
+
3942
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3943
+ DATA_GENERIC_ENHANCE)) {
3944
+ ret = -EFSCORRUPTED;
3945
+ f2fs_put_dnode(&dn);
3946
+ goto out;
3947
+ }
3948
+
3949
+ cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3950
+ if (f2fs_is_multi_device(sbi)) {
3951
+ int di = f2fs_target_device_index(sbi, blkaddr);
3952
+
3953
+ blkaddr -= FDEV(di).start_blk;
3954
+ }
3955
+
3956
+ if (len) {
3957
+ if (prev_bdev == cur_bdev &&
3958
+ index == prev_index + len &&
3959
+ blkaddr == prev_block + len) {
3960
+ len++;
3961
+ } else {
3962
+ ret = f2fs_secure_erase(prev_bdev,
3963
+ inode, prev_index, prev_block,
3964
+ len, range.flags);
3965
+ if (ret) {
3966
+ f2fs_put_dnode(&dn);
3967
+ goto out;
3968
+ }
3969
+
3970
+ len = 0;
3971
+ }
3972
+ }
3973
+
3974
+ if (!len) {
3975
+ prev_bdev = cur_bdev;
3976
+ prev_index = index;
3977
+ prev_block = blkaddr;
3978
+ len = 1;
3979
+ }
3980
+ }
3981
+
3982
+ f2fs_put_dnode(&dn);
3983
+
3984
+ if (fatal_signal_pending(current)) {
3985
+ ret = -EINTR;
3986
+ goto out;
3987
+ }
3988
+ cond_resched();
3989
+ }
3990
+
3991
+ if (len)
3992
+ ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3993
+ prev_block, len, range.flags);
3994
+out:
3995
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3996
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3997
+err:
3998
+ inode_unlock(inode);
3999
+ file_end_write(filp);
4000
+
4001
+ return ret;
4002
+}
4003
+
4004
+static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4005
+{
4006
+ struct inode *inode = file_inode(filp);
4007
+ struct f2fs_comp_option option;
4008
+
4009
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4010
+ return -EOPNOTSUPP;
4011
+
4012
+ inode_lock_shared(inode);
4013
+
4014
+ if (!f2fs_compressed_file(inode)) {
4015
+ inode_unlock_shared(inode);
4016
+ return -ENODATA;
4017
+ }
4018
+
4019
+ option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4020
+ option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4021
+
4022
+ inode_unlock_shared(inode);
4023
+
4024
+ if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4025
+ sizeof(option)))
4026
+ return -EFAULT;
4027
+
4028
+ return 0;
4029
+}
4030
+
4031
+static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4032
+{
4033
+ struct inode *inode = file_inode(filp);
4034
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4035
+ struct f2fs_comp_option option;
4036
+ int ret = 0;
4037
+
4038
+ if (!f2fs_sb_has_compression(sbi))
4039
+ return -EOPNOTSUPP;
4040
+
4041
+ if (!(filp->f_mode & FMODE_WRITE))
4042
+ return -EBADF;
4043
+
4044
+ if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4045
+ sizeof(option)))
4046
+ return -EFAULT;
4047
+
4048
+ if (!f2fs_compressed_file(inode) ||
4049
+ option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4050
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4051
+ option.algorithm >= COMPRESS_MAX)
4052
+ return -EINVAL;
4053
+
4054
+ file_start_write(filp);
4055
+ inode_lock(inode);
4056
+
4057
+ if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4058
+ ret = -EBUSY;
4059
+ goto out;
4060
+ }
4061
+
4062
+ if (inode->i_size != 0) {
4063
+ ret = -EFBIG;
4064
+ goto out;
4065
+ }
4066
+
4067
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4068
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4069
+ F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4070
+ f2fs_mark_inode_dirty_sync(inode, true);
4071
+
4072
+ if (!f2fs_is_compress_backend_ready(inode))
4073
+ f2fs_warn(sbi, "compression algorithm is successfully set, "
4074
+ "but current kernel doesn't support this algorithm.");
4075
+out:
4076
+ inode_unlock(inode);
4077
+ file_end_write(filp);
4078
+
4079
+ return ret;
4080
+}
4081
+
4082
+static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4083
+{
4084
+ DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4085
+ struct address_space *mapping = inode->i_mapping;
4086
+ struct page *page;
4087
+ pgoff_t redirty_idx = page_idx;
4088
+ int i, page_len = 0, ret = 0;
4089
+
4090
+ page_cache_ra_unbounded(&ractl, len, 0);
4091
+
4092
+ for (i = 0; i < len; i++, page_idx++) {
4093
+ page = read_cache_page(mapping, page_idx, NULL, NULL);
4094
+ if (IS_ERR(page)) {
4095
+ ret = PTR_ERR(page);
4096
+ break;
4097
+ }
4098
+ page_len++;
4099
+ }
4100
+
4101
+ for (i = 0; i < page_len; i++, redirty_idx++) {
4102
+ page = find_lock_page(mapping, redirty_idx);
4103
+ if (!page) {
4104
+ ret = -ENOMEM;
4105
+ break;
4106
+ }
4107
+ set_page_dirty(page);
4108
+ f2fs_put_page(page, 1);
4109
+ f2fs_put_page(page, 0);
4110
+ }
4111
+
4112
+ return ret;
4113
+}
4114
+
4115
+static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4116
+{
4117
+ struct inode *inode = file_inode(filp);
4118
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4119
+ struct f2fs_inode_info *fi = F2FS_I(inode);
4120
+ pgoff_t page_idx = 0, last_idx;
4121
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
4122
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
4123
+ int count, ret;
4124
+
4125
+ if (!f2fs_sb_has_compression(sbi) ||
4126
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4127
+ return -EOPNOTSUPP;
4128
+
4129
+ if (!(filp->f_mode & FMODE_WRITE))
4130
+ return -EBADF;
4131
+
4132
+ if (!f2fs_compressed_file(inode))
4133
+ return -EINVAL;
4134
+
4135
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
4136
+
4137
+ file_start_write(filp);
4138
+ inode_lock(inode);
4139
+
4140
+ if (!f2fs_is_compress_backend_ready(inode)) {
4141
+ ret = -EOPNOTSUPP;
4142
+ goto out;
4143
+ }
4144
+
4145
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4146
+ ret = -EINVAL;
4147
+ goto out;
4148
+ }
4149
+
4150
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4151
+ if (ret)
4152
+ goto out;
4153
+
4154
+ if (!atomic_read(&fi->i_compr_blocks))
4155
+ goto out;
4156
+
4157
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4158
+
4159
+ count = last_idx - page_idx;
4160
+ while (count) {
4161
+ int len = min(cluster_size, count);
4162
+
4163
+ ret = redirty_blocks(inode, page_idx, len);
4164
+ if (ret < 0)
4165
+ break;
4166
+
4167
+ if (get_dirty_pages(inode) >= blk_per_seg)
4168
+ filemap_fdatawrite(inode->i_mapping);
4169
+
4170
+ count -= len;
4171
+ page_idx += len;
4172
+ }
4173
+
4174
+ if (!ret)
4175
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4176
+ LLONG_MAX);
4177
+
4178
+ if (ret)
4179
+ f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4180
+ __func__, ret);
4181
+out:
4182
+ inode_unlock(inode);
4183
+ file_end_write(filp);
4184
+
4185
+ return ret;
4186
+}
4187
+
4188
+static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4189
+{
4190
+ struct inode *inode = file_inode(filp);
4191
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4192
+ pgoff_t page_idx = 0, last_idx;
4193
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
4194
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
4195
+ int count, ret;
4196
+
4197
+ if (!f2fs_sb_has_compression(sbi) ||
4198
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4199
+ return -EOPNOTSUPP;
4200
+
4201
+ if (!(filp->f_mode & FMODE_WRITE))
4202
+ return -EBADF;
4203
+
4204
+ if (!f2fs_compressed_file(inode))
4205
+ return -EINVAL;
4206
+
4207
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
4208
+
4209
+ file_start_write(filp);
4210
+ inode_lock(inode);
4211
+
4212
+ if (!f2fs_is_compress_backend_ready(inode)) {
4213
+ ret = -EOPNOTSUPP;
4214
+ goto out;
4215
+ }
4216
+
4217
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4218
+ ret = -EINVAL;
4219
+ goto out;
4220
+ }
4221
+
4222
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4223
+ if (ret)
4224
+ goto out;
4225
+
4226
+ set_inode_flag(inode, FI_ENABLE_COMPRESS);
4227
+
4228
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4229
+
4230
+ count = last_idx - page_idx;
4231
+ while (count) {
4232
+ int len = min(cluster_size, count);
4233
+
4234
+ ret = redirty_blocks(inode, page_idx, len);
4235
+ if (ret < 0)
4236
+ break;
4237
+
4238
+ if (get_dirty_pages(inode) >= blk_per_seg)
4239
+ filemap_fdatawrite(inode->i_mapping);
4240
+
4241
+ count -= len;
4242
+ page_idx += len;
4243
+ }
4244
+
4245
+ if (!ret)
4246
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4247
+ LLONG_MAX);
4248
+
4249
+ clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4250
+
4251
+ if (ret)
4252
+ f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4253
+ __func__, ret);
4254
+out:
4255
+ inode_unlock(inode);
4256
+ file_end_write(filp);
4257
+
4258
+ return ret;
4259
+}
4260
+
4261
+static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4262
+{
38054263 switch (cmd) {
3806
- case F2FS_IOC_GETFLAGS:
4264
+ case FS_IOC_GETFLAGS:
38074265 return f2fs_ioc_getflags(filp, arg);
3808
- case F2FS_IOC_SETFLAGS:
4266
+ case FS_IOC_SETFLAGS:
38094267 return f2fs_ioc_setflags(filp, arg);
3810
- case F2FS_IOC_GETVERSION:
4268
+ case FS_IOC_GETVERSION:
38114269 return f2fs_ioc_getversion(filp, arg);
38124270 case F2FS_IOC_START_ATOMIC_WRITE:
38134271 return f2fs_ioc_start_atomic_write(filp);
....@@ -3823,11 +4281,11 @@
38234281 return f2fs_ioc_shutdown(filp, arg);
38244282 case FITRIM:
38254283 return f2fs_ioc_fitrim(filp, arg);
3826
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
4284
+ case FS_IOC_SET_ENCRYPTION_POLICY:
38274285 return f2fs_ioc_set_encryption_policy(filp, arg);
3828
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
4286
+ case FS_IOC_GET_ENCRYPTION_POLICY:
38294287 return f2fs_ioc_get_encryption_policy(filp, arg);
3830
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
4288
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
38314289 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
38324290 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
38334291 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
....@@ -3855,9 +4313,9 @@
38554313 return f2fs_ioc_flush_device(filp, arg);
38564314 case F2FS_IOC_GET_FEATURES:
38574315 return f2fs_ioc_get_features(filp, arg);
3858
- case F2FS_IOC_FSGETXATTR:
4316
+ case FS_IOC_FSGETXATTR:
38594317 return f2fs_ioc_fsgetxattr(filp, arg);
3860
- case F2FS_IOC_FSSETXATTR:
4318
+ case FS_IOC_FSSETXATTR:
38614319 return f2fs_ioc_fssetxattr(filp, arg);
38624320 case F2FS_IOC_GET_PIN_FILE:
38634321 return f2fs_ioc_get_pin_file(filp, arg);
....@@ -3871,19 +4329,41 @@
38714329 return f2fs_ioc_enable_verity(filp, arg);
38724330 case FS_IOC_MEASURE_VERITY:
38734331 return f2fs_ioc_measure_verity(filp, arg);
3874
- case F2FS_IOC_GET_VOLUME_NAME:
3875
- return f2fs_get_volume_name(filp, arg);
3876
- case F2FS_IOC_SET_VOLUME_NAME:
3877
- return f2fs_set_volume_name(filp, arg);
4332
+ case FS_IOC_READ_VERITY_METADATA:
4333
+ return f2fs_ioc_read_verity_metadata(filp, arg);
4334
+ case FS_IOC_GETFSLABEL:
4335
+ return f2fs_ioc_getfslabel(filp, arg);
4336
+ case FS_IOC_SETFSLABEL:
4337
+ return f2fs_ioc_setfslabel(filp, arg);
38784338 case F2FS_IOC_GET_COMPRESS_BLOCKS:
38794339 return f2fs_get_compress_blocks(filp, arg);
38804340 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
38814341 return f2fs_release_compress_blocks(filp, arg);
38824342 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
38834343 return f2fs_reserve_compress_blocks(filp, arg);
4344
+ case F2FS_IOC_SEC_TRIM_FILE:
4345
+ return f2fs_sec_trim_file(filp, arg);
4346
+ case F2FS_IOC_GET_COMPRESS_OPTION:
4347
+ return f2fs_ioc_get_compress_option(filp, arg);
4348
+ case F2FS_IOC_SET_COMPRESS_OPTION:
4349
+ return f2fs_ioc_set_compress_option(filp, arg);
4350
+ case F2FS_IOC_DECOMPRESS_FILE:
4351
+ return f2fs_ioc_decompress_file(filp, arg);
4352
+ case F2FS_IOC_COMPRESS_FILE:
4353
+ return f2fs_ioc_compress_file(filp, arg);
38844354 default:
38854355 return -ENOTTY;
38864356 }
4357
+}
4358
+
4359
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4360
+{
4361
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4362
+ return -EIO;
4363
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4364
+ return -ENOSPC;
4365
+
4366
+ return __f2fs_ioctl(filp, cmd, arg);
38874367 }
38884368
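/*
 * Note (editorial, not part of this diff): the f2fs_cp_error() and
 * f2fs_is_checkpoint_ready() gates previously sat at the top of f2fs_ioctl()
 * before its switch; splitting the dispatcher into __f2fs_ioctl() lets the
 * native and compat entry points share one switch while each performs the
 * gating exactly once. Userspace therefore sees -EIO after a checkpoint
 * error and -ENOSPC while checkpointing is disabled and free space has run
 * too low to re-enable it.
 */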
38894369 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
....@@ -3926,6 +4406,16 @@
39264406 }
39274407 } else {
39284408 inode_lock(inode);
4409
+ }
4410
+
4411
+ if (unlikely(IS_IMMUTABLE(inode))) {
4412
+ ret = -EPERM;
4413
+ goto unlock;
4414
+ }
4415
+
4416
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4417
+ ret = -EPERM;
4418
+ goto unlock;
39294419 }
39304420
39314421 ret = generic_write_checks(iocb, from);
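/*
 * Sketch (illustrative only, not part of this diff): a file whose compressed
 * blocks were released via F2FS_IOC_RELEASE_COMPRESS_BLOCKS is marked
 * immutable / FI_COMPRESS_RELEASED, so the checks added above fail buffered
 * writes with -EPERM instead of letting them land on released space.
 * Assuming F2FS_IOC_RESERVE_COMPRESS_BLOCKS returns the re-reserved block
 * count through its __u64 argument, userspace would make such a file
 * writable again roughly like this:
 */
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/f2fs.h>

static int f2fs_unrelease(int fd)
{
        __u64 reserved = 0;

        /* re-reserves blocks for every cluster and clears FI_COMPRESS_RELEASED */
        return ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &reserved);
}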
....@@ -3986,12 +4476,18 @@
39864476 clear_inode_flag(inode, FI_NO_PREALLOC);
39874477
39884478 /* if we couldn't write data, we should deallocate blocks. */
3989
- if (preallocated && i_size_read(inode) < target_size)
4479
+ if (preallocated && i_size_read(inode) < target_size) {
4480
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4481
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
39904482 f2fs_truncate(inode);
4483
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
4484
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4485
+ }
39914486
39924487 if (ret > 0)
39934488 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
39944489 }
4490
+unlock:
39954491 inode_unlock(inode);
39964492 out:
39974493 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
....@@ -4002,27 +4498,87 @@
40024498 }
40034499
40044500 #ifdef CONFIG_COMPAT
4501
+struct compat_f2fs_gc_range {
4502
+ u32 sync;
4503
+ compat_u64 start;
4504
+ compat_u64 len;
4505
+};
4506
+#define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4507
+ struct compat_f2fs_gc_range)
4508
+
4509
+static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4510
+{
4511
+ struct compat_f2fs_gc_range __user *urange;
4512
+ struct f2fs_gc_range range;
4513
+ int err;
4514
+
4515
+ urange = compat_ptr(arg);
4516
+ err = get_user(range.sync, &urange->sync);
4517
+ err |= get_user(range.start, &urange->start);
4518
+ err |= get_user(range.len, &urange->len);
4519
+ if (err)
4520
+ return -EFAULT;
4521
+
4522
+ return __f2fs_ioc_gc_range(file, &range);
4523
+}
4524
+
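/*
 * Sketch (illustrative only, not part of this diff): a dedicated compat
 * struct and a separate F2FS_IOC32_GARBAGE_COLLECT_RANGE number are needed
 * because compat_u64 is only 4-byte aligned, so the 32-bit layout (and hence
 * the size encoded by _IOW()) differs from the native 64-bit one. The
 * hypothetical host-side program below uses a GCC-style aligned typedef to
 * mimic compat_u64 and shows the mismatch:
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t __attribute__((aligned(4))) compat_u64_t;

struct native_gc_range {        /* mirrors struct f2fs_gc_range on 64-bit */
        uint32_t sync;
        uint64_t start;
        uint64_t len;
};

struct compat_gc_range {        /* mirrors struct compat_f2fs_gc_range */
        uint32_t sync;
        compat_u64_t start;
        compat_u64_t len;
};

int main(void)
{
        printf("native: size %zu, start at offset %zu\n",
               sizeof(struct native_gc_range),
               offsetof(struct native_gc_range, start));
        printf("compat: size %zu, start at offset %zu\n",
               sizeof(struct compat_gc_range),
               offsetof(struct compat_gc_range, start));
        return 0;       /* typically 24/8 vs. 20/4 */
}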
4525
+struct compat_f2fs_move_range {
4526
+ u32 dst_fd;
4527
+ compat_u64 pos_in;
4528
+ compat_u64 pos_out;
4529
+ compat_u64 len;
4530
+};
4531
+#define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4532
+ struct compat_f2fs_move_range)
4533
+
4534
+static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4535
+{
4536
+ struct compat_f2fs_move_range __user *urange;
4537
+ struct f2fs_move_range range;
4538
+ int err;
4539
+
4540
+ urange = compat_ptr(arg);
4541
+ err = get_user(range.dst_fd, &urange->dst_fd);
4542
+ err |= get_user(range.pos_in, &urange->pos_in);
4543
+ err |= get_user(range.pos_out, &urange->pos_out);
4544
+ err |= get_user(range.len, &urange->len);
4545
+ if (err)
4546
+ return -EFAULT;
4547
+
4548
+ return __f2fs_ioc_move_range(file, &range);
4549
+}
4550
+
40054551 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
40064552 {
4553
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4554
+ return -EIO;
4555
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4556
+ return -ENOSPC;
4557
+
40074558 switch (cmd) {
4008
- case F2FS_IOC32_GETFLAGS:
4009
- cmd = F2FS_IOC_GETFLAGS;
4559
+ case FS_IOC32_GETFLAGS:
4560
+ cmd = FS_IOC_GETFLAGS;
40104561 break;
4011
- case F2FS_IOC32_SETFLAGS:
4012
- cmd = F2FS_IOC_SETFLAGS;
4562
+ case FS_IOC32_SETFLAGS:
4563
+ cmd = FS_IOC_SETFLAGS;
40134564 break;
4014
- case F2FS_IOC32_GETVERSION:
4015
- cmd = F2FS_IOC_GETVERSION;
4565
+ case FS_IOC32_GETVERSION:
4566
+ cmd = FS_IOC_GETVERSION;
40164567 break;
4568
+ case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4569
+ return f2fs_compat_ioc_gc_range(file, arg);
4570
+ case F2FS_IOC32_MOVE_RANGE:
4571
+ return f2fs_compat_ioc_move_range(file, arg);
40174572 case F2FS_IOC_START_ATOMIC_WRITE:
40184573 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
40194574 case F2FS_IOC_START_VOLATILE_WRITE:
40204575 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
40214576 case F2FS_IOC_ABORT_VOLATILE_WRITE:
40224577 case F2FS_IOC_SHUTDOWN:
4023
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
4024
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
4025
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
4578
+ case FITRIM:
4579
+ case FS_IOC_SET_ENCRYPTION_POLICY:
4580
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
4581
+ case FS_IOC_GET_ENCRYPTION_POLICY:
40264582 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
40274583 case FS_IOC_ADD_ENCRYPTION_KEY:
40284584 case FS_IOC_REMOVE_ENCRYPTION_KEY:
....@@ -4030,30 +4586,34 @@
40304586 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
40314587 case FS_IOC_GET_ENCRYPTION_NONCE:
40324588 case F2FS_IOC_GARBAGE_COLLECT:
4033
- case F2FS_IOC_GARBAGE_COLLECT_RANGE:
40344589 case F2FS_IOC_WRITE_CHECKPOINT:
40354590 case F2FS_IOC_DEFRAGMENT:
4036
- case F2FS_IOC_MOVE_RANGE:
40374591 case F2FS_IOC_FLUSH_DEVICE:
40384592 case F2FS_IOC_GET_FEATURES:
4039
- case F2FS_IOC_FSGETXATTR:
4040
- case F2FS_IOC_FSSETXATTR:
4593
+ case FS_IOC_FSGETXATTR:
4594
+ case FS_IOC_FSSETXATTR:
40414595 case F2FS_IOC_GET_PIN_FILE:
40424596 case F2FS_IOC_SET_PIN_FILE:
40434597 case F2FS_IOC_PRECACHE_EXTENTS:
40444598 case F2FS_IOC_RESIZE_FS:
40454599 case FS_IOC_ENABLE_VERITY:
40464600 case FS_IOC_MEASURE_VERITY:
4047
- case F2FS_IOC_GET_VOLUME_NAME:
4048
- case F2FS_IOC_SET_VOLUME_NAME:
4601
+ case FS_IOC_READ_VERITY_METADATA:
4602
+ case FS_IOC_GETFSLABEL:
4603
+ case FS_IOC_SETFSLABEL:
40494604 case F2FS_IOC_GET_COMPRESS_BLOCKS:
40504605 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
40514606 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4607
+ case F2FS_IOC_SEC_TRIM_FILE:
4608
+ case F2FS_IOC_GET_COMPRESS_OPTION:
4609
+ case F2FS_IOC_SET_COMPRESS_OPTION:
4610
+ case F2FS_IOC_DECOMPRESS_FILE:
4611
+ case F2FS_IOC_COMPRESS_FILE:
40524612 break;
40534613 default:
40544614 return -ENOIOCTLCMD;
40554615 }
4056
- return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4616
+ return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
40574617 }
40584618 #endif
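/*
 * Note (editorial, not part of this diff): the commands that merely break
 * out of the compat switch take arguments that are either scalars or structs
 * with identical 32-bit and 64-bit layout, so a compat_ptr() translation is
 * enough; only the gc_range and move_range payloads carry u64 fields with
 * differing alignment and therefore get the dedicated IOC32 handlers above.
 */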
40594619