From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] f2fs: update file.c locking, compression and ioctl handling
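Bring kernel/fs/f2fs/file.c in line with the newer f2fs core code used by
this tree:

- use the f2fs_down_*/f2fs_up_* rwsem wrappers instead of raw rwsem calls
- rework compressed-file handling: FI_COMPRESS_RELEASED flag, atomic
  i_compr_blocks, and the F2FS_IOC_{GET,SET}_COMPRESS_OPTION,
  F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE ioctls
- add F2FS_IOC_SEC_TRIM_FILE and FS_IOC_READ_VERITY_METADATA support
- switch the generic flag/version/label ioctls to their FS_IOC_* names and
  add compat handlers for 32-bit gc_range/move_range
- pass a shutdown reason to f2fs_stop_checkpoint() and the extra arguments
  now taken by f2fs_gc() and f2fs_get_node_info()
- drop the android_fs trace hooks, f2fs_trace_ios() and the old
  first-dirty-index lookup in lseek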
---
kernel/fs/f2fs/file.c | 1154 ++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 857 insertions(+), 297 deletions(-)
diff --git a/kernel/fs/f2fs/file.c b/kernel/fs/f2fs/file.c
index 63872d8..cd13532 100644
--- a/kernel/fs/f2fs/file.c
+++ b/kernel/fs/f2fs/file.c
@@ -21,6 +21,7 @@
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
+#include <linux/sched/signal.h>
#include "f2fs.h"
#include "node.h"
@@ -28,18 +29,17 @@
#include "xattr.h"
#include "acl.h"
#include "gc.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
-#include <trace/events/android_fs.h>
+#include <uapi/linux/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
vm_fault_t ret;
- down_read(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
ret = filemap_fault(vmf);
- up_read(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
@@ -59,6 +59,12 @@
bool need_alloc = true;
int err = 0;
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+ return VM_FAULT_SIGBUS;
+
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto err;
@@ -69,6 +75,10 @@
goto err;
}
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ goto err;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
int ret = f2fs_is_compressed_cluster(inode, page->index);
@@ -77,10 +87,6 @@
err = ret;
goto err;
} else if (ret) {
- if (ret < F2FS_I(inode)->i_cluster_size) {
- err = -EAGAIN;
- goto err;
- }
need_alloc = false;
}
}
@@ -94,7 +100,7 @@
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
file_update_time(vmf->vma->vm_file);
- down_read(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
page_offset(page) > i_size_read(inode) ||
@@ -106,11 +112,10 @@
if (need_alloc) {
/* block allocation */
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_block(&dn, page->index);
- f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -153,7 +158,7 @@
trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
- up_read(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
err:
@@ -164,6 +169,9 @@
.fault = f2fs_filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = f2fs_vm_page_mkwrite,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+ .allow_speculation = filemap_allow_speculation,
+#endif
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
@@ -231,13 +239,13 @@
struct f2fs_inode_info *fi = F2FS_I(inode);
nid_t pino;
- down_write(&fi->i_sem);
+ f2fs_down_write(&fi->i_sem);
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
}
- up_write(&fi->i_sem);
+ f2fs_up_write(&fi->i_sem);
}
static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
@@ -275,20 +283,10 @@
};
unsigned int seq_id = 0;
- if (unlikely(f2fs_readonly(inode->i_sb) ||
- is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ if (unlikely(f2fs_readonly(inode->i_sb)))
return 0;
trace_f2fs_sync_file_enter(inode);
-
- if (trace_android_fs_fsync_start_enabled()) {
- char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
-
- path = android_fstrace_get_pathname(pathbuf,
- MAX_TRACE_PATHBUF_LEN, inode);
- trace_android_fs_fsync_start(inode,
- current->pid, path, current->comm);
- }
if (S_ISDIR(inode->i_mode))
goto go_write;
@@ -300,7 +298,7 @@
ret = file_write_and_wait_range(file, start, end);
clear_inode_flag(inode, FI_NEED_IPU);
- if (ret) {
+ if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
return ret;
}
@@ -331,9 +329,9 @@
* Both of fdatasync() and fsync() are able to be recovered from
* sudden-power-off.
*/
- down_read(&F2FS_I(inode)->i_sem);
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
cp_reason = need_do_checkpoint(inode);
- up_read(&F2FS_I(inode)->i_sem);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
/* all the dirty node pages should be flushed for POR */
@@ -395,9 +393,6 @@
f2fs_update_time(sbi, REQ_TIME);
out:
trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
- f2fs_trace_ios(NULL, 1);
- trace_android_fs_fsync_end(inode, start, end - start);
-
return ret;
}
@@ -408,32 +403,15 @@
return f2fs_do_sync_file(file, start, end, datasync, false);
}
-static pgoff_t __get_first_dirty_index(struct address_space *mapping,
- pgoff_t pgofs, int whence)
-{
- struct page *page;
- int nr_pages;
-
- if (whence != SEEK_DATA)
- return 0;
-
- /* find first dirty page index */
- nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
- 1, &page);
- if (!nr_pages)
- return ULONG_MAX;
- pgofs = page->index;
- put_page(page);
- return pgofs;
-}
-
-static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
- pgoff_t dirty, pgoff_t pgofs, int whence)
+static bool __found_offset(struct address_space *mapping, block_t blkaddr,
+ pgoff_t index, int whence)
{
switch (whence) {
case SEEK_DATA:
- if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
- __is_valid_data_blkaddr(blkaddr))
+ if (__is_valid_data_blkaddr(blkaddr))
+ return true;
+ if (blkaddr == NEW_ADDR &&
+ xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
return true;
break;
case SEEK_HOLE:
@@ -449,7 +427,7 @@
struct inode *inode = file->f_mapping->host;
loff_t maxbytes = inode->i_sb->s_maxbytes;
struct dnode_of_data dn;
- pgoff_t pgofs, end_offset, dirty;
+ pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
loff_t isize;
int err = 0;
@@ -461,15 +439,17 @@
goto fail;
/* handle inline data case */
- if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
- if (whence == SEEK_HOLE)
+ if (f2fs_has_inline_data(inode)) {
+ if (whence == SEEK_HOLE) {
data_ofs = isize;
- goto found;
+ goto found;
+ } else if (whence == SEEK_DATA) {
+ data_ofs = offset;
+ goto found;
+ }
}
pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
-
- dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -503,7 +483,7 @@
goto fail;
}
- if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+ if (__found_offset(file->f_mapping, blkaddr,
pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
@@ -529,6 +509,9 @@
struct inode *inode = file->f_mapping->host;
loff_t maxbytes = inode->i_sb->s_maxbytes;
+ if (f2fs_compressed_file(inode))
+ maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
@@ -548,18 +531,12 @@
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
- int err;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
-
- /* we don't need to use inline_data strictly */
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
@@ -596,7 +573,7 @@
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
- bool released = !F2FS_I(dn->inode)->i_compr_blocks;
+ bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
base = get_extra_isize(dn->inode);
@@ -651,7 +628,8 @@
*/
fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
dn->inode) + ofs;
- f2fs_update_extent_cache_range(dn, fofs, 0, len);
+ f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+ f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
dec_valid_block_count(sbi, dn->inode, nr_free);
}
dn->ofs_in_node = ofs;
@@ -713,7 +691,7 @@
free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
- if (free_from >= sbi->max_file_blocks)
+ if (free_from >= max_file_blocks(inode))
goto free_partial;
if (lock)
@@ -785,11 +763,14 @@
return err;
#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (from != free_from)
+ if (from != free_from) {
err = f2fs_truncate_partial_cluster(inode, from, lock);
+ if (err)
+ return err;
+ }
#endif
- return err;
+ return 0;
}
int f2fs_truncate(struct inode *inode)
@@ -887,14 +868,11 @@
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
if (ia_valid & ATTR_ATIME)
- inode->i_atime = timespec64_trunc(attr->ia_atime,
- inode->i_sb->s_time_gran);
+ inode->i_atime = attr->ia_atime;
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = timespec64_trunc(attr->ia_mtime,
- inode->i_sb->s_time_gran);
+ inode->i_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = timespec64_trunc(attr->ia_ctime,
- inode->i_sb->s_time_gran);
+ inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
@@ -915,6 +893,14 @@
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (unlikely(IS_APPEND(inode) &&
+ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
+ ATTR_GID | ATTR_TIMES_SET))))
+ return -EPERM;
if ((attr->ia_valid & ATTR_SIZE) &&
!f2fs_is_compress_backend_ready(inode))
@@ -974,8 +960,8 @@
return err;
}
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
@@ -985,8 +971,8 @@
* do not trim all blocks after i_size if target size is
* larger than i_size.
*/
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (err)
return err;
@@ -1000,8 +986,10 @@
if (attr->ia_valid & ATTR_MODE) {
err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
- if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
+
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ if (!err)
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
clear_inode_flag(inode, FI_ACL_MODE);
}
}
@@ -1124,8 +1112,8 @@
blk_start = (loff_t)pg_start << PAGE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_SHIFT;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache_range(inode, blk_start, blk_end - 1);
@@ -1133,8 +1121,8 @@
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
f2fs_unlock_op(sbi);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -1245,7 +1233,7 @@
if (ret)
return ret;
- ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (ret) {
f2fs_put_dnode(&dn);
return ret;
@@ -1367,8 +1355,8 @@
f2fs_balance_fs(sbi, true);
/* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
f2fs_lock_op(sbi);
f2fs_drop_extent_tree(inode);
@@ -1376,8 +1364,8 @@
ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
f2fs_unlock_op(sbi);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1407,15 +1395,13 @@
return ret;
/* write out all moved pages, if possible */
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
new_size = i_size_read(inode) - len;
- truncate_pagecache(inode, new_size);
-
ret = f2fs_truncate_blocks(inode, new_size, true);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
return ret;
@@ -1451,14 +1437,22 @@
ret = -ENOSPC;
break;
}
- if (dn->data_blkaddr != NEW_ADDR) {
- f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
+
+ if (dn->data_blkaddr == NEW_ADDR)
+ continue;
+
+ if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ ret = -EFSCORRUPTED;
+ break;
}
+
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+ dn->data_blkaddr = NEW_ADDR;
+ f2fs_set_data_blkaddr(dn);
}
- f2fs_update_extent_cache_range(dn, start, 0, index - start);
+ f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
return ret;
}
@@ -1514,8 +1508,8 @@
unsigned int end_offset;
pgoff_t end;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache_range(inode,
(loff_t)index << PAGE_SHIFT,
@@ -1527,8 +1521,8 @@
ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
@@ -1539,8 +1533,8 @@
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_balance_fs(sbi, dn.node_changed);
@@ -1596,9 +1590,9 @@
f2fs_balance_fs(sbi, true);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
return ret;
@@ -1613,8 +1607,8 @@
idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
/* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, offset);
while (!ret && idx > pg_start) {
@@ -1630,14 +1624,14 @@
idx + delta, nr, false);
f2fs_unlock_op(sbi);
}
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
/* write out all moved pages, if possible */
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
@@ -1651,9 +1645,10 @@
struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
.m_may_create = true };
- pgoff_t pg_end;
+ pgoff_t pg_start, pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
+ block_t expanded = 0;
int err;
err = inode_newsize_ok(inode, (len + offset));
@@ -1666,11 +1661,12 @@
f2fs_balance_fs(sbi, true);
+ pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
off_end = (offset + len) & (PAGE_SIZE - 1);
- map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
- map.m_len = pg_end - map.m_lblk;
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
if (off_end)
map.m_len++;
@@ -1678,51 +1674,49 @@
return 0;
if (f2fs_is_pinned_file(inode)) {
- block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
- sbi->log_blocks_per_seg;
- block_t done = 0;
+ block_t sec_blks = BLKS_PER_SEC(sbi);
+ block_t sec_len = roundup(map.m_len, sec_blks);
- if (map.m_len % sbi->blocks_per_seg)
- len += sbi->blocks_per_seg;
-
- map.m_len = sbi->blocks_per_seg;
+ map.m_len = sec_blks;
next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
- down_write(&sbi->gc_lock);
- err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+ f2fs_down_write(&sbi->gc_lock);
+ err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
if (err && err != -ENODATA && err != -EAGAIN)
goto out_err;
}
- down_write(&sbi->pin_sem);
- map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+ f2fs_down_write(&sbi->pin_sem);
f2fs_lock_op(sbi);
- f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
f2fs_unlock_op(sbi);
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
- up_write(&sbi->pin_sem);
- done += map.m_len;
- len -= map.m_len;
+ f2fs_up_write(&sbi->pin_sem);
+
+ expanded += map.m_len;
+ sec_len -= map.m_len;
map.m_lblk += map.m_len;
- if (!err && len)
+ if (!err && sec_len)
goto next_alloc;
- map.m_len = done;
+ map.m_len = expanded;
} else {
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ expanded = map.m_len;
}
out_err:
if (err) {
pgoff_t last_off;
- if (!map.m_len)
+ if (!expanded)
return err;
- last_off = map.m_lblk + map.m_len - 1;
+ last_off = pg_start + expanded - 1;
/* update new size to the failed position */
new_size = (last_off == pg_end) ? offset + len :
@@ -1773,6 +1767,10 @@
return -EOPNOTSUPP;
inode_lock(inode);
+
+ ret = file_modified(file);
+ if (ret)
+ goto out;
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
@@ -1846,7 +1844,8 @@
struct f2fs_inode_info *fi = F2FS_I(inode);
u32 masked_flags = fi->i_flags & mask;
- f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
+ /* mask can be shrunk by flags_valid selector */
+ iflags &= mask;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
@@ -1868,21 +1867,16 @@
if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
if (masked_flags & F2FS_COMPR_FL) {
- if (f2fs_disable_compressed_file(inode))
+ if (!f2fs_disable_compressed_file(inode))
return -EINVAL;
- }
- if (iflags & F2FS_NOCOMP_FL)
- return -EINVAL;
- if (iflags & F2FS_COMPR_FL) {
+ } else {
if (!f2fs_may_compress(inode))
return -EINVAL;
-
- set_compress_context(inode);
+ if (S_ISREG(inode->i_mode) && inode->i_size)
+ return -EINVAL;
+ if (set_compress_context(inode))
+ return -EOPNOTSUPP;
}
- }
- if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
- if (masked_flags & F2FS_COMPR_FL)
- return -EINVAL;
}
fi->i_flags = iflags | (fi->i_flags & ~mask);
@@ -2071,7 +2065,10 @@
inode_lock(inode);
- f2fs_disable_compressed_file(inode);
+ if (!f2fs_disable_compressed_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
if (f2fs_is_atomic_file(inode)) {
if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
@@ -2083,7 +2080,7 @@
if (ret)
goto out;
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2094,7 +2091,7 @@
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret) {
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
@@ -2107,7 +2104,7 @@
/* add inode in inmem_list first and set atomic_file */
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
@@ -2274,7 +2271,8 @@
if (ret) {
if (ret == -EROFS) {
ret = 0;
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_SHUTDOWN);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
trace_f2fs_shutdown(sbi, in, ret);
}
@@ -2284,32 +2282,28 @@
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
- sb = freeze_bdev(sb->s_bdev);
- if (IS_ERR(sb)) {
- ret = PTR_ERR(sb);
+ ret = freeze_bdev(sb->s_bdev);
+ if (ret)
goto out;
- }
- if (sb) {
- f2fs_stop_checkpoint(sbi, false);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
- thaw_bdev(sb->s_bdev, sb);
- }
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ thaw_bdev(sb->s_bdev);
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
ret = f2fs_sync_fs(sb, 1);
if (ret)
goto out;
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NEED_FSCK:
@@ -2418,7 +2412,7 @@
if (err)
return err;
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
goto got_it;
@@ -2437,7 +2431,7 @@
16))
err = -EFAULT;
out_err:
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
mnt_drop_write_file(filp);
return err;
}
@@ -2514,40 +2508,33 @@
return ret;
if (!sync) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
+ ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
mnt_drop_write_file(filp);
return ret;
}
-static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
+static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
- struct inode *inode = file_inode(filp);
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_gc_range range;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
u64 end;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
-
- if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
- sizeof(range)))
- return -EFAULT;
-
if (f2fs_readonly(sbi->sb))
return -EROFS;
- end = range.start + range.len;
- if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
+ end = range->start + range->len;
+ if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
end >= MAX_BLKADDR(sbi))
return -EINVAL;
@@ -2556,22 +2543,38 @@
return ret;
do_more:
- if (!range.sync) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!range->sync) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- down_write(&sbi->gc_lock);
+ f2fs_down_write(&sbi->gc_lock);
}
- ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
- range.start += BLKS_PER_SEC(sbi);
- if (range.start <= end)
+ ret = f2fs_gc(sbi, range->sync, true, false,
+ GET_SEGNO(sbi, range->start));
+ if (ret) {
+ if (ret == -EBUSY)
+ ret = -EAGAIN;
+ goto out;
+ }
+ range->start += BLKS_PER_SEC(sbi);
+ if (range->start <= end)
goto do_more;
out:
mnt_drop_write_file(filp);
return ret;
+}
+
+static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_gc_range range;
+
+ if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+ return __f2fs_ioc_gc_range(filp, &range);
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
@@ -2607,9 +2610,9 @@
{
struct inode *inode = file_inode(filp);
struct f2fs_map_blocks map = { .m_next_extent = NULL,
- .m_seg_type = NO_CHECK_TYPE ,
+ .m_seg_type = NO_CHECK_TYPE,
.m_may_create = false };
- struct extent_info ei = {0, 0, 0};
+ struct extent_info ei = {};
pgoff_t pg_start, pg_end, next_pgofs;
unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num;
@@ -2617,16 +2620,19 @@
bool fragmented = false;
int err;
- /* if in-place-update policy is enabled, don't waste time here */
- if (f2fs_should_update_inplace(inode, NULL))
- return -EINVAL;
-
pg_start = range->start >> PAGE_SHIFT;
pg_end = (range->start + range->len) >> PAGE_SHIFT;
f2fs_balance_fs(sbi, true);
inode_lock(inode);
+
+ /* if in-place-update policy is enabled, don't waste time here */
+ set_inode_flag(inode, FI_OPU_WRITE);
+ if (f2fs_should_update_inplace(inode, NULL)) {
+ err = -EINVAL;
+ goto out;
+ }
/* writeback all dirty pages in the range */
err = filemap_write_and_wait_range(inode->i_mapping, range->start,
@@ -2638,7 +2644,7 @@
* lookup mapping info in extent cache, skip defragmenting if physical
* block addresses are continuous.
*/
- if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
if (ei.fofs + ei.len >= pg_end)
goto out;
}
@@ -2709,7 +2715,7 @@
goto check;
}
- set_inode_flag(inode, FI_DO_DEFRAG);
+ set_inode_flag(inode, FI_SKIP_WRITES);
idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
@@ -2734,15 +2740,16 @@
if (map.m_lblk < pg_end && cnt < blk_per_seg)
goto do_map;
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
goto out;
}
clear_out:
- clear_inode_flag(inode, FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_SKIP_WRITES);
out:
+ clear_inode_flag(inode, FI_OPU_WRITE);
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;
@@ -2774,7 +2781,7 @@
return -EINVAL;
if (unlikely((range.start + range.len) >> PAGE_SHIFT >
- sbi->max_file_blocks))
+ max_file_blocks(inode)))
return -EINVAL;
err = mnt_want_write_file(filp);
@@ -2817,6 +2824,9 @@
if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
return -EOPNOTSUPP;
+
+ if (pos_out < 0 || pos_in < 0)
+ return -EINVAL;
if (src == dst) {
if (pos_in == pos_out)
@@ -2875,10 +2885,10 @@
f2fs_balance_fs(sbi, true);
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (src != dst) {
ret = -EBUSY;
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+ if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
goto out_src;
}
@@ -2896,9 +2906,9 @@
f2fs_unlock_op(sbi);
if (src != dst)
- up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
if (src != dst)
inode_unlock(dst);
@@ -2907,9 +2917,9 @@
return ret;
}
-static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+static int __f2fs_ioc_move_range(struct file *filp,
+ struct f2fs_move_range *range)
{
- struct f2fs_move_range range;
struct fd dst;
int err;
@@ -2917,11 +2927,7 @@
!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
- sizeof(range)))
- return -EFAULT;
-
- dst = fdget(range.dst_fd);
+ dst = fdget(range->dst_fd);
if (!dst.file)
return -EBADF;
@@ -2934,19 +2940,23 @@
if (err)
goto err_out;
- err = f2fs_move_file_range(filp, range.pos_in, dst.file,
- range.pos_out, range.len);
+ err = f2fs_move_file_range(filp, range->pos_in, dst.file,
+ range->pos_out, range->len);
mnt_drop_write_file(filp);
- if (err)
- goto err_out;
-
- if (copy_to_user((struct f2fs_move_range __user *)arg,
- &range, sizeof(range)))
- err = -EFAULT;
err_out:
fdput(dst);
return err;
+}
+
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_move_range range;
+
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+ return __f2fs_ioc_move_range(filp, &range);
}
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
@@ -2993,14 +3003,14 @@
end_segno = min(start_segno + range.segments, dev_end_segno);
while (start_segno < end_segno) {
- if (!down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
sm->last_victim[GC_CB] = end_segno + 1;
sm->last_victim[GC_GREEDY] = end_segno + 1;
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
- ret = f2fs_gc(sbi, true, true, start_segno);
+ ret = f2fs_gc(sbi, true, true, true, start_segno);
if (ret == -EAGAIN)
ret = 0;
else if (ret < 0)
@@ -3293,7 +3303,7 @@
if (ret)
goto out;
- if (f2fs_disable_compressed_file(inode)) {
+ if (!f2fs_disable_compressed_file(inode)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -3334,21 +3344,21 @@
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- end = F2FS_I_SB(inode)->max_file_blocks;
+ end = max_file_blocks(inode);
while (map.m_lblk < end) {
map.m_len = end - map.m_lblk;
- down_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
- up_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
if (err)
return err;
map.m_lblk = m_next_extent;
}
- return err;
+ return 0;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
@@ -3382,7 +3392,7 @@
if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
f2fs_warn(F2FS_I_SB(inode),
- "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
+ "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
inode->i_ino);
return -EOPNOTSUPP;
}
@@ -3398,7 +3408,15 @@
return fsverity_ioctl_measure(filp, (void __user *)arg);
}
-static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
+{
+ if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
+}
+
+static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3410,21 +3428,21 @@
if (!vbuf)
return -ENOMEM;
- down_read(&sbi->sb_lock);
+ f2fs_down_read(&sbi->sb_lock);
count = utf16s_to_utf8s(sbi->raw_super->volume_name,
ARRAY_SIZE(sbi->raw_super->volume_name),
UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
- up_read(&sbi->sb_lock);
+ f2fs_up_read(&sbi->sb_lock);
if (copy_to_user((char __user *)arg, vbuf,
min(FSLABEL_MAX, count)))
err = -EFAULT;
- kvfree(vbuf);
+ kfree(vbuf);
return err;
}
-static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3442,7 +3460,7 @@
if (err)
goto out;
- down_write(&sbi->sb_lock);
+ f2fs_down_write(&sbi->sb_lock);
memset(sbi->raw_super->volume_name, 0,
sizeof(sbi->raw_super->volume_name));
@@ -3452,7 +3470,7 @@
err = f2fs_commit_super(sbi, false);
- up_write(&sbi->sb_lock);
+ f2fs_up_write(&sbi->sb_lock);
mnt_drop_write_file(filp);
out:
@@ -3471,7 +3489,7 @@
if (!f2fs_compressed_file(inode))
return -EINVAL;
- blocks = F2FS_I(inode)->i_compr_blocks;
+ blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
return put_user(blocks, (u64 __user *)arg);
}
@@ -3556,12 +3574,13 @@
inode_lock(inode);
writecount = atomic_read(&inode->i_writecount);
- if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) {
+ if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
+ (!(filp->f_mode & FMODE_WRITE) && writecount)) {
ret = -EBUSY;
goto out;
}
- if (IS_IMMUTABLE(inode)) {
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@@ -3570,16 +3589,15 @@
if (ret)
goto out;
- if (!F2FS_I(inode)->i_compr_blocks)
- goto out;
-
- F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
- f2fs_set_inode_flags(inode);
+ set_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
+ goto out;
+
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3614,8 +3632,8 @@
released_blocks += ret;
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
out:
inode_unlock(inode);
@@ -3623,14 +3641,15 @@
if (ret >= 0) {
ret = put_user(released_blocks, (u64 __user *)arg);
- } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
+ } else if (released_blocks &&
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
- "iblocks=%llu, released=%u, compr_blocks=%llu, "
+ "iblocks=%llu, released=%u, compr_blocks=%u, "
"run fsck to fix.",
- __func__, inode->i_ino, (u64)inode->i_blocks,
+ __func__, inode->i_ino, inode->i_blocks,
released_blocks,
- F2FS_I(inode)->i_compr_blocks);
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
}
return ret;
@@ -3718,20 +3737,20 @@
if (ret)
return ret;
- if (F2FS_I(inode)->i_compr_blocks)
+ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
goto out;
f2fs_balance_fs(F2FS_I_SB(inode), true);
inode_lock(inode);
- if (!IS_IMMUTABLE(inode)) {
+ if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto unlock_inode;
}
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3766,12 +3785,11 @@
reserved_blocks += ret;
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (ret >= 0) {
- F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
- f2fs_set_inode_flags(inode);
+ clear_inode_flag(inode, FI_COMPRESS_RELEASED);
inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -3782,32 +3800,472 @@
if (ret >= 0) {
ret = put_user(reserved_blocks, (u64 __user *)arg);
- } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+ } else if (reserved_blocks &&
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
- "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+ "iblocks=%llu, reserved=%u, compr_blocks=%u, "
"run fsck to fix.",
- __func__, inode->i_ino, (u64)inode->i_blocks,
+ __func__, inode->i_ino, inode->i_blocks,
reserved_blocks,
- F2FS_I(inode)->i_compr_blocks);
+ atomic_read(&F2FS_I(inode)->i_compr_blocks));
}
return ret;
}
-long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
+ pgoff_t off, block_t block, block_t len, u32 flags)
{
- if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
- return -EIO;
- if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
- return -ENOSPC;
+ struct request_queue *q = bdev_get_queue(bdev);
+ sector_t sector = SECTOR_FROM_BLOCK(block);
+ sector_t nr_sects = SECTOR_FROM_BLOCK(len);
+ int ret = 0;
+ if (!q)
+ return -ENXIO;
+
+ if (flags & F2FS_TRIM_FILE_DISCARD)
+ ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
+ blk_queue_secure_erase(q) ?
+ BLKDEV_DISCARD_SECURE : 0);
+
+ if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
+ if (IS_ENCRYPTED(inode))
+ ret = fscrypt_zeroout_range(inode, off, block, len);
+ else
+ ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
+ GFP_NOFS, 0);
+ }
+
+ return ret;
+}
+
+static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct block_device *prev_bdev = NULL;
+ struct f2fs_sectrim_range range;
+ pgoff_t index, pg_end, prev_index = 0;
+ block_t prev_block = 0, len = 0;
+ loff_t end_addr;
+ bool to_end = false;
+ int ret = 0;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
+ !S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
+ !f2fs_hw_support_discard(sbi)) ||
+ ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
+ IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
+ return -EOPNOTSUPP;
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
+ range.start >= inode->i_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (range.len == 0)
+ goto err;
+
+ if (inode->i_size - range.start > range.len) {
+ end_addr = range.start + range.len;
+ } else {
+ end_addr = range.len == (u64)-1 ?
+ sbi->sb->s_maxbytes : inode->i_size;
+ to_end = true;
+ }
+
+ if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
+ (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ index = F2FS_BYTES_TO_BLK(range.start);
+ pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
+
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto err;
+
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ ret = filemap_write_and_wait_range(mapping, range.start,
+ to_end ? LLONG_MAX : end_addr - 1);
+ if (ret)
+ goto out;
+
+ truncate_inode_pages_range(mapping, range.start,
+ to_end ? -1 : end_addr - 1);
+
+ while (index < pg_end) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+ int i;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ index = f2fs_get_next_page_offset(&dn, index);
+ continue;
+ }
+ goto out;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, pg_end - index);
+ for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
+ struct block_device *cur_bdev;
+ block_t blkaddr = f2fs_data_blkaddr(&dn);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ ret = -EFSCORRUPTED;
+ f2fs_put_dnode(&dn);
+ goto out;
+ }
+
+ cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
+ if (f2fs_is_multi_device(sbi)) {
+ int di = f2fs_target_device_index(sbi, blkaddr);
+
+ blkaddr -= FDEV(di).start_blk;
+ }
+
+ if (len) {
+ if (prev_bdev == cur_bdev &&
+ index == prev_index + len &&
+ blkaddr == prev_block + len) {
+ len++;
+ } else {
+ ret = f2fs_secure_erase(prev_bdev,
+ inode, prev_index, prev_block,
+ len, range.flags);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ goto out;
+ }
+
+ len = 0;
+ }
+ }
+
+ if (!len) {
+ prev_bdev = cur_bdev;
+ prev_index = index;
+ prev_block = blkaddr;
+ len = 1;
+ }
+ }
+
+ f2fs_put_dnode(&dn);
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ cond_resched();
+ }
+
+ if (len)
+ ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
+ prev_block, len, range.flags);
+out:
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+err:
+ inode_unlock(inode);
+ file_end_write(filp);
+
+ return ret;
+}
+
+static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_comp_option option;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ inode_lock_shared(inode);
+
+ if (!f2fs_compressed_file(inode)) {
+ inode_unlock_shared(inode);
+ return -ENODATA;
+ }
+
+ option.algorithm = F2FS_I(inode)->i_compress_algorithm;
+ option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
+
+ inode_unlock_shared(inode);
+
+ if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
+ sizeof(option)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_comp_option option;
+ int ret = 0;
+
+ if (!f2fs_sb_has_compression(sbi))
+ return -EOPNOTSUPP;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
+ sizeof(option)))
+ return -EFAULT;
+
+ if (!f2fs_compressed_file(inode) ||
+ option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+ option.algorithm >= COMPRESS_MAX)
+ return -EINVAL;
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (inode->i_size != 0) {
+ ret = -EFBIG;
+ goto out;
+ }
+
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+ F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ if (!f2fs_is_compress_backend_ready(inode))
+ f2fs_warn(sbi, "compression algorithm is successfully set, "
+ "but current kernel doesn't support this algorithm.");
+out:
+ inode_unlock(inode);
+ file_end_write(filp);
+
+ return ret;
+}
+
+static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
+{
+ DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ pgoff_t redirty_idx = page_idx;
+ int i, page_len = 0, ret = 0;
+
+ page_cache_ra_unbounded(&ractl, len, 0);
+
+ for (i = 0; i < len; i++, page_idx++) {
+ page = read_cache_page(mapping, page_idx, NULL, NULL);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ break;
+ }
+ page_len++;
+ }
+
+ for (i = 0; i < page_len; i++, redirty_idx++) {
+ page = find_lock_page(mapping, redirty_idx);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+ f2fs_put_page(page, 0);
+ }
+
+ return ret;
+}
+
+static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int count, ret;
+
+ if (!f2fs_sb_has_compression(sbi) ||
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
+ return -EOPNOTSUPP;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ if (!f2fs_is_compress_backend_ready(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ goto out;
+
+ if (!atomic_read(&fi->i_compr_blocks))
+ goto out;
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ count = last_idx - page_idx;
+ while (count) {
+ int len = min(cluster_size, count);
+
+ ret = redirty_blocks(inode, page_idx, len);
+ if (ret < 0)
+ break;
+
+ if (get_dirty_pages(inode) >= blk_per_seg)
+ filemap_fdatawrite(inode->i_mapping);
+
+ count -= len;
+ page_idx += len;
+ }
+
+ if (!ret)
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0,
+ LLONG_MAX);
+
+ if (ret)
+ f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
+ __func__, ret);
+out:
+ inode_unlock(inode);
+ file_end_write(filp);
+
+ return ret;
+}
+
+static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int count, ret;
+
+ if (!f2fs_sb_has_compression(sbi) ||
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
+ return -EOPNOTSUPP;
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ if (!f2fs_is_compress_backend_ready(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ goto out;
+
+ set_inode_flag(inode, FI_ENABLE_COMPRESS);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ count = last_idx - page_idx;
+ while (count) {
+ int len = min(cluster_size, count);
+
+ ret = redirty_blocks(inode, page_idx, len);
+ if (ret < 0)
+ break;
+
+ if (get_dirty_pages(inode) >= blk_per_seg)
+ filemap_fdatawrite(inode->i_mapping);
+
+ count -= len;
+ page_idx += len;
+ }
+
+ if (!ret)
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0,
+ LLONG_MAX);
+
+ clear_inode_flag(inode, FI_ENABLE_COMPRESS);
+
+ if (ret)
+ f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
+ __func__, ret);
+out:
+ inode_unlock(inode);
+ file_end_write(filp);
+
+ return ret;
+}
+
+static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
switch (cmd) {
- case F2FS_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
return f2fs_ioc_getflags(filp, arg);
- case F2FS_IOC_SETFLAGS:
+ case FS_IOC_SETFLAGS:
return f2fs_ioc_setflags(filp, arg);
- case F2FS_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
return f2fs_ioc_getversion(filp, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
return f2fs_ioc_start_atomic_write(filp);
@@ -3823,11 +4281,11 @@
return f2fs_ioc_shutdown(filp, arg);
case FITRIM:
return f2fs_ioc_fitrim(filp, arg);
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
return f2fs_ioc_set_encryption_policy(filp, arg);
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
return f2fs_ioc_get_encryption_policy(filp, arg);
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
return f2fs_ioc_get_encryption_pwsalt(filp, arg);
case FS_IOC_GET_ENCRYPTION_POLICY_EX:
return f2fs_ioc_get_encryption_policy_ex(filp, arg);
@@ -3855,9 +4313,9 @@
return f2fs_ioc_flush_device(filp, arg);
case F2FS_IOC_GET_FEATURES:
return f2fs_ioc_get_features(filp, arg);
- case F2FS_IOC_FSGETXATTR:
+ case FS_IOC_FSGETXATTR:
return f2fs_ioc_fsgetxattr(filp, arg);
- case F2FS_IOC_FSSETXATTR:
+ case FS_IOC_FSSETXATTR:
return f2fs_ioc_fssetxattr(filp, arg);
case F2FS_IOC_GET_PIN_FILE:
return f2fs_ioc_get_pin_file(filp, arg);
@@ -3871,19 +4329,41 @@
return f2fs_ioc_enable_verity(filp, arg);
case FS_IOC_MEASURE_VERITY:
return f2fs_ioc_measure_verity(filp, arg);
- case F2FS_IOC_GET_VOLUME_NAME:
- return f2fs_get_volume_name(filp, arg);
- case F2FS_IOC_SET_VOLUME_NAME:
- return f2fs_set_volume_name(filp, arg);
+ case FS_IOC_READ_VERITY_METADATA:
+ return f2fs_ioc_read_verity_metadata(filp, arg);
+ case FS_IOC_GETFSLABEL:
+ return f2fs_ioc_getfslabel(filp, arg);
+ case FS_IOC_SETFSLABEL:
+ return f2fs_ioc_setfslabel(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
return f2fs_get_compress_blocks(filp, arg);
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
return f2fs_release_compress_blocks(filp, arg);
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
return f2fs_reserve_compress_blocks(filp, arg);
+ case F2FS_IOC_SEC_TRIM_FILE:
+ return f2fs_sec_trim_file(filp, arg);
+ case F2FS_IOC_GET_COMPRESS_OPTION:
+ return f2fs_ioc_get_compress_option(filp, arg);
+ case F2FS_IOC_SET_COMPRESS_OPTION:
+ return f2fs_ioc_set_compress_option(filp, arg);
+ case F2FS_IOC_DECOMPRESS_FILE:
+ return f2fs_ioc_decompress_file(filp, arg);
+ case F2FS_IOC_COMPRESS_FILE:
+ return f2fs_ioc_compress_file(filp, arg);
default:
return -ENOTTY;
}
+}
+
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
+ return -EIO;
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
+ return -ENOSPC;
+
+ return __f2fs_ioctl(filp, cmd, arg);
}
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -3926,6 +4406,16 @@
}
} else {
inode_lock(inode);
+ }
+
+ if (unlikely(IS_IMMUTABLE(inode))) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EPERM;
+ goto unlock;
}
ret = generic_write_checks(iocb, from);
@@ -3986,12 +4476,18 @@
clear_inode_flag(inode, FI_NO_PREALLOC);
/* if we couldn't write data, we should deallocate blocks. */
- if (preallocated && i_size_read(inode) < target_size)
+ if (preallocated && i_size_read(inode) < target_size) {
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
f2fs_truncate(inode);
+ f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ }
if (ret > 0)
f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
}
+unlock:
inode_unlock(inode);
out:
trace_f2fs_file_write_iter(inode, iocb->ki_pos,
@@ -4002,27 +4498,87 @@
}
#ifdef CONFIG_COMPAT
+struct compat_f2fs_gc_range {
+ u32 sync;
+ compat_u64 start;
+ compat_u64 len;
+};
+#define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
+ struct compat_f2fs_gc_range)
+
+static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
+{
+ struct compat_f2fs_gc_range __user *urange;
+ struct f2fs_gc_range range;
+ int err;
+
+ urange = compat_ptr(arg);
+ err = get_user(range.sync, &urange->sync);
+ err |= get_user(range.start, &urange->start);
+ err |= get_user(range.len, &urange->len);
+ if (err)
+ return -EFAULT;
+
+ return __f2fs_ioc_gc_range(file, &range);
+}
+
+struct compat_f2fs_move_range {
+ u32 dst_fd;
+ compat_u64 pos_in;
+ compat_u64 pos_out;
+ compat_u64 len;
+};
+#define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct compat_f2fs_move_range)
+
+static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
+{
+ struct compat_f2fs_move_range __user *urange;
+ struct f2fs_move_range range;
+ int err;
+
+ urange = compat_ptr(arg);
+ err = get_user(range.dst_fd, &urange->dst_fd);
+ err |= get_user(range.pos_in, &urange->pos_in);
+ err |= get_user(range.pos_out, &urange->pos_out);
+ err |= get_user(range.len, &urange->len);
+ if (err)
+ return -EFAULT;
+
+ return __f2fs_ioc_move_range(file, &range);
+}
+
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
+ return -EIO;
+ if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
+ return -ENOSPC;
+
switch (cmd) {
- case F2FS_IOC32_GETFLAGS:
- cmd = F2FS_IOC_GETFLAGS;
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
break;
- case F2FS_IOC32_SETFLAGS:
- cmd = F2FS_IOC_SETFLAGS;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
break;
- case F2FS_IOC32_GETVERSION:
- cmd = F2FS_IOC_GETVERSION;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
break;
+ case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
+ return f2fs_compat_ioc_gc_range(file, arg);
+ case F2FS_IOC32_MOVE_RANGE:
+ return f2fs_compat_ioc_move_range(file, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
case F2FS_IOC_START_VOLATILE_WRITE:
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
case F2FS_IOC_ABORT_VOLATILE_WRITE:
case F2FS_IOC_SHUTDOWN:
- case F2FS_IOC_SET_ENCRYPTION_POLICY:
- case F2FS_IOC_GET_ENCRYPTION_PWSALT:
- case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case FITRIM:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
case FS_IOC_GET_ENCRYPTION_POLICY_EX:
case FS_IOC_ADD_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY:
@@ -4030,30 +4586,34 @@
case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
case FS_IOC_GET_ENCRYPTION_NONCE:
case F2FS_IOC_GARBAGE_COLLECT:
- case F2FS_IOC_GARBAGE_COLLECT_RANGE:
case F2FS_IOC_WRITE_CHECKPOINT:
case F2FS_IOC_DEFRAGMENT:
- case F2FS_IOC_MOVE_RANGE:
case F2FS_IOC_FLUSH_DEVICE:
case F2FS_IOC_GET_FEATURES:
- case F2FS_IOC_FSGETXATTR:
- case F2FS_IOC_FSSETXATTR:
+ case FS_IOC_FSGETXATTR:
+ case FS_IOC_FSSETXATTR:
case F2FS_IOC_GET_PIN_FILE:
case F2FS_IOC_SET_PIN_FILE:
case F2FS_IOC_PRECACHE_EXTENTS:
case F2FS_IOC_RESIZE_FS:
case FS_IOC_ENABLE_VERITY:
case FS_IOC_MEASURE_VERITY:
- case F2FS_IOC_GET_VOLUME_NAME:
- case F2FS_IOC_SET_VOLUME_NAME:
+ case FS_IOC_READ_VERITY_METADATA:
+ case FS_IOC_GETFSLABEL:
+ case FS_IOC_SETFSLABEL:
case F2FS_IOC_GET_COMPRESS_BLOCKS:
case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+ case F2FS_IOC_SEC_TRIM_FILE:
+ case F2FS_IOC_GET_COMPRESS_OPTION:
+ case F2FS_IOC_SET_COMPRESS_OPTION:
+ case F2FS_IOC_DECOMPRESS_FILE:
+ case F2FS_IOC_COMPRESS_FILE:
break;
default:
return -ENOIOCTLCMD;
}
- return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+ return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
--
Gitblit v1.6.2