.. | .. |
---|
#ifdef CONFIG_LOCKDEP
/*
 * Put an EA (extended-attribute) inode's locks into their own lockdep
 * classes.  EA inode locks are taken while the parent inode's locks are
 * already held, so without distinct subclasses lockdep would report
 * false-positive recursive-locking deadlocks.
 */
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	struct ext4_inode_info *ei = EXT4_I(ea_inode);

	/* EA inode i_rwsem nests one level under the parent's i_rwsem. */
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
	(void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
	/*
	 * NOTE(review): the cast above guards configs where
	 * lockdep_set_subclass() expands to nothing, but this whole
	 * function is already under #ifdef CONFIG_LOCKDEP — presumably a
	 * leftover from a build-config variant; confirm before removing.
	 */
	lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
}
#endif
---|
129 | 133 | |
---|
.. | .. |
---|
386 | 390 | struct inode *inode; |
---|
387 | 391 | int err; |
---|
388 | 392 | |
---|
389 | | - inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL); |
---|
| 393 | + /* |
---|
| 394 | + * We have to check for this corruption early as otherwise |
---|
| 395 | + * iget_locked() could wait indefinitely for the state of our |
---|
| 396 | + * parent inode. |
---|
| 397 | + */ |
---|
| 398 | + if (parent->i_ino == ea_ino) { |
---|
| 399 | + ext4_error(parent->i_sb, |
---|
| 400 | + "Parent and EA inode have the same ino %lu", ea_ino); |
---|
| 401 | + return -EFSCORRUPTED; |
---|
| 402 | + } |
---|
| 403 | + |
---|
| 404 | + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE); |
---|
390 | 405 | if (IS_ERR(inode)) { |
---|
391 | 406 | err = PTR_ERR(inode); |
---|
392 | 407 | ext4_error(parent->i_sb, |
---|
.. | .. |
---|
394 | 409 | err); |
---|
395 | 410 | return err; |
---|
396 | 411 | } |
---|
397 | | - |
---|
398 | | - if (is_bad_inode(inode)) { |
---|
399 | | - ext4_error(parent->i_sb, |
---|
400 | | - "error while reading EA inode %lu is_bad_inode", |
---|
401 | | - ea_ino); |
---|
402 | | - err = -EIO; |
---|
403 | | - goto error; |
---|
404 | | - } |
---|
405 | | - |
---|
406 | | - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { |
---|
407 | | - ext4_error(parent->i_sb, |
---|
408 | | - "EA inode %lu does not have EXT4_EA_INODE_FL flag", |
---|
409 | | - ea_ino); |
---|
410 | | - err = -EINVAL; |
---|
411 | | - goto error; |
---|
412 | | - } |
---|
413 | | - |
---|
414 | 412 | ext4_xattr_inode_set_class(inode); |
---|
415 | 413 | |
---|
416 | 414 | /* |
---|
.. | .. |
---|
431 | 429 | |
---|
432 | 430 | *ea_inode = inode; |
---|
433 | 431 | return 0; |
---|
434 | | -error: |
---|
435 | | - iput(inode); |
---|
436 | | - return err; |
---|
| 432 | +} |
---|
| 433 | + |
---|
| 434 | +/* Remove entry from mbcache when EA inode is getting evicted */ |
---|
| 435 | +void ext4_evict_ea_inode(struct inode *inode) |
---|
| 436 | +{ |
---|
| 437 | + struct mb_cache_entry *oe; |
---|
| 438 | + |
---|
| 439 | + if (!EA_INODE_CACHE(inode)) |
---|
| 440 | + return; |
---|
| 441 | + /* Wait for entry to get unused so that we can remove it */ |
---|
| 442 | + while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode), |
---|
| 443 | + ext4_xattr_inode_get_hash(inode), inode->i_ino))) { |
---|
| 444 | + mb_cache_entry_wait_unused(oe); |
---|
| 445 | + mb_cache_entry_put(EA_INODE_CACHE(inode), oe); |
---|
| 446 | + } |
---|
437 | 447 | } |
---|
438 | 448 | |
---|
439 | 449 | static int |
---|
.. | .. |
---|
972 | 982 | static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, |
---|
973 | 983 | int ref_change) |
---|
974 | 984 | { |
---|
975 | | - struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode); |
---|
976 | 985 | struct ext4_iloc iloc; |
---|
977 | 986 | s64 ref_count; |
---|
978 | | - u32 hash; |
---|
979 | 987 | int ret; |
---|
980 | 988 | |
---|
981 | 989 | inode_lock(ea_inode); |
---|
.. | .. |
---|
998 | 1006 | |
---|
999 | 1007 | set_nlink(ea_inode, 1); |
---|
1000 | 1008 | ext4_orphan_del(handle, ea_inode); |
---|
1001 | | - |
---|
1002 | | - if (ea_inode_cache) { |
---|
1003 | | - hash = ext4_xattr_inode_get_hash(ea_inode); |
---|
1004 | | - mb_cache_entry_create(ea_inode_cache, |
---|
1005 | | - GFP_NOFS, hash, |
---|
1006 | | - ea_inode->i_ino, |
---|
1007 | | - true /* reusable */); |
---|
1008 | | - } |
---|
1009 | 1009 | } |
---|
1010 | 1010 | } else { |
---|
1011 | 1011 | WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", |
---|
.. | .. |
---|
1018 | 1018 | |
---|
1019 | 1019 | clear_nlink(ea_inode); |
---|
1020 | 1020 | ext4_orphan_add(handle, ea_inode); |
---|
1021 | | - |
---|
1022 | | - if (ea_inode_cache) { |
---|
1023 | | - hash = ext4_xattr_inode_get_hash(ea_inode); |
---|
1024 | | - mb_cache_entry_delete(ea_inode_cache, hash, |
---|
1025 | | - ea_inode->i_ino); |
---|
1026 | | - } |
---|
1027 | 1021 | } |
---|
1028 | 1022 | } |
---|
1029 | 1023 | |
---|
.. | .. |
---|
1231 | 1225 | if (error) |
---|
1232 | 1226 | goto out; |
---|
1233 | 1227 | |
---|
| 1228 | +retry_ref: |
---|
1234 | 1229 | lock_buffer(bh); |
---|
1235 | 1230 | hash = le32_to_cpu(BHDR(bh)->h_hash); |
---|
1236 | 1231 | ref = le32_to_cpu(BHDR(bh)->h_refcount); |
---|
.. | .. |
---|
1240 | 1235 | * This must happen under buffer lock for |
---|
1241 | 1236 | * ext4_xattr_block_set() to reliably detect freed block |
---|
1242 | 1237 | */ |
---|
1243 | | - if (ea_block_cache) |
---|
1244 | | - mb_cache_entry_delete(ea_block_cache, hash, |
---|
1245 | | - bh->b_blocknr); |
---|
| 1238 | + if (ea_block_cache) { |
---|
| 1239 | + struct mb_cache_entry *oe; |
---|
| 1240 | + |
---|
| 1241 | + oe = mb_cache_entry_delete_or_get(ea_block_cache, hash, |
---|
| 1242 | + bh->b_blocknr); |
---|
| 1243 | + if (oe) { |
---|
| 1244 | + unlock_buffer(bh); |
---|
| 1245 | + mb_cache_entry_wait_unused(oe); |
---|
| 1246 | + mb_cache_entry_put(ea_block_cache, oe); |
---|
| 1247 | + goto retry_ref; |
---|
| 1248 | + } |
---|
| 1249 | + } |
---|
1246 | 1250 | get_bh(bh); |
---|
1247 | 1251 | unlock_buffer(bh); |
---|
1248 | 1252 | |
---|
.. | .. |
---|
1266 | 1270 | ce = mb_cache_entry_get(ea_block_cache, hash, |
---|
1267 | 1271 | bh->b_blocknr); |
---|
1268 | 1272 | if (ce) { |
---|
1269 | | - ce->e_reusable = 1; |
---|
| 1273 | + set_bit(MBE_REUSABLE_B, &ce->e_flags); |
---|
1270 | 1274 | mb_cache_entry_put(ea_block_cache, ce); |
---|
1271 | 1275 | } |
---|
1272 | 1276 | } |
---|
.. | .. |
---|
1406 | 1410 | uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) }; |
---|
1407 | 1411 | int err; |
---|
1408 | 1412 | |
---|
| 1413 | + if (inode->i_sb->s_root == NULL) { |
---|
| 1414 | + ext4_warning(inode->i_sb, |
---|
| 1415 | + "refuse to create EA inode when umounting"); |
---|
| 1416 | + WARN_ON(1); |
---|
| 1417 | + return ERR_PTR(-EINVAL); |
---|
| 1418 | + } |
---|
| 1419 | + |
---|
1409 | 1420 | /* |
---|
1410 | 1421 | * Let the next inode be the goal, so we try and allocate the EA inode |
---|
1411 | 1422 | * in the same group, or nearby one. |
---|
.. | .. |
---|
1425 | 1436 | if (!err) |
---|
1426 | 1437 | err = ext4_inode_attach_jinode(ea_inode); |
---|
1427 | 1438 | if (err) { |
---|
| 1439 | + if (ext4_xattr_inode_dec_ref(handle, ea_inode)) |
---|
| 1440 | + ext4_warning_inode(ea_inode, |
---|
| 1441 | + "cleanup dec ref error %d", err); |
---|
1428 | 1442 | iput(ea_inode); |
---|
1429 | 1443 | return ERR_PTR(err); |
---|
1430 | 1444 | } |
---|
.. | .. |
---|
1470 | 1484 | |
---|
1471 | 1485 | while (ce) { |
---|
1472 | 1486 | ea_inode = ext4_iget(inode->i_sb, ce->e_value, |
---|
1473 | | - EXT4_IGET_NORMAL); |
---|
1474 | | - if (!IS_ERR(ea_inode) && |
---|
1475 | | - !is_bad_inode(ea_inode) && |
---|
1476 | | - (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) && |
---|
1477 | | - i_size_read(ea_inode) == value_len && |
---|
| 1487 | + EXT4_IGET_EA_INODE); |
---|
| 1488 | + if (IS_ERR(ea_inode)) |
---|
| 1489 | + goto next_entry; |
---|
| 1490 | + ext4_xattr_inode_set_class(ea_inode); |
---|
| 1491 | + if (i_size_read(ea_inode) == value_len && |
---|
1478 | 1492 | !ext4_xattr_inode_read(ea_inode, ea_data, value_len) && |
---|
1479 | 1493 | !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data, |
---|
1480 | 1494 | value_len) && |
---|
.. | .. |
---|
1484 | 1498 | kvfree(ea_data); |
---|
1485 | 1499 | return ea_inode; |
---|
1486 | 1500 | } |
---|
1487 | | - |
---|
1488 | | - if (!IS_ERR(ea_inode)) |
---|
1489 | | - iput(ea_inode); |
---|
| 1501 | + iput(ea_inode); |
---|
| 1502 | + next_entry: |
---|
1490 | 1503 | ce = mb_cache_entry_find_next(ea_inode_cache, ce); |
---|
1491 | 1504 | } |
---|
1492 | 1505 | kvfree(ea_data); |
---|
.. | .. |
---|
1614 | 1627 | * If storing the value in an external inode is an option, |
---|
1615 | 1628 | * reserve space for xattr entries/names in the external |
---|
1616 | 1629 | * attribute block so that a long value does not occupy the |
---|
1617 | | - * whole space and prevent futher entries being added. |
---|
| 1630 | + * whole space and prevent further entries being added. |
---|
1618 | 1631 | */ |
---|
1619 | 1632 | if (ext4_has_feature_ea_inode(inode->i_sb) && |
---|
1620 | 1633 | new_size && is_block && |
---|
.. | .. |
---|
1712 | 1725 | memmove(here, (void *)here + size, |
---|
1713 | 1726 | (void *)last - (void *)here + sizeof(__u32)); |
---|
1714 | 1727 | memset(last, 0, size); |
---|
| 1728 | + |
---|
| 1729 | + /* |
---|
| 1730 | + * Update i_inline_off - moved ibody region might contain |
---|
| 1731 | + * system.data attribute. Handling a failure here won't |
---|
| 1732 | + * cause other complications for setting an xattr. |
---|
| 1733 | + */ |
---|
| 1734 | + if (!is_block && ext4_has_inline_data(inode)) { |
---|
| 1735 | + ret = ext4_find_inline_data_nolock(inode); |
---|
| 1736 | + if (ret) { |
---|
| 1737 | + ext4_warning_inode(inode, |
---|
| 1738 | + "unable to update i_inline_off"); |
---|
| 1739 | + goto out; |
---|
| 1740 | + } |
---|
| 1741 | + } |
---|
1715 | 1742 | } else if (s->not_found) { |
---|
1716 | 1743 | /* Insert new name. */ |
---|
1717 | 1744 | size_t size = EXT4_XATTR_LEN(name_len); |
---|
.. | .. |
---|
1851 | 1878 | #define header(x) ((struct ext4_xattr_header *)(x)) |
---|
1852 | 1879 | |
---|
1853 | 1880 | if (s->base) { |
---|
| 1881 | + int offset = (char *)s->here - bs->bh->b_data; |
---|
| 1882 | + |
---|
1854 | 1883 | BUFFER_TRACE(bs->bh, "get_write_access"); |
---|
1855 | 1884 | error = ext4_journal_get_write_access(handle, bs->bh); |
---|
1856 | 1885 | if (error) |
---|
.. | .. |
---|
1865 | 1894 | * ext4_xattr_block_set() to reliably detect modified |
---|
1866 | 1895 | * block |
---|
1867 | 1896 | */ |
---|
1868 | | - if (ea_block_cache) |
---|
1869 | | - mb_cache_entry_delete(ea_block_cache, hash, |
---|
1870 | | - bs->bh->b_blocknr); |
---|
| 1897 | + if (ea_block_cache) { |
---|
| 1898 | + struct mb_cache_entry *oe; |
---|
| 1899 | + |
---|
| 1900 | + oe = mb_cache_entry_delete_or_get(ea_block_cache, |
---|
| 1901 | + hash, bs->bh->b_blocknr); |
---|
| 1902 | + if (oe) { |
---|
| 1903 | + /* |
---|
| 1904 | + * Xattr block is getting reused. Leave |
---|
| 1905 | + * it alone. |
---|
| 1906 | + */ |
---|
| 1907 | + mb_cache_entry_put(ea_block_cache, oe); |
---|
| 1908 | + goto clone_block; |
---|
| 1909 | + } |
---|
| 1910 | + } |
---|
1871 | 1911 | ea_bdebug(bs->bh, "modifying in-place"); |
---|
1872 | 1912 | error = ext4_xattr_set_entry(i, s, handle, inode, |
---|
1873 | 1913 | true /* is_block */); |
---|
.. | .. |
---|
1882 | 1922 | if (error) |
---|
1883 | 1923 | goto cleanup; |
---|
1884 | 1924 | goto inserted; |
---|
1885 | | - } else { |
---|
1886 | | - int offset = (char *)s->here - bs->bh->b_data; |
---|
| 1925 | + } |
---|
| 1926 | +clone_block: |
---|
| 1927 | + unlock_buffer(bs->bh); |
---|
| 1928 | + ea_bdebug(bs->bh, "cloning"); |
---|
| 1929 | + s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS); |
---|
| 1930 | + error = -ENOMEM; |
---|
| 1931 | + if (s->base == NULL) |
---|
| 1932 | + goto cleanup; |
---|
| 1933 | + s->first = ENTRY(header(s->base)+1); |
---|
| 1934 | + header(s->base)->h_refcount = cpu_to_le32(1); |
---|
| 1935 | + s->here = ENTRY(s->base + offset); |
---|
| 1936 | + s->end = s->base + bs->bh->b_size; |
---|
1887 | 1937 | |
---|
1888 | | - unlock_buffer(bs->bh); |
---|
1889 | | - ea_bdebug(bs->bh, "cloning"); |
---|
1890 | | - s->base = kmalloc(bs->bh->b_size, GFP_NOFS); |
---|
1891 | | - error = -ENOMEM; |
---|
1892 | | - if (s->base == NULL) |
---|
| 1938 | + /* |
---|
| 1939 | + * If existing entry points to an xattr inode, we need |
---|
| 1940 | + * to prevent ext4_xattr_set_entry() from decrementing |
---|
| 1941 | + * ref count on it because the reference belongs to the |
---|
| 1942 | + * original block. In this case, make the entry look |
---|
| 1943 | + * like it has an empty value. |
---|
| 1944 | + */ |
---|
| 1945 | + if (!s->not_found && s->here->e_value_inum) { |
---|
| 1946 | + ea_ino = le32_to_cpu(s->here->e_value_inum); |
---|
| 1947 | + error = ext4_xattr_inode_iget(inode, ea_ino, |
---|
| 1948 | + le32_to_cpu(s->here->e_hash), |
---|
| 1949 | + &tmp_inode); |
---|
| 1950 | + if (error) |
---|
1893 | 1951 | goto cleanup; |
---|
1894 | | - memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); |
---|
1895 | | - s->first = ENTRY(header(s->base)+1); |
---|
1896 | | - header(s->base)->h_refcount = cpu_to_le32(1); |
---|
1897 | | - s->here = ENTRY(s->base + offset); |
---|
1898 | | - s->end = s->base + bs->bh->b_size; |
---|
1899 | 1952 | |
---|
1900 | | - /* |
---|
1901 | | - * If existing entry points to an xattr inode, we need |
---|
1902 | | - * to prevent ext4_xattr_set_entry() from decrementing |
---|
1903 | | - * ref count on it because the reference belongs to the |
---|
1904 | | - * original block. In this case, make the entry look |
---|
1905 | | - * like it has an empty value. |
---|
1906 | | - */ |
---|
1907 | | - if (!s->not_found && s->here->e_value_inum) { |
---|
1908 | | - ea_ino = le32_to_cpu(s->here->e_value_inum); |
---|
1909 | | - error = ext4_xattr_inode_iget(inode, ea_ino, |
---|
1910 | | - le32_to_cpu(s->here->e_hash), |
---|
1911 | | - &tmp_inode); |
---|
1912 | | - if (error) |
---|
1913 | | - goto cleanup; |
---|
1914 | | - |
---|
1915 | | - if (!ext4_test_inode_state(tmp_inode, |
---|
1916 | | - EXT4_STATE_LUSTRE_EA_INODE)) { |
---|
1917 | | - /* |
---|
1918 | | - * Defer quota free call for previous |
---|
1919 | | - * inode until success is guaranteed. |
---|
1920 | | - */ |
---|
1921 | | - old_ea_inode_quota = le32_to_cpu( |
---|
1922 | | - s->here->e_value_size); |
---|
1923 | | - } |
---|
1924 | | - iput(tmp_inode); |
---|
1925 | | - |
---|
1926 | | - s->here->e_value_inum = 0; |
---|
1927 | | - s->here->e_value_size = 0; |
---|
| 1953 | + if (!ext4_test_inode_state(tmp_inode, |
---|
| 1954 | + EXT4_STATE_LUSTRE_EA_INODE)) { |
---|
| 1955 | + /* |
---|
| 1956 | + * Defer quota free call for previous |
---|
| 1957 | + * inode until success is guaranteed. |
---|
| 1958 | + */ |
---|
| 1959 | + old_ea_inode_quota = le32_to_cpu( |
---|
| 1960 | + s->here->e_value_size); |
---|
1928 | 1961 | } |
---|
| 1962 | + iput(tmp_inode); |
---|
| 1963 | + |
---|
| 1964 | + s->here->e_value_inum = 0; |
---|
| 1965 | + s->here->e_value_size = 0; |
---|
1929 | 1966 | } |
---|
1930 | 1967 | } else { |
---|
1931 | 1968 | /* Allocate a buffer where we construct the new block. */ |
---|
.. | .. |
---|
1976 | 2013 | else { |
---|
1977 | 2014 | u32 ref; |
---|
1978 | 2015 | |
---|
| 2016 | +#ifdef EXT4_XATTR_DEBUG |
---|
1979 | 2017 | WARN_ON_ONCE(dquot_initialize_needed(inode)); |
---|
1980 | | - |
---|
| 2018 | +#endif |
---|
1981 | 2019 | /* The old block is released after updating |
---|
1982 | 2020 | the inode. */ |
---|
1983 | 2021 | error = dquot_alloc_block(inode, |
---|
.. | .. |
---|
1992 | 2030 | lock_buffer(new_bh); |
---|
1993 | 2031 | /* |
---|
1994 | 2032 | * We have to be careful about races with |
---|
1995 | | - * freeing, rehashing or adding references to |
---|
1996 | | - * xattr block. Once we hold buffer lock xattr |
---|
1997 | | - * block's state is stable so we can check |
---|
1998 | | - * whether the block got freed / rehashed or |
---|
1999 | | - * not. Since we unhash mbcache entry under |
---|
2000 | | - * buffer lock when freeing / rehashing xattr |
---|
2001 | | - * block, checking whether entry is still |
---|
2002 | | - * hashed is reliable. Same rules hold for |
---|
2003 | | - * e_reusable handling. |
---|
| 2033 | + * adding references to xattr block. Once we |
---|
| 2034 | + * hold buffer lock xattr block's state is |
---|
| 2035 | + * stable so we can check the additional |
---|
| 2036 | + * reference fits. |
---|
2004 | 2037 | */ |
---|
2005 | | - if (hlist_bl_unhashed(&ce->e_hash_list) || |
---|
2006 | | - !ce->e_reusable) { |
---|
| 2038 | + ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; |
---|
| 2039 | + if (ref > EXT4_XATTR_REFCOUNT_MAX) { |
---|
2007 | 2040 | /* |
---|
2008 | 2041 | * Undo everything and check mbcache |
---|
2009 | 2042 | * again. |
---|
.. | .. |
---|
2018 | 2051 | new_bh = NULL; |
---|
2019 | 2052 | goto inserted; |
---|
2020 | 2053 | } |
---|
2021 | | - ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; |
---|
2022 | 2054 | BHDR(new_bh)->h_refcount = cpu_to_le32(ref); |
---|
2023 | | - if (ref >= EXT4_XATTR_REFCOUNT_MAX) |
---|
2024 | | - ce->e_reusable = 0; |
---|
| 2055 | + if (ref == EXT4_XATTR_REFCOUNT_MAX) |
---|
| 2056 | + clear_bit(MBE_REUSABLE_B, &ce->e_flags); |
---|
2025 | 2057 | ea_bdebug(new_bh, "reusing; refcount now=%d", |
---|
2026 | 2058 | ref); |
---|
2027 | 2059 | ext4_xattr_block_csum_set(inode, new_bh); |
---|
.. | .. |
---|
2045 | 2077 | /* We need to allocate a new block */ |
---|
2046 | 2078 | ext4_fsblk_t goal, block; |
---|
2047 | 2079 | |
---|
| 2080 | +#ifdef EXT4_XATTR_DEBUG |
---|
2048 | 2081 | WARN_ON_ONCE(dquot_initialize_needed(inode)); |
---|
2049 | | - |
---|
| 2082 | +#endif |
---|
2050 | 2083 | goal = ext4_group_first_block_no(sb, |
---|
2051 | 2084 | EXT4_I(inode)->i_block_group); |
---|
2052 | | - |
---|
2053 | | - /* non-extent files can't have physical blocks past 2^32 */ |
---|
2054 | | - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
---|
2055 | | - goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; |
---|
2056 | | - |
---|
2057 | 2085 | block = ext4_new_meta_blocks(handle, inode, goal, 0, |
---|
2058 | 2086 | NULL, &error); |
---|
2059 | 2087 | if (error) |
---|
2060 | 2088 | goto cleanup; |
---|
2061 | | - |
---|
2062 | | - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
---|
2063 | | - BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); |
---|
2064 | 2089 | |
---|
2065 | 2090 | ea_idebug(inode, "creating block %llu", |
---|
2066 | 2091 | (unsigned long long)block); |
---|
.. | .. |
---|
2189 | 2214 | return 0; |
---|
2190 | 2215 | } |
---|
2191 | 2216 | |
---|
2192 | | -int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, |
---|
| 2217 | +int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, |
---|
2193 | 2218 | struct ext4_xattr_info *i, |
---|
2194 | 2219 | struct ext4_xattr_ibody_find *is) |
---|
2195 | 2220 | { |
---|
.. | .. |
---|
2200 | 2225 | if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) |
---|
2201 | 2226 | return -ENOSPC; |
---|
2202 | 2227 | |
---|
2203 | | - error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
---|
2204 | | - if (error) |
---|
2205 | | - return error; |
---|
2206 | | - header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
---|
2207 | | - if (!IS_LAST_ENTRY(s->first)) { |
---|
2208 | | - header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
---|
2209 | | - ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
---|
2210 | | - } else { |
---|
2211 | | - header->h_magic = cpu_to_le32(0); |
---|
2212 | | - ext4_clear_inode_state(inode, EXT4_STATE_XATTR); |
---|
2213 | | - } |
---|
2214 | | - return 0; |
---|
2215 | | -} |
---|
2216 | | - |
---|
2217 | | -static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, |
---|
2218 | | - struct ext4_xattr_info *i, |
---|
2219 | | - struct ext4_xattr_ibody_find *is) |
---|
2220 | | -{ |
---|
2221 | | - struct ext4_xattr_ibody_header *header; |
---|
2222 | | - struct ext4_xattr_search *s = &is->s; |
---|
2223 | | - int error; |
---|
2224 | | - |
---|
2225 | | - if (EXT4_I(inode)->i_extra_isize == 0) |
---|
2226 | | - return -ENOSPC; |
---|
2227 | 2228 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
---|
2228 | 2229 | if (error) |
---|
2229 | 2230 | return error; |
---|
.. | .. |
---|
2552 | 2553 | .in_inode = !!entry->e_value_inum, |
---|
2553 | 2554 | }; |
---|
2554 | 2555 | struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); |
---|
| 2556 | + int needs_kvfree = 0; |
---|
2555 | 2557 | int error; |
---|
2556 | 2558 | |
---|
2557 | 2559 | is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); |
---|
2558 | 2560 | bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); |
---|
2559 | | - buffer = kmalloc(value_size, GFP_NOFS); |
---|
2560 | 2561 | b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); |
---|
2561 | | - if (!is || !bs || !buffer || !b_entry_name) { |
---|
| 2562 | + if (!is || !bs || !b_entry_name) { |
---|
2562 | 2563 | error = -ENOMEM; |
---|
2563 | 2564 | goto out; |
---|
2564 | 2565 | } |
---|
.. | .. |
---|
2570 | 2571 | |
---|
2571 | 2572 | /* Save the entry name and the entry value */ |
---|
2572 | 2573 | if (entry->e_value_inum) { |
---|
| 2574 | + buffer = kvmalloc(value_size, GFP_NOFS); |
---|
| 2575 | + if (!buffer) { |
---|
| 2576 | + error = -ENOMEM; |
---|
| 2577 | + goto out; |
---|
| 2578 | + } |
---|
| 2579 | + needs_kvfree = 1; |
---|
2573 | 2580 | error = ext4_xattr_inode_get(inode, entry, buffer, value_size); |
---|
2574 | 2581 | if (error) |
---|
2575 | 2582 | goto out; |
---|
2576 | 2583 | } else { |
---|
2577 | 2584 | size_t value_offs = le16_to_cpu(entry->e_value_offs); |
---|
2578 | | - memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size); |
---|
| 2585 | + buffer = (void *)IFIRST(header) + value_offs; |
---|
2579 | 2586 | } |
---|
2580 | 2587 | |
---|
2581 | 2588 | memcpy(b_entry_name, entry->e_name, entry->e_name_len); |
---|
.. | .. |
---|
2590 | 2597 | if (error) |
---|
2591 | 2598 | goto out; |
---|
2592 | 2599 | |
---|
2593 | | - /* Remove the chosen entry from the inode */ |
---|
2594 | | - error = ext4_xattr_ibody_set(handle, inode, &i, is); |
---|
2595 | | - if (error) |
---|
2596 | | - goto out; |
---|
2597 | | - |
---|
2598 | 2600 | i.value = buffer; |
---|
2599 | 2601 | i.value_len = value_size; |
---|
2600 | 2602 | error = ext4_xattr_block_find(inode, &i, bs); |
---|
2601 | 2603 | if (error) |
---|
2602 | 2604 | goto out; |
---|
2603 | 2605 | |
---|
2604 | | - /* Add entry which was removed from the inode into the block */ |
---|
| 2606 | + /* Move ea entry from the inode into the block */ |
---|
2605 | 2607 | error = ext4_xattr_block_set(handle, inode, &i, bs); |
---|
2606 | 2608 | if (error) |
---|
2607 | 2609 | goto out; |
---|
2608 | | - error = 0; |
---|
| 2610 | + |
---|
| 2611 | + /* Remove the chosen entry from the inode */ |
---|
| 2612 | + i.value = NULL; |
---|
| 2613 | + i.value_len = 0; |
---|
| 2614 | + error = ext4_xattr_ibody_set(handle, inode, &i, is); |
---|
| 2615 | + |
---|
2609 | 2616 | out: |
---|
2610 | 2617 | kfree(b_entry_name); |
---|
2611 | | - kfree(buffer); |
---|
| 2618 | + if (needs_kvfree && buffer) |
---|
| 2619 | + kvfree(buffer); |
---|
2612 | 2620 | if (is) |
---|
2613 | 2621 | brelse(is->iloc.bh); |
---|
2614 | 2622 | if (bs) |
---|
.. | .. |
---|
2783 | 2791 | (void *)header, total_ino); |
---|
2784 | 2792 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
---|
2785 | 2793 | |
---|
| 2794 | + if (ext4_has_inline_data(inode)) |
---|
| 2795 | + error = ext4_find_inline_data_nolock(inode); |
---|
| 2796 | + |
---|
2786 | 2797 | cleanup: |
---|
2787 | 2798 | if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { |
---|
2788 | 2799 | ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.", |
---|