From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
---
kernel/fs/crypto/inline_crypt.c | 171 +++++++++++++++++++++-----------------------------------
 1 file changed, 64 insertions(+), 107 deletions(-)
diff --git a/kernel/fs/crypto/inline_crypt.c b/kernel/fs/crypto/inline_crypt.c
index 3b3a684..7df5877 100644
--- a/kernel/fs/crypto/inline_crypt.c
+++ b/kernel/fs/crypto/inline_crypt.c
@@ -16,6 +16,8 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
#include <linux/uio.h>
#include "fscrypt_private.h"
@@ -69,23 +71,21 @@
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
- enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
- unsigned int dun_bytes;
- struct request_queue **devs;
+ struct blk_crypto_config crypto_cfg;
int num_devs;
+ struct request_queue **devs;
int i;
/* The file must need contents encryption, not filenames encryption */
if (!S_ISREG(inode->i_mode))
return 0;
- /* blk-crypto must implement the needed encryption algorithm */
- if (crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
+ /* The crypto mode must have a blk-crypto counterpart */
+ if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
return 0;
/* The filesystem must be mounted with -o inlinecrypt */
- if (!sb->s_cop->inline_crypt_enabled ||
- !sb->s_cop->inline_crypt_enabled(sb))
+ if (!(sb->s_flags & SB_INLINECRYPT))
return 0;
/*
@@ -102,37 +102,28 @@
return 0;
/*
- * The needed encryption settings must be supported either by
- * blk-crypto-fallback, or by hardware on all the filesystem's devices.
+ * On all the filesystem's devices, blk-crypto must support the crypto
+ * configuration that the file would use.
*/
-
- if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
- !is_hw_wrapped_key) {
- ci->ci_inlinecrypt = true;
- return 0;
- }
-
+ crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
+ crypto_cfg.data_unit_size = sb->s_blocksize;
+ crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
+ crypto_cfg.is_hw_wrapped = is_hw_wrapped_key;
num_devs = fscrypt_get_num_devices(sb);
- devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS);
+ devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
if (!devs)
return -ENOMEM;
-
fscrypt_get_devices(sb, num_devs, devs);
- dun_bytes = fscrypt_get_dun_bytes(ci);
-
for (i = 0; i < num_devs; i++) {
- if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm,
- crypto_mode,
- dun_bytes,
- sb->s_blocksize,
- is_hw_wrapped_key))
+ if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
goto out_free_devs;
}
ci->ci_inlinecrypt = true;
out_free_devs:
kfree(devs);
+
return 0;
}
@@ -145,32 +136,25 @@
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
- unsigned int dun_bytes;
- int num_devs;
+ int num_devs = fscrypt_get_num_devices(sb);
int queue_refs = 0;
struct fscrypt_blk_crypto_key *blk_key;
int err;
int i;
- num_devs = fscrypt_get_num_devices(sb);
- if (WARN_ON(num_devs < 1))
- return -EINVAL;
-
- blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
+ blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
if (!blk_key)
return -ENOMEM;
blk_key->num_devs = num_devs;
fscrypt_get_devices(sb, num_devs, blk_key->devs);
- dun_bytes = fscrypt_get_dun_bytes(ci);
-
BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);
err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
- is_hw_wrapped, crypto_mode, dun_bytes,
- sb->s_blocksize);
+ is_hw_wrapped, crypto_mode,
+ fscrypt_get_dun_bytes(ci), sb->s_blocksize);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
@@ -191,10 +175,8 @@
}
queue_refs++;
- err = blk_crypto_start_using_mode(crypto_mode, dun_bytes,
- sb->s_blocksize,
- is_hw_wrapped,
- blk_key->devs[i]);
+ err = blk_crypto_start_using_key(&blk_key->base,
+ blk_key->devs[i]);
if (err) {
fscrypt_err(inode,
"error %d starting to use blk-crypto", err);
@@ -213,7 +195,7 @@
fail:
for (i = 0; i < queue_refs; i++)
blk_put_queue(blk_key->devs[i]);
- kzfree(blk_key);
+ kfree_sensitive(blk_key);
return err;
}
@@ -227,7 +209,7 @@
blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
blk_put_queue(blk_key->devs[i]);
}
- kzfree(blk_key);
+ kfree_sensitive(blk_key);
}
}
@@ -238,46 +220,19 @@
{
struct request_queue *q;
- q = sb->s_bdev->bd_queue;
+ q = bdev_get_queue(sb->s_bdev);
if (!q->ksm)
return -EOPNOTSUPP;
- return keyslot_manager_derive_raw_secret(q->ksm,
- wrapped_key, wrapped_key_size,
- raw_secret, raw_secret_size);
+ return blk_ksm_derive_raw_secret(q->ksm, wrapped_key, wrapped_key_size,
+ raw_secret, raw_secret_size);
}
-/**
- * fscrypt_inode_uses_inline_crypto - test whether an inode uses inline
- * encryption
- * @inode: an inode
- *
- * Return: true if the inode requires file contents encryption and if the
- * encryption should be done in the block layer via blk-crypto rather
- * than in the filesystem layer.
- */
-bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
- return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
- inode->i_crypt_info->ci_inlinecrypt;
+ return inode->i_crypt_info->ci_inlinecrypt;
}
-EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);
-
-/**
- * fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer
- * encryption
- * @inode: an inode
- *
- * Return: true if the inode requires file contents encryption and if the
- * encryption should be done in the filesystem layer rather than in the
- * block layer via blk-crypto.
- */
-bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
-{
- return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
- !inode->i_crypt_info->ci_inlinecrypt;
-}
-EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto);
+EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
@@ -294,7 +249,7 @@
}
/**
- * fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption
+ * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
* @bio: a bio which will eventually be submitted to the file
* @inode: the file's inode
* @first_lblk: the first file logical block number in the I/O
@@ -314,7 +269,7 @@
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
{
- const struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (fscrypt_inode_should_skip_dm_default_key(inode))
@@ -322,9 +277,10 @@
if (!fscrypt_inode_uses_inline_crypto(inode))
return;
+ ci = inode->i_crypt_info;
fscrypt_generate_dun(ci, first_lblk, dun);
- bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask);
+ bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
@@ -353,8 +309,8 @@
}
/**
- * fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline
- * encryption
+ * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
+ * crypto
* @bio: a bio which will eventually be submitted to the file
* @first_bh: the first buffer_head for which I/O will be submitted
* @gfp_mask: memory allocation flags
@@ -363,8 +319,8 @@
* of an inode and block number directly.
*/
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
- const struct buffer_head *first_bh,
- gfp_t gfp_mask)
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask)
{
const struct inode *inode;
u64 first_lblk;
@@ -375,14 +331,14 @@
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
/**
- * fscrypt_mergeable_bio - test whether data can be added to a bio
+ * fscrypt_mergeable_bio() - test whether data can be added to a bio
* @bio: the bio being built up
* @inode: the inode for the next part of the I/O
* @next_lblk: the next file logical block number in the I/O
*
* When building a bio which may contain data which should undergo inline
* encryption (or decryption) via fscrypt, filesystems should call this function
- * to ensure that the resulting bio contains only logically contiguous data.
+ * to ensure that the resulting bio contains only contiguous data unit numbers.
* This will return false if the next part of the I/O cannot be merged with the
* bio because either the encryption key would be different or the encryption
* data unit numbers would be discontiguous.
@@ -413,7 +369,7 @@
* uses the same pointer. I.e., there's currently no need to support
* merging requests where the keys are the same but the pointers differ.
*/
- if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base)
+ if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
return false;
fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
@@ -422,7 +378,7 @@
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
/**
- * fscrypt_mergeable_bio_bh - test whether data can be added to a bio
+ * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
* @bio: the bio being built up
* @next_bh: the next buffer_head for which I/O will be submitted
*
@@ -478,42 +434,43 @@
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
/**
- * fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs
+ * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
* @inode: the file on which I/O is being done
- * @pos: the file position (in bytes) at which the I/O is being done
- * @nr_pages: the number of pages we want to submit starting at @pos
+ * @lblk: the block at which the I/O is being started from
+ * @nr_blocks: the number of blocks we want to submit starting at @lblk
*
- * For direct I/O: limit the number of pages that will be submitted in the bio
- * targeting @pos, in order to avoid crossing a data unit number (DUN)
- * discontinuity. This is only needed for certain IV generation methods.
+ * Determine the limit to the number of blocks that can be submitted in the bio
+ * targeting @lblk without causing a data unit number (DUN) discontinuity.
*
- * Return: the actual number of pages that can be submitted
+ * This is normally just @nr_blocks, as normally the DUNs just increment along
+ * with the logical blocks. (Or the file is not encrypted.)
+ *
+ * In rare cases, fscrypt can be using an IV generation method that allows the
+ * DUN to wrap around within logically contiguous blocks, and that wraparound
+ * will occur. If this happens, a value less than @nr_blocks will be returned
+ * so that the wraparound doesn't occur in the middle of the bio.
+ *
+ * Return: the actual number of blocks that can be submitted
*/
-int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages)
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
const struct fscrypt_info *ci = inode->i_crypt_info;
u32 dun;
if (!fscrypt_inode_uses_inline_crypto(inode))
- return nr_pages;
+ return nr_blocks;
- if (nr_pages <= 1)
- return nr_pages;
+ if (nr_blocks <= 1)
+ return nr_blocks;
if (!(fscrypt_policy_flags(&ci->ci_policy) &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
- return nr_pages;
-
- /*
- * fscrypt_select_encryption_impl() ensures that block_size == PAGE_SIZE
- * when using FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32.
- */
- if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE))
- return 1;
+ return nr_blocks;
/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
- dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits);
+ dun = ci->ci_hashed_ino + lblk;
- return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun);
+ return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
+EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
--
Gitblit v1.6.2