| .. | .. |
|---|
| 9 | 9 | |
|---|
| 10 | 10 | #define pr_fmt(fmt) "blk-crypto: " fmt |
|---|
| 11 | 11 | |
|---|
| 12 | | -#include <linux/blk-crypto.h> |
|---|
| 12 | +#include <linux/bio.h> |
|---|
| 13 | 13 | #include <linux/blkdev.h> |
|---|
| 14 | 14 | #include <linux/keyslot-manager.h> |
|---|
| 15 | | -#include <linux/random.h> |
|---|
| 16 | | -#include <linux/siphash.h> |
|---|
| 15 | +#include <linux/module.h> |
|---|
| 16 | +#include <linux/ratelimit.h> |
|---|
| 17 | +#include <linux/slab.h> |
|---|
| 17 | 18 | |
|---|
| 18 | 19 | #include "blk-crypto-internal.h" |
|---|
| 19 | 20 | |
|---|
| .. | .. |
|---|
| 35 | 36 | }, |
|---|
| 36 | 37 | }; |
|---|
| 37 | 38 | |
|---|
| 38 | | -/* Check that all I/O segments are data unit aligned */ |
|---|
| 39 | | -static int bio_crypt_check_alignment(struct bio *bio) |
|---|
| 39 | +/* |
|---|
| 40 | + * This number needs to be at least (the number of threads doing IO |
|---|
| 41 | + * concurrently) * (maximum recursive depth of a bio), so that we don't |
|---|
| 42 | + * deadlock on crypt_ctx allocations. The default is chosen to be the same |
|---|
| 43 | + * as the default number of post read contexts in both EXT4 and F2FS. |
|---|
| 44 | + */ |
|---|
| 45 | +static int num_prealloc_crypt_ctxs = 128; |
|---|
| 46 | + |
|---|
| 47 | +module_param(num_prealloc_crypt_ctxs, int, 0444); |
|---|
| 48 | +MODULE_PARM_DESC(num_prealloc_crypt_ctxs, |
|---|
| 49 | + "Number of bio crypto contexts to preallocate"); |
|---|
| 50 | + |
|---|
| 51 | +static struct kmem_cache *bio_crypt_ctx_cache; |
|---|
| 52 | +static mempool_t *bio_crypt_ctx_pool; |
|---|
| 53 | + |
|---|
| 54 | +static int __init bio_crypt_ctx_init(void) |
|---|
| 55 | +{ |
|---|
| 56 | + size_t i; |
|---|
| 57 | + |
|---|
| 58 | + bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0); |
|---|
| 59 | + if (!bio_crypt_ctx_cache) |
|---|
| 60 | + goto out_no_mem; |
|---|
| 61 | + |
|---|
| 62 | + bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs, |
|---|
| 63 | + bio_crypt_ctx_cache); |
|---|
| 64 | + if (!bio_crypt_ctx_pool) |
|---|
| 65 | + goto out_no_mem; |
|---|
| 66 | + |
|---|
| 67 | + /* This is assumed in various places. */ |
|---|
| 68 | + BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0); |
|---|
| 69 | + |
|---|
| 70 | + /* Sanity check that no algorithm exceeds the defined limits. */ |
|---|
| 71 | + for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) { |
|---|
| 72 | + BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE); |
|---|
| 73 | + BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE); |
|---|
| 74 | + } |
|---|
| 75 | + |
|---|
| 76 | + return 0; |
|---|
| 77 | +out_no_mem: |
|---|
| 78 | + panic("Failed to allocate mem for bio crypt ctxs\n"); |
|---|
| 79 | +} |
|---|
| 80 | +subsys_initcall(bio_crypt_ctx_init); |
|---|
| 81 | + |
|---|
| 82 | +void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, |
|---|
| 83 | + const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) |
|---|
| 84 | +{ |
|---|
| 85 | + struct bio_crypt_ctx *bc; |
|---|
| 86 | + |
|---|
| 87 | + /* |
|---|
| 88 | + * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so |
|---|
| 89 | + * that the mempool_alloc() can't fail. |
|---|
| 90 | + */ |
|---|
| 91 | + WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM)); |
|---|
| 92 | + |
|---|
| 93 | + bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); |
|---|
| 94 | + |
|---|
| 95 | + bc->bc_key = key; |
|---|
| 96 | + memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun)); |
|---|
| 97 | + |
|---|
| 98 | + bio->bi_crypt_context = bc; |
|---|
| 99 | +} |
|---|
| 100 | +EXPORT_SYMBOL_GPL(bio_crypt_set_ctx); |
|---|
| 101 | + |
|---|
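As a rough illustration of how a caller is expected to use `bio_crypt_set_ctx()` (a hypothetical sketch, not code from this patch — the helper name, the `GFP_NOIO` choice, and the use of a logical block number as the DUN are assumptions):

```c
/*
 * Hypothetical caller sketch: attach an encryption context to a bio
 * before submission.  "key" is assumed to have been set up earlier with
 * blk_crypto_init_key() and blk_crypto_start_using_key(); using the
 * logical block number as the DUN is just one possible convention.
 */
static void example_set_bio_crypt_ctx(struct bio *bio,
				       const struct blk_crypto_key *key,
				       u64 lblk_num)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };

	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool_alloc() cannot fail. */
	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}
```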
| 102 | +void __bio_crypt_free_ctx(struct bio *bio) |
|---|
| 103 | +{ |
|---|
| 104 | + mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool); |
|---|
| 105 | + bio->bi_crypt_context = NULL; |
|---|
| 106 | +} |
|---|
| 107 | + |
|---|
| 108 | +int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) |
|---|
| 109 | +{ |
|---|
| 110 | + dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); |
|---|
| 111 | + if (!dst->bi_crypt_context) |
|---|
| 112 | + return -ENOMEM; |
|---|
| 113 | + *dst->bi_crypt_context = *src->bi_crypt_context; |
|---|
| 114 | + return 0; |
|---|
| 115 | +} |
|---|
| 116 | +EXPORT_SYMBOL_GPL(__bio_crypt_clone); |
|---|
| 117 | + |
|---|
| 118 | +/* Increments @dun by @inc, treating @dun as a multi-limb integer. */ |
|---|
| 119 | +void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], |
|---|
| 120 | + unsigned int inc) |
|---|
| 121 | +{ |
|---|
| 122 | + int i; |
|---|
| 123 | + |
|---|
| 124 | + for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) { |
|---|
| 125 | + dun[i] += inc; |
|---|
| 126 | + /* |
|---|
| 127 | + * If the addition in this limb overflowed, then we need to |
|---|
| 128 | + * carry 1 into the next limb. Else the carry is 0. |
|---|
| 129 | + */ |
|---|
| 130 | + if (dun[i] < inc) |
|---|
| 131 | + inc = 1; |
|---|
| 132 | + else |
|---|
| 133 | + inc = 0; |
|---|
| 134 | + } |
|---|
| 135 | +} |
|---|
| 136 | + |
|---|
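The carry propagation above can be checked in isolation. Below is a minimal userspace sketch of the same arithmetic (the limb count of 4 merely stands in for `BLK_CRYPTO_DUN_ARRAY_SIZE`; none of this is part of the patch):

```c
#include <stdint.h>
#include <stdio.h>

#define DUN_LIMBS 4	/* stands in for BLK_CRYPTO_DUN_ARRAY_SIZE */

/* Same multi-limb increment as bio_crypt_dun_increment() above. */
static void dun_increment(uint64_t dun[DUN_LIMBS], unsigned int inc)
{
	for (int i = 0; inc && i < DUN_LIMBS; i++) {
		dun[i] += inc;
		/* On overflow the limb ends up smaller than what was added. */
		inc = (dun[i] < inc) ? 1 : 0;
	}
}

int main(void)
{
	/* Limb 0 is all-ones, so adding 1 must carry into limb 1. */
	uint64_t dun[DUN_LIMBS] = { UINT64_MAX, 5, 0, 0 };

	dun_increment(dun, 1);
	printf("limb0=%llu limb1=%llu\n",
	       (unsigned long long)dun[0], (unsigned long long)dun[1]);
	/* prints "limb0=0 limb1=6" */
	return 0;
}
```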
| 137 | +void __bio_crypt_advance(struct bio *bio, unsigned int bytes) |
|---|
| 138 | +{ |
|---|
| 139 | + struct bio_crypt_ctx *bc = bio->bi_crypt_context; |
|---|
| 140 | + |
|---|
| 141 | + bio_crypt_dun_increment(bc->bc_dun, |
|---|
| 142 | + bytes >> bc->bc_key->data_unit_size_bits); |
|---|
| 143 | +} |
|---|
| 144 | + |
|---|
| 145 | +/* |
|---|
| 146 | + * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to |
|---|
| 147 | + * @next_dun, treating the DUNs as multi-limb integers. |
|---|
| 148 | + */ |
|---|
| 149 | +bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc, |
|---|
| 150 | + unsigned int bytes, |
|---|
| 151 | + const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) |
|---|
| 152 | +{ |
|---|
| 153 | + int i; |
|---|
| 154 | + unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits; |
|---|
| 155 | + |
|---|
| 156 | + for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) { |
|---|
| 157 | + if (bc->bc_dun[i] + carry != next_dun[i]) |
|---|
| 158 | + return false; |
|---|
| 159 | + /* |
|---|
| 160 | + * If the addition in this limb overflowed, then we need to |
|---|
| 161 | + * carry 1 into the next limb. Else the carry is 0. |
|---|
| 162 | + */ |
|---|
| 163 | + if ((bc->bc_dun[i] + carry) < carry) |
|---|
| 164 | + carry = 1; |
|---|
| 165 | + else |
|---|
| 166 | + carry = 0; |
|---|
| 167 | + } |
|---|
| 168 | + |
|---|
| 169 | + /* If the DUN wrapped through 0, don't treat it as contiguous. */ |
|---|
| 170 | + return carry == 0; |
|---|
| 171 | +} |
|---|
| 172 | + |
|---|
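A worked example may help: with 4096-byte data units, a bio whose context starts at DUN 10 and carries 8 KiB of data covers DUNs 10 and 11, so only a following context that starts at DUN 12 is contiguous with it. A hypothetical sketch of that check (the wrapper function and its assumed inputs are not part of this patch):

```c
/*
 * Hypothetical illustration: assuming bc->bc_key uses 4096-byte data
 * units and bc->bc_dun == { 10, 0, ... }, this returns true, because the
 * 8 KiB covered by the front bio ends exactly at DUN 12.
 */
static bool example_is_contiguous(const struct bio_crypt_ctx *bc)
{
	const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 12 };

	return bio_crypt_dun_is_contiguous(bc, 8192, next_dun);
}
```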
| 173 | +/* |
|---|
| 174 | + * Checks that two bio crypt contexts are compatible - i.e. that |
|---|
| 175 | + * they are mergeable except for data_unit_num continuity. |
|---|
| 176 | + */ |
|---|
| 177 | +static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1, |
|---|
| 178 | + struct bio_crypt_ctx *bc2) |
|---|
| 179 | +{ |
|---|
| 180 | + if (!bc1) |
|---|
| 181 | + return !bc2; |
|---|
| 182 | + |
|---|
| 183 | + return bc2 && bc1->bc_key == bc2->bc_key; |
|---|
| 184 | +} |
|---|
| 185 | + |
|---|
| 186 | +bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio) |
|---|
| 187 | +{ |
|---|
| 188 | + return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context); |
|---|
| 189 | +} |
|---|
| 190 | + |
|---|
| 191 | +/* |
|---|
| 192 | + * Checks that two bio crypt contexts are compatible, and also |
|---|
| 193 | + * that their data_unit_nums are continuous (and can hence be merged) |
|---|
| 194 | + * in the order @bc1 followed by @bc2. |
|---|
| 195 | + */ |
|---|
| 196 | +bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes, |
|---|
| 197 | + struct bio_crypt_ctx *bc2) |
|---|
| 198 | +{ |
|---|
| 199 | + if (!bio_crypt_ctx_compatible(bc1, bc2)) |
|---|
| 200 | + return false; |
|---|
| 201 | + |
|---|
| 202 | + return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun); |
|---|
| 203 | +} |
|---|
| 204 | + |
|---|
| 205 | +/* Check that all I/O segments are data unit aligned. */ |
|---|
| 206 | +static bool bio_crypt_check_alignment(struct bio *bio) |
|---|
| 40 | 207 | { |
|---|
| 41 | 208 | const unsigned int data_unit_size = |
|---|
| 42 | | - bio->bi_crypt_context->bc_key->data_unit_size; |
|---|
| 209 | + bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size; |
|---|
| 43 | 210 | struct bvec_iter iter; |
|---|
| 44 | 211 | struct bio_vec bv; |
|---|
| 45 | 212 | |
|---|
| 46 | 213 | bio_for_each_segment(bv, bio, iter) { |
|---|
| 47 | 214 | if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) |
|---|
| 48 | | - return -EIO; |
|---|
| 215 | + return false; |
|---|
| 49 | 216 | } |
|---|
| 50 | | - return 0; |
|---|
| 217 | + |
|---|
| 218 | + return true; |
|---|
| 219 | +} |
|---|
| 220 | + |
|---|
| 221 | +blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq) |
|---|
| 222 | +{ |
|---|
| 223 | + return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key, |
|---|
| 224 | + &rq->crypt_keyslot); |
|---|
| 225 | +} |
|---|
| 226 | + |
|---|
| 227 | +void __blk_crypto_rq_put_keyslot(struct request *rq) |
|---|
| 228 | +{ |
|---|
| 229 | + blk_ksm_put_slot(rq->crypt_keyslot); |
|---|
| 230 | + rq->crypt_keyslot = NULL; |
|---|
| 231 | +} |
|---|
| 232 | + |
|---|
| 233 | +void __blk_crypto_free_request(struct request *rq) |
|---|
| 234 | +{ |
|---|
| 235 | + /* The keyslot, if one was needed, should have been released earlier. */ |
|---|
| 236 | + if (WARN_ON_ONCE(rq->crypt_keyslot)) |
|---|
| 237 | + __blk_crypto_rq_put_keyslot(rq); |
|---|
| 238 | + |
|---|
| 239 | + mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool); |
|---|
| 240 | + rq->crypt_ctx = NULL; |
|---|
| 51 | 241 | } |
|---|
| 52 | 242 | |
|---|
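The three request-side helpers above are easiest to read as one sequence. The following is a hypothetical sketch of the order in which the block layer would use them for an encrypted request (error handling reduced to a bare minimum; the wrapper is illustrative only):

```c
/*
 * Hypothetical sketch, not part of this patch: program the key into a
 * hardware keyslot, let the driver do the I/O, then release the slot
 * and free the request's crypt context.
 */
static void example_rq_crypto_lifecycle(struct request *rq)
{
	if (__blk_crypto_rq_get_keyslot(rq) != BLK_STS_OK)
		return;				/* key could not be programmed */

	/* ... the driver issues the I/O using rq->crypt_keyslot ... */

	__blk_crypto_rq_put_keyslot(rq);	/* done with the keyslot */
	__blk_crypto_free_request(rq);		/* frees rq->crypt_ctx */
}
```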
| 53 | 243 | /** |
|---|
| 54 | | - * blk_crypto_submit_bio - handle submitting bio for inline encryption |
|---|
| 244 | + * __blk_crypto_bio_prep - Prepare bio for inline encryption |
|---|
| 55 | 245 | * |
|---|
| 56 | 246 | * @bio_ptr: pointer to original bio pointer |
|---|
| 57 | 247 | * |
|---|
| 58 | | - * If the bio doesn't have inline encryption enabled or the submitter already |
|---|
| 59 | | - * specified a keyslot for the target device, do nothing. Else, a raw key must |
|---|
| 60 | | - * have been provided, so acquire a device keyslot for it if supported. Else, |
|---|
| 61 | | - * use the crypto API fallback. |
|---|
| 248 | + * If the bio crypt context provided for the bio is supported by the underlying |
|---|
| 249 | + * device's inline encryption hardware, do nothing. |
|---|
| 62 | 250 | * |
|---|
| 63 | | - * When the crypto API fallback is used for encryption, blk-crypto may choose to |
|---|
| 64 | | - * split the bio into 2 - the first one that will continue to be processed and |
|---|
| 65 | | - * the second one that will be resubmitted via generic_make_request. |
|---|
| 66 | | - * A bounce bio will be allocated to encrypt the contents of the aforementioned |
|---|
| 67 | | - * "first one", and *bio_ptr will be updated to this bounce bio. |
|---|
| 251 | + * Otherwise, try to perform en/decryption for this bio by falling back to the |
|---|
| 252 | + * kernel crypto API. When the crypto API fallback is used for encryption, |
|---|
| 253 | + * blk-crypto may choose to split the bio into 2 - the first one that will |
|---|
| 254 | + * continue to be processed and the second one that will be resubmitted via |
|---|
| 255 | + * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents |
|---|
| 256 | + * of the aforementioned "first one", and *bio_ptr will be updated to this |
|---|
| 257 | + * bounce bio. |
|---|
| 68 | 258 | * |
|---|
| 69 | | - * Return: 0 if bio submission should continue; nonzero if bio_endio() was |
|---|
| 70 | | - * already called so bio submission should abort. |
|---|
| 259 | + * Caller must ensure bio has bio_crypt_ctx. |
|---|
| 260 | + * |
|---|
| 261 | + * Return: true on success; false on error (and bio->bi_status will be set |
|---|
| 262 | + * appropriately, and bio_endio() will have been called so bio |
|---|
| 263 | + * submission should abort). |
|---|
| 71 | 264 | */ |
|---|
| 72 | | -int blk_crypto_submit_bio(struct bio **bio_ptr) |
|---|
| 265 | +bool __blk_crypto_bio_prep(struct bio **bio_ptr) |
|---|
| 73 | 266 | { |
|---|
| 74 | 267 | struct bio *bio = *bio_ptr; |
|---|
| 75 | | - struct request_queue *q; |
|---|
| 76 | | - struct bio_crypt_ctx *bc = bio->bi_crypt_context; |
|---|
| 77 | | - int err; |
|---|
| 268 | + const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key; |
|---|
| 78 | 269 | |
|---|
| 79 | | - if (!bc || !bio_has_data(bio)) |
|---|
| 80 | | - return 0; |
|---|
| 270 | + /* Error if bio has no data. */ |
|---|
| 271 | + if (WARN_ON_ONCE(!bio_has_data(bio))) { |
|---|
| 272 | + bio->bi_status = BLK_STS_IOERR; |
|---|
| 273 | + goto fail; |
|---|
| 274 | + } |
|---|
| 275 | + |
|---|
| 276 | + if (!bio_crypt_check_alignment(bio)) { |
|---|
| 277 | + bio->bi_status = BLK_STS_IOERR; |
|---|
| 278 | + goto fail; |
|---|
| 279 | + } |
|---|
| 81 | 280 | |
|---|
| 82 | 281 | /* |
|---|
| 83 | | - * When a read bio is marked for fallback decryption, its bi_iter is |
|---|
| 84 | | - * saved so that when we decrypt the bio later, we know what part of it |
|---|
| 85 | | - * was marked for fallback decryption (when the bio is passed down after |
|---|
| 86 | | - * blk_crypto_submit bio, it may be split or advanced so we cannot rely |
|---|
| 87 | | - * on the bi_iter while decrypting in blk_crypto_endio) |
|---|
| 282 | + * Success if device supports the encryption context, or if we succeeded |
|---|
| 283 | + * in falling back to the crypto API. |
|---|
| 88 | 284 | */ |
|---|
| 89 | | - if (bio_crypt_fallback_crypted(bc)) |
|---|
| 90 | | - return 0; |
|---|
| 91 | | - |
|---|
| 92 | | - err = bio_crypt_check_alignment(bio); |
|---|
| 93 | | - if (err) { |
|---|
| 94 | | - bio->bi_status = BLK_STS_IOERR; |
|---|
| 95 | | - goto out; |
|---|
| 96 | | - } |
|---|
| 97 | | - |
|---|
| 98 | | - q = bio->bi_disk->queue; |
|---|
| 99 | | - |
|---|
| 100 | | - if (bc->bc_ksm) { |
|---|
| 101 | | - /* Key already programmed into device? */ |
|---|
| 102 | | - if (q->ksm == bc->bc_ksm) |
|---|
| 103 | | - return 0; |
|---|
| 104 | | - |
|---|
| 105 | | - /* Nope, release the existing keyslot. */ |
|---|
| 106 | | - bio_crypt_ctx_release_keyslot(bc); |
|---|
| 107 | | - } |
|---|
| 108 | | - |
|---|
| 109 | | - /* Get device keyslot if supported */ |
|---|
| 110 | | - if (keyslot_manager_crypto_mode_supported(q->ksm, |
|---|
| 111 | | - bc->bc_key->crypto_mode, |
|---|
| 112 | | - blk_crypto_key_dun_bytes(bc->bc_key), |
|---|
| 113 | | - bc->bc_key->data_unit_size, |
|---|
| 114 | | - bc->bc_key->is_hw_wrapped)) { |
|---|
| 115 | | - err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); |
|---|
| 116 | | - if (!err) |
|---|
| 117 | | - return 0; |
|---|
| 118 | | - |
|---|
| 119 | | - pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n", |
|---|
| 120 | | - bio->bi_disk->disk_name, err); |
|---|
| 121 | | - } |
|---|
| 122 | | - |
|---|
| 123 | | - /* Fallback to crypto API */ |
|---|
| 124 | | - err = blk_crypto_fallback_submit_bio(bio_ptr); |
|---|
| 125 | | - if (err) |
|---|
| 126 | | - goto out; |
|---|
| 127 | | - |
|---|
| 128 | | - return 0; |
|---|
| 129 | | -out: |
|---|
| 130 | | - bio_endio(*bio_ptr); |
|---|
| 131 | | - return err; |
|---|
| 132 | | -} |
|---|
| 133 | | - |
|---|
| 134 | | -/** |
|---|
| 135 | | - * blk_crypto_endio - clean up bio w.r.t inline encryption during bio_endio |
|---|
| 136 | | - * |
|---|
| 137 | | - * @bio: the bio to clean up |
|---|
| 138 | | - * |
|---|
| 139 | | - * If blk_crypto_submit_bio decided to fallback to crypto API for this bio, |
|---|
| 140 | | - * we queue the bio for decryption into a workqueue and return false, |
|---|
| 141 | | - * and call bio_endio(bio) at a later time (after the bio has been decrypted). |
|---|
| 142 | | - * |
|---|
| 143 | | - * If the bio is not to be decrypted by the crypto API, this function releases |
|---|
| 144 | | - * the reference to the keyslot that blk_crypto_submit_bio got. |
|---|
| 145 | | - * |
|---|
| 146 | | - * Return: true if bio_endio should continue; false otherwise (bio_endio will |
|---|
| 147 | | - * be called again when bio has been decrypted). |
|---|
| 148 | | - */ |
|---|
| 149 | | -bool blk_crypto_endio(struct bio *bio) |
|---|
| 150 | | -{ |
|---|
| 151 | | - struct bio_crypt_ctx *bc = bio->bi_crypt_context; |
|---|
| 152 | | - |
|---|
| 153 | | - if (!bc) |
|---|
| 285 | + if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm, |
|---|
| 286 | + &bc_key->crypto_cfg)) |
|---|
| 154 | 287 | return true; |
|---|
| 155 | 288 | |
|---|
| 156 | | - if (bio_crypt_fallback_crypted(bc)) { |
|---|
| 157 | | - /* |
|---|
| 158 | | - * The only bios who's crypto is handled by the blk-crypto |
|---|
| 159 | | - * fallback when they reach here are those with |
|---|
| 160 | | - * bio_data_dir(bio) == READ, since WRITE bios that are |
|---|
| 161 | | - * encrypted by the crypto API fallback are handled by |
|---|
| 162 | | - * blk_crypto_encrypt_endio(). |
|---|
| 163 | | - */ |
|---|
| 164 | | - return !blk_crypto_queue_decrypt_bio(bio); |
|---|
| 289 | + if (blk_crypto_fallback_bio_prep(bio_ptr)) |
|---|
| 290 | + return true; |
|---|
| 291 | +fail: |
|---|
| 292 | + bio_endio(*bio_ptr); |
|---|
| 293 | + return false; |
|---|
| 294 | +} |
|---|
| 295 | + |
|---|
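For context, a hypothetical sketch of the call-site pattern the comment above describes (the surrounding submission code is simplified; only the return-value handling matters here):

```c
/*
 * Hypothetical sketch of a submission-path caller: on failure,
 * __blk_crypto_bio_prep() has already set bio->bi_status and called
 * bio_endio(), so the caller just stops.  On success, the bio pointer
 * may now refer to a bounce bio.
 */
static void example_submit(struct bio *bio)
{
	if (bio->bi_crypt_context && !__blk_crypto_bio_prep(&bio))
		return;

	/* ... continue submitting "bio", which may now be a bounce bio ... */
}
```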
| 296 | +int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, |
|---|
| 297 | + gfp_t gfp_mask) |
|---|
| 298 | +{ |
|---|
| 299 | + if (!rq->crypt_ctx) { |
|---|
| 300 | + rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); |
|---|
| 301 | + if (!rq->crypt_ctx) |
|---|
| 302 | + return -ENOMEM; |
|---|
| 165 | 303 | } |
|---|
| 166 | | - |
|---|
| 167 | | - if (bc->bc_keyslot >= 0) |
|---|
| 168 | | - bio_crypt_ctx_release_keyslot(bc); |
|---|
| 169 | | - |
|---|
| 170 | | - return true; |
|---|
| 304 | + *rq->crypt_ctx = *bio->bi_crypt_context; |
|---|
| 305 | + return 0; |
|---|
| 171 | 306 | } |
|---|
| 172 | 307 | |
|---|
| 173 | 308 | /** |
|---|
| .. | .. |
|---|
| 185 | 320 | * key is used |
|---|
| 186 | 321 | * @data_unit_size: the data unit size to use for en/decryption |
|---|
| 187 | 322 | * |
|---|
| 188 | | - * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. When |
|---|
| 189 | | - * done using the key, it must be freed with blk_crypto_free_key(). |
|---|
| 323 | + * Return: 0 on success, -errno on failure. The caller is responsible for |
|---|
| 324 | + * zeroizing both blk_key and raw_key when done with them. |
|---|
| 190 | 325 | */ |
|---|
| 191 | 326 | int blk_crypto_init_key(struct blk_crypto_key *blk_key, |
|---|
| 192 | 327 | const u8 *raw_key, unsigned int raw_key_size, |
|---|
| .. | .. |
|---|
| 196 | 331 | unsigned int data_unit_size) |
|---|
| 197 | 332 | { |
|---|
| 198 | 333 | const struct blk_crypto_mode *mode; |
|---|
| 199 | | - static siphash_key_t hash_key; |
|---|
| 200 | | - u32 hash; |
|---|
| 201 | 334 | |
|---|
| 202 | 335 | memset(blk_key, 0, sizeof(*blk_key)); |
|---|
| 203 | 336 | |
|---|
| .. | .. |
|---|
| 216 | 349 | return -EINVAL; |
|---|
| 217 | 350 | } |
|---|
| 218 | 351 | |
|---|
| 219 | | - if (dun_bytes <= 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE) |
|---|
| 352 | + if (dun_bytes == 0 || dun_bytes > mode->ivsize) |
|---|
| 220 | 353 | return -EINVAL; |
|---|
| 221 | 354 | |
|---|
| 222 | 355 | if (!is_power_of_2(data_unit_size)) |
|---|
| 223 | 356 | return -EINVAL; |
|---|
| 224 | 357 | |
|---|
| 225 | | - blk_key->crypto_mode = crypto_mode; |
|---|
| 226 | | - blk_key->data_unit_size = data_unit_size; |
|---|
| 358 | + blk_key->crypto_cfg.crypto_mode = crypto_mode; |
|---|
| 359 | + blk_key->crypto_cfg.dun_bytes = dun_bytes; |
|---|
| 360 | + blk_key->crypto_cfg.data_unit_size = data_unit_size; |
|---|
| 361 | + blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped; |
|---|
| 227 | 362 | blk_key->data_unit_size_bits = ilog2(data_unit_size); |
|---|
| 228 | 363 | blk_key->size = raw_key_size; |
|---|
| 229 | | - blk_key->is_hw_wrapped = is_hw_wrapped; |
|---|
| 230 | 364 | memcpy(blk_key->raw, raw_key, raw_key_size); |
|---|
| 231 | | - |
|---|
| 232 | | - /* |
|---|
| 233 | | - * The keyslot manager uses the SipHash of the key to implement O(1) key |
|---|
| 234 | | - * lookups while avoiding leaking information about the keys. It's |
|---|
| 235 | | - * precomputed here so that it only needs to be computed once per key. |
|---|
| 236 | | - */ |
|---|
| 237 | | - get_random_once(&hash_key, sizeof(hash_key)); |
|---|
| 238 | | - hash = (u32)siphash(raw_key, raw_key_size, &hash_key); |
|---|
| 239 | | - blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes); |
|---|
| 240 | 365 | |
|---|
| 241 | 366 | return 0; |
|---|
| 242 | 367 | } |
|---|
| 243 | 368 | EXPORT_SYMBOL_GPL(blk_crypto_init_key); |
|---|
| 244 | 369 | |
|---|
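Since `blk_crypto_init_key()` now fills in a caller-provided struct rather than allocating one, the cleanup the updated kernel-doc asks for is just wiping the key material. A hypothetical sketch (`memzero_explicit()` is the usual kernel helper for this; the wrapper itself is not part of this patch, and the full argument list of `blk_crypto_init_key()` is elided in this hunk, so it is not spelled out here):

```c
/*
 * Hypothetical sketch: wipe both the prepared blk_crypto_key and the raw
 * key bytes once they are no longer needed, as the kernel-doc above
 * requires of the caller.
 */
static void example_done_with_key(struct blk_crypto_key *blk_key,
				  u8 *raw_key, size_t raw_key_size)
{
	memzero_explicit(blk_key, sizeof(*blk_key));
	memzero_explicit(raw_key, raw_key_size);
}
```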
| 370 | +/* |
|---|
| 371 | + * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the |
|---|
| 372 | + * request queue it's submitted to supports inline crypto, or the |
|---|
| 373 | + * blk-crypto-fallback is enabled and supports the cfg). |
|---|
| 374 | + */ |
|---|
| 375 | +bool blk_crypto_config_supported(struct request_queue *q, |
|---|
| 376 | + const struct blk_crypto_config *cfg) |
|---|
| 377 | +{ |
|---|
| 378 | + if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) && |
|---|
| 379 | + !cfg->is_hw_wrapped) |
|---|
| 380 | + return true; |
|---|
| 381 | + return blk_ksm_crypto_cfg_supported(q->ksm, cfg); |
|---|
| 382 | +} |
|---|
| 383 | + |
|---|
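A hypothetical mount-time probe built on this helper (the AES-256-XTS mode, 4096-byte data unit size, and 8-byte DUN are illustrative values, not requirements of the patch):

```c
/*
 * Hypothetical sketch: ask whether bios with this crypto configuration
 * could be serviced on @q, either by inline crypto hardware or by the
 * blk-crypto-fallback.
 */
static bool example_inlinecrypt_supported(struct request_queue *q)
{
	struct blk_crypto_config cfg = {
		.crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
		.data_unit_size = 4096,
		.dun_bytes = 8,
		.is_hw_wrapped = false,
	};

	return blk_crypto_config_supported(q, &cfg);
}
```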
| 245 | 384 | /** |
|---|
| 246 | | - * blk_crypto_start_using_mode() - Start using blk-crypto on a device |
|---|
| 247 | | - * @crypto_mode: the crypto mode that will be used |
|---|
| 248 | | - * @dun_bytes: number of bytes that will be used to specify the DUN |
|---|
| 249 | | - * @data_unit_size: the data unit size that will be used |
|---|
| 250 | | - * @is_hw_wrapped_key: whether the key will be hardware-wrapped |
|---|
| 385 | + * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device |
|---|
| 386 | + * @key: A key to use on the device |
|---|
| 251 | 387 | * @q: the request queue for the device |
|---|
| 252 | 388 | * |
|---|
| 253 | 389 | * Upper layers must call this function to ensure that either the hardware |
|---|
| 254 | | - * supports the needed crypto settings, or the crypto API fallback has |
|---|
| 255 | | - * transforms for the needed mode allocated and ready to go. |
|---|
| 390 | + * supports the key's crypto settings, or the crypto API fallback has transforms |
|---|
| 391 | + * for the needed mode allocated and ready to go. This function may allocate |
|---|
| 392 | + * an skcipher, and *should not* be called from the data path, since that might |
|---|
| 393 | + * cause a deadlock. |
|---|
| 256 | 394 | * |
|---|
| 257 | | - * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto |
|---|
| 258 | | - * settings and blk-crypto-fallback is either disabled or the needed |
|---|
| 259 | | - * algorithm is disabled in the crypto API; or another -errno code. |
|---|
| 395 | + * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and |
|---|
| 396 | + * blk-crypto-fallback is either disabled or the needed algorithm |
|---|
| 397 | + * is disabled in the crypto API; or another -errno code. |
|---|
| 260 | 398 | */ |
|---|
| 261 | | -int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, |
|---|
| 262 | | - unsigned int dun_bytes, |
|---|
| 263 | | - unsigned int data_unit_size, |
|---|
| 264 | | - bool is_hw_wrapped_key, |
|---|
| 265 | | - struct request_queue *q) |
|---|
| 399 | +int blk_crypto_start_using_key(const struct blk_crypto_key *key, |
|---|
| 400 | + struct request_queue *q) |
|---|
| 266 | 401 | { |
|---|
| 267 | | - if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, |
|---|
| 268 | | - dun_bytes, data_unit_size, |
|---|
| 269 | | - is_hw_wrapped_key)) |
|---|
| 402 | + if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) |
|---|
| 270 | 403 | return 0; |
|---|
| 271 | | - if (is_hw_wrapped_key) { |
|---|
| 404 | + if (key->crypto_cfg.is_hw_wrapped) { |
|---|
| 272 | 405 | pr_warn_once("hardware doesn't support wrapped keys\n"); |
|---|
| 273 | 406 | return -EOPNOTSUPP; |
|---|
| 274 | 407 | } |
|---|
| 275 | | - return blk_crypto_fallback_start_using_mode(crypto_mode); |
|---|
| 408 | + return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode); |
|---|
| 276 | 409 | } |
|---|
| 277 | | -EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode); |
|---|
| 410 | +EXPORT_SYMBOL_GPL(blk_crypto_start_using_key); |
|---|
| 278 | 411 | |
|---|
| 279 | 412 | /** |
|---|
| 280 | | - * blk_crypto_evict_key() - Evict a key from any inline encryption hardware |
|---|
| 281 | | - * it may have been programmed into |
|---|
| 282 | | - * @q: The request queue who's keyslot manager this key might have been |
|---|
| 283 | | - * programmed into |
|---|
| 284 | | - * @key: The key to evict |
|---|
| 413 | + * blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue |
|---|
| 414 | + * @q: a request_queue on which I/O using the key may have been done |
|---|
| 415 | + * @key: the key to evict |
|---|
| 285 | 416 | * |
|---|
| 286 | | - * Upper layers (filesystems) should call this function to ensure that a key |
|---|
| 287 | | - * is evicted from hardware that it might have been programmed into. This |
|---|
| 288 | | - * will call keyslot_manager_evict_key on the queue's keyslot manager, if one |
|---|
| 289 | | - * exists, and supports the crypto algorithm with the specified data unit size. |
|---|
| 290 | | - * Otherwise, it will evict the key from the blk-crypto-fallback's ksm. |
|---|
| 417 | + * For a given request_queue, this function removes the given blk_crypto_key |
|---|
| 418 | + * from the keyslot management structures and evicts it from any underlying |
|---|
| 419 | + * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been |
|---|
| 420 | + * programmed into. |
|---|
| 291 | 421 | * |
|---|
| 292 | | - * Return: 0 on success, -err on error. |
|---|
| 422 | + * Upper layers must call this before freeing the blk_crypto_key. It must be |
|---|
| 423 | + * called for every request_queue the key may have been used on. The key must |
|---|
| 424 | + * no longer be in use by any I/O when this function is called. |
|---|
| 425 | + * |
|---|
| 426 | + * Context: May sleep. |
|---|
| 293 | 427 | */ |
|---|
| 294 | | -int blk_crypto_evict_key(struct request_queue *q, |
|---|
| 295 | | - const struct blk_crypto_key *key) |
|---|
| 428 | +void blk_crypto_evict_key(struct request_queue *q, |
|---|
| 429 | + const struct blk_crypto_key *key) |
|---|
| 296 | 430 | { |
|---|
| 297 | | - if (q->ksm && |
|---|
| 298 | | - keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, |
|---|
| 299 | | - blk_crypto_key_dun_bytes(key), |
|---|
| 300 | | - key->data_unit_size, |
|---|
| 301 | | - key->is_hw_wrapped)) |
|---|
| 302 | | - return keyslot_manager_evict_key(q->ksm, key); |
|---|
| 431 | + int err; |
|---|
| 303 | 432 | |
|---|
| 304 | | - return blk_crypto_fallback_evict_key(key); |
|---|
| 433 | + if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) |
|---|
| 434 | + err = blk_ksm_evict_key(q->ksm, key); |
|---|
| 435 | + else |
|---|
| 436 | + err = blk_crypto_fallback_evict_key(key); |
|---|
| 437 | + /* |
|---|
| 438 | + * An error can only occur here if the key failed to be evicted from a |
|---|
| 439 | + * keyslot (due to a hardware or driver issue) or is allegedly still in |
|---|
| 440 | + * use by I/O (due to a kernel bug). Even in these cases, the key is |
|---|
| 441 | + * still unlinked from the keyslot management structures, and the caller |
|---|
| 442 | + * is allowed and expected to free it right away. There's nothing |
|---|
| 443 | + * callers can do to handle errors, so just log them and return void. |
|---|
| 444 | + */ |
|---|
| 445 | + if (err) |
|---|
| 446 | + pr_warn_ratelimited("error %d evicting key\n", err); |
|---|
| 305 | 447 | } |
|---|
| 306 | 448 | EXPORT_SYMBOL_GPL(blk_crypto_evict_key); |
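
Putting the exported pieces together, a hypothetical end-to-end sketch of how an upper layer uses a key under the reworked API (`blk_crypto_init_key()` setup and most error paths are omitted for brevity; the helper is illustrative, not part of this patch):

```c
/*
 * Hypothetical sketch: check/prepare the key for the queue, tag bios
 * with it, and evict it from the queue's keyslots before freeing it.
 */
static int example_key_lifecycle(struct request_queue *q,
				 struct blk_crypto_key *key,
				 struct bio *bio)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
	int err;

	/* May allocate fallback transforms, so not on the data path. */
	err = blk_crypto_start_using_key(key, q);
	if (err)
		return err;

	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	/* ... submit the bio and wait for the I/O to complete ... */

	/* Must be called before the key is freed or reused. */
	blk_crypto_evict_key(q, key);
	return 0;
}
```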
|---|