...
 #include <crypto/skcipher.h>
 #include <linux/blk-cgroup.h>
 #include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
 #include <linux/crypto.h>
 #include <linux/keyslot-manager.h>
 #include <linux/mempool.h>
...
  * resubmitted
  */
 	struct bvec_iter crypt_iter;
-	u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+	union {
+		struct {
+			struct work_struct work;
+			struct bio *bio;
+		};
+		struct {
+			void *bi_private_orig;
+			bio_end_io_t *bi_end_io_orig;
+		};
+	};
 };
 
-/* The following few vars are only used during the crypto API fallback */
 static struct kmem_cache *bio_fallback_crypt_ctx_cache;
 static mempool_t *bio_fallback_crypt_ctx_pool;
 
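
Annotation (not part of the patch): the anonymous union is safe because its two halves are live at disjoint points in a READ bio's lifetime. The saved bi_private/bi_end_io are only needed while the bio is in flight; once the fallback's endio handler has restored them, the same storage is reused for the deferred-decryption work item. A condensed sketch, using only fields and functions that appear later in this patch:

	/* Submission (blk_crypto_fallback_bio_prep): stash the caller's hooks. */
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;

	/* Completion (blk_crypto_fallback_decrypt_endio): restore them first,
	 * then the very same bytes hold the work item and bio pointer. */
	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;
	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
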
...
 static DEFINE_MUTEX(tfms_init_lock);
 static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
 
-struct blk_crypto_decrypt_work {
-	struct work_struct work;
-	struct bio *bio;
-};
-
 static struct blk_crypto_keyslot {
-	struct crypto_skcipher *tfm;
 	enum blk_crypto_mode_num crypto_mode;
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-/* The following few vars are only used during the crypto API fallback */
-static struct keyslot_manager *blk_crypto_ksm;
+static struct blk_keyslot_manager blk_crypto_ksm;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
-static struct kmem_cache *blk_crypto_decrypt_work_cache;
-
-bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
-{
-	return bc && bc->bc_ksm == blk_crypto_ksm;
-}
 
 /*
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
...
 	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
 }
 
-static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
 {
 	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
-	const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
+	const enum blk_crypto_mode_num crypto_mode =
+		key->crypto_cfg.crypto_mode;
 	int err;
 
 	if (crypto_mode != slotp->crypto_mode &&
-	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
+	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
 		blk_crypto_evict_keyslot(slot);
-	}
 
-	if (!slotp->tfms[crypto_mode])
-		return -ENOMEM;
 	slotp->crypto_mode = crypto_mode;
 	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
...
 	return 0;
 }
 
-static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
 {
...
 
 /*
  * The crypto API fallback KSM ops - only used for a bio when it specifies a
- * blk_crypto_mode for which we failed to get a keyslot in the device's inline
- * encryption hardware (which probably means the device doesn't have inline
- * encryption hardware that supports that crypto mode).
+ * blk_crypto_key that was not supported by the device's inline encryption
+ * hardware.
  */
-static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
+static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
 	.keyslot_program	= blk_crypto_keyslot_program,
 	.keyslot_evict		= blk_crypto_keyslot_evict,
 };
 
-static void blk_crypto_encrypt_endio(struct bio *enc_bio)
+static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
 	struct bio *src_bio = enc_bio->bi_private;
 	int i;
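
Aside: blk_ksm_ll_ops is the same hook table that drivers for real inline-encryption hardware fill in; the fallback simply provides a software implementation of both hooks. A hedged sketch of the driver-side shape (my_program_key, my_evict_key and my_ksm_ops are hypothetical names, not from this patch):

static int my_program_key(struct blk_keyslot_manager *ksm,
			  const struct blk_crypto_key *key,
			  unsigned int slot)
{
	/* Write key->raw into the hardware keyslot numbered 'slot'. */
	return 0;
}

static int my_evict_key(struct blk_keyslot_manager *ksm,
			const struct blk_crypto_key *key,
			unsigned int slot)
{
	/* Clear the hardware keyslot numbered 'slot'. */
	return 0;
}

static const struct blk_ksm_ll_ops my_ksm_ops = {
	.keyslot_program	= my_program_key,
	.keyslot_evict		= my_evict_key,
};
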
...
 	bio_for_each_segment(bv, bio_src, iter)
 		bio->bi_io_vec[bio->bi_vcnt++] = bv;
 
-	if (bio_integrity(bio_src) &&
-	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
-		bio_put(bio);
-		return NULL;
-	}
-
-	bio_clone_blkcg_association(bio, bio_src);
+	bio_clone_blkg_association(bio, bio_src);
+	blkcg_bio_issue_init(bio);
 
 	bio_clone_skip_dm_default_key(bio, bio_src);
 
 	return bio;
 }
 
-static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
-				       struct skcipher_request **ciph_req_ret,
-				       struct crypto_wait *wait)
+static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
+					struct skcipher_request **ciph_req_ret,
+					struct crypto_wait *wait)
 {
 	struct skcipher_request *ciph_req;
 	const struct blk_crypto_keyslot *slotp;
+	int keyslot_idx = blk_ksm_get_slot_idx(slot);
 
-	slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
+	slotp = &blk_crypto_keyslots[keyslot_idx];
 	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
-	if (!ciph_req) {
-		src_bio->bi_status = BLK_STS_RESOURCE;
-		return -ENOMEM;
-	}
+	if (!ciph_req)
+		return false;
 
 	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
 	*ciph_req_ret = ciph_req;
-	return 0;
+
+	return true;
 }
 
-static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
+static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
 	unsigned int i = 0;
...
 		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
 		if (!split_bio) {
 			bio->bi_status = BLK_STS_RESOURCE;
-			return -ENOMEM;
+			return false;
 		}
 		bio_chain(split_bio, bio);
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		*bio_ptr = split_bio;
 	}
-	return 0;
+
+	return true;
 }
 
 union blk_crypto_iv {
...
  * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
  * and replace *bio_ptr with the bounce bio. May split input bio if it's too
- * large.
+ * large. Returns true on success. Returns false and sets bio->bi_status on
+ * error.
  */
-static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
+static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 {
-	struct bio *src_bio;
+	struct bio *src_bio, *enc_bio;
+	struct bio_crypt_ctx *bc;
+	struct blk_ksm_keyslot *slot;
+	int data_unit_size;
 	struct skcipher_request *ciph_req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
-	union blk_crypto_iv iv;
 	struct scatterlist src, dst;
-	struct bio *enc_bio;
+	union blk_crypto_iv iv;
 	unsigned int i, j;
-	int data_unit_size;
-	struct bio_crypt_ctx *bc;
-	int err = 0;
+	bool ret = false;
+	blk_status_t blk_st;
 
 	/* Split the bio if it's too big for single page bvec */
-	err = blk_crypto_split_bio_if_needed(bio_ptr);
-	if (err)
-		return err;
+	if (!blk_crypto_split_bio_if_needed(bio_ptr))
+		return false;
 
 	src_bio = *bio_ptr;
 	bc = src_bio->bi_crypt_context;
-	data_unit_size = bc->bc_key->data_unit_size;
+	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 
 	/* Allocate bounce bio for encryption */
 	enc_bio = blk_crypto_clone_bio(src_bio);
 	if (!enc_bio) {
 		src_bio->bi_status = BLK_STS_RESOURCE;
-		return -ENOMEM;
+		return false;
 	}
 
 	/*
 	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
 	 * for the algorithm and key specified for this bio.
 	 */
-	err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
-	if (err) {
-		src_bio->bi_status = BLK_STS_IOERR;
+	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+	if (blk_st != BLK_STS_OK) {
+		src_bio->bi_status = blk_st;
 		goto out_put_enc_bio;
 	}
 
 	/* and then allocate an skcipher_request for it */
-	err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
-	if (err)
+	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+		src_bio->bi_status = BLK_STS_RESOURCE;
 		goto out_release_keyslot;
+	}
 
 	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
 	sg_init_table(&src, 1);
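
Note on the new keyslot API used above (sketch, not part of the patch): blk_ksm_get_slot_for_key() returns a blk_status_t that can be propagated directly to bio->bi_status, and hands back an opaque slot that must be released with blk_ksm_put_slot(). Condensed to its skeleton, with error handling trimmed:

	struct blk_ksm_keyslot *slot;
	blk_status_t blk_st;

	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK)
		return blk_st;			/* becomes bio->bi_status */

	/* Use the skcipher that keyslot_program() keyed for this slot. */
	slotp = &blk_crypto_keyslots[blk_ksm_get_slot_idx(slot)];

	blk_ksm_put_slot(slot);			/* drop the slot reference */
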
...
 
 		if (!ciphertext_page) {
 			src_bio->bi_status = BLK_STS_RESOURCE;
-			err = -ENOMEM;
 			goto out_free_bounce_pages;
 		}
 
...
 		/* Encrypt each data unit in this page */
 		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
 			blk_crypto_dun_to_iv(curr_dun, &iv);
-			err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
-					      &wait);
-			if (err) {
+			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
+					    &wait)) {
				i++;
-				src_bio->bi_status = BLK_STS_RESOURCE;
+				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
 			bio_crypt_dun_increment(curr_dun, 1);
...
 	}
 
 	enc_bio->bi_private = src_bio;
-	enc_bio->bi_end_io = blk_crypto_encrypt_endio;
+	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
 	*bio_ptr = enc_bio;
+	ret = true;
 
 	enc_bio = NULL;
-	err = 0;
 	goto out_free_ciph_req;
 
 out_free_bounce_pages:
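
Worked example for the per-data-unit encryption loop in the hunks above (assumed numbers, not from the patch): each bounce bvec is a single page, and the DUN advances by one per data unit, not per page or per sector. The i++ before the goto in the error path is presumably there so the cleanup label also frees the bounce page already installed at the failing index.

/*
 * Assuming a 4096-byte data unit size, a three-page bounce bio whose context
 * starts at DUN 10 issues one skcipher request per page:
 *
 *	page 0: blk_crypto_dun_to_iv({10, 0, ...}, &iv)
 *	page 1: blk_crypto_dun_to_iv({11, 0, ...}, &iv)
 *	page 2: blk_crypto_dun_to_iv({12, 0, ...}, &iv)
 *
 * With 512-byte data units the inner loop instead runs eight times per page
 * and the DUN advances by eight per page.
 */
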
...
 out_free_ciph_req:
 	skcipher_request_free(ciph_req);
 out_release_keyslot:
-	bio_crypt_ctx_release_keyslot(bc);
+	blk_ksm_put_slot(slot);
 out_put_enc_bio:
 	if (enc_bio)
 		bio_put(enc_bio);
 
-	return err;
-}
-
-static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
-{
-	mempool_free(container_of(bio->bi_crypt_context,
-				  struct bio_fallback_crypt_ctx,
-				  crypt_ctx),
-		     bio_fallback_crypt_ctx_pool);
-	bio->bi_crypt_context = NULL;
+	return ret;
 }
 
 /*
  * The crypto API fallback's main decryption routine.
- * Decrypts input bio in place.
+ * Decrypts input bio in place, and calls bio_endio on the bio.
  */
-static void blk_crypto_decrypt_bio(struct work_struct *work)
+static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 {
-	struct blk_crypto_decrypt_work *decrypt_work =
-		container_of(work, struct blk_crypto_decrypt_work, work);
-	struct bio *bio = decrypt_work->bio;
+	struct bio_fallback_crypt_ctx *f_ctx =
+		container_of(work, struct bio_fallback_crypt_ctx, work);
+	struct bio *bio = f_ctx->bio;
+	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
+	struct blk_ksm_keyslot *slot;
 	struct skcipher_request *ciph_req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
-	struct bio_vec bv;
-	struct bvec_iter iter;
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	union blk_crypto_iv iv;
 	struct scatterlist sg;
-	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-	struct bio_fallback_crypt_ctx *f_ctx =
-		container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
-	const int data_unit_size = bc->bc_key->data_unit_size;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 	unsigned int i;
-	int err;
+	blk_status_t blk_st;
 
 	/*
 	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
 	 * for the algorithm and key specified for this bio.
 	 */
-	if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
-		bio->bi_status = BLK_STS_RESOURCE;
+	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+	if (blk_st != BLK_STS_OK) {
+		bio->bi_status = blk_st;
 		goto out_no_keyslot;
 	}
 
 	/* and then allocate an skcipher_request for it */
-	err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
-	if (err)
+	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+		bio->bi_status = BLK_STS_RESOURCE;
 		goto out;
+	}
 
-	memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
+	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
 	sg_init_table(&sg, 1);
 	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);
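
Aside on the work function above (illustration, not part of the patch): because struct bio_fallback_crypt_ctx now embeds the work_struct directly, the worker recovers its context with container_of() instead of going through a separately allocated blk_crypto_decrypt_work. The idiom in isolation, with made-up names:

struct my_ctx {
	struct work_struct work;
	struct bio *bio;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* ctx->bio is now usable from process context. */
}
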
...
 
 out:
 	skcipher_request_free(ciph_req);
-	bio_crypt_ctx_release_keyslot(bc);
+	blk_ksm_put_slot(slot);
 out_no_keyslot:
-	kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
-	blk_crypto_free_fallback_crypt_ctx(bio);
+	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
 	bio_endio(bio);
 }
 
-/*
- * Queue bio for decryption.
- * Returns true iff bio was queued for decryption.
+/**
+ * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
+ *
+ * @bio: the bio to queue
+ *
+ * Restore bi_private and bi_end_io, and queue the bio for decryption into a
+ * workqueue, since this function will be called from an atomic context.
  */
-bool blk_crypto_queue_decrypt_bio(struct bio *bio)
+static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
 {
-	struct blk_crypto_decrypt_work *decrypt_work;
+	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;
+
+	bio->bi_private = f_ctx->bi_private_orig;
+	bio->bi_end_io = f_ctx->bi_end_io_orig;
 
 	/* If there was an IO error, don't queue for decrypt. */
-	if (bio->bi_status)
-		goto out;
-
-	decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
-					 GFP_ATOMIC);
-	if (!decrypt_work) {
-		bio->bi_status = BLK_STS_RESOURCE;
-		goto out;
+	if (bio->bi_status) {
+		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
+		bio_endio(bio);
+		return;
 	}
 
-	INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
-	decrypt_work->bio = bio;
-	queue_work(blk_crypto_wq, &decrypt_work->work);
+	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
+	f_ctx->bio = bio;
+	queue_work(blk_crypto_wq, &f_ctx->work);
+}
+
+/**
+ * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
+ *
+ * @bio_ptr: pointer to the bio to prepare
+ *
+ * If bio is doing a WRITE operation, this splits the bio into two parts if it's
+ * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio
+ * for the first part, encrypts it, and update bio_ptr to point to the bounce
+ * bio.
+ *
+ * For a READ operation, we mark the bio for decryption by using bi_private and
+ * bi_end_io.
+ *
+ * In either case, this function will make the bio look like a regular bio (i.e.
+ * as if no encryption context was ever specified) for the purposes of the rest
+ * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
+ * currently supported together).
+ *
+ * Return: true on success. Sets bio->bi_status and returns false on error.
+ */
+bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+{
+	struct bio *bio = *bio_ptr;
+	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+	struct bio_fallback_crypt_ctx *f_ctx;
+
+	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
+		/* User didn't call blk_crypto_start_using_key() first */
+		bio->bi_status = BLK_STS_IOERR;
+		return false;
+	}
+
+	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
+					  &bc->bc_key->crypto_cfg)) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		return false;
+	}
+
+	if (bio_data_dir(bio) == WRITE)
+		return blk_crypto_fallback_encrypt_bio(bio_ptr);
+
+	/*
+	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
+	 * bi_end_io appropriately to trigger decryption when the bio is ended.
+	 */
+	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
+	f_ctx->crypt_ctx = *bc;
+	f_ctx->crypt_iter = bio->bi_iter;
+	f_ctx->bi_private_orig = bio->bi_private;
+	f_ctx->bi_end_io_orig = bio->bi_end_io;
+	bio->bi_private = (void *)f_ctx;
+	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
+	bio_crypt_free_ctx(bio);
 
 	return true;
+}
+
+int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+{
+	return blk_ksm_evict_key(&blk_crypto_ksm, key);
+}
+
+static bool blk_crypto_fallback_inited;
+static int blk_crypto_fallback_init(void)
+{
+	int i;
+	int err;
+
+	if (blk_crypto_fallback_inited)
+		return 0;
+
+	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+
+	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	if (err)
+		goto out;
+	err = -ENOMEM;
+
+	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
+	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
+
+	/* All blk-crypto modes have a crypto API fallback. */
+	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+
+	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+					WQ_UNBOUND | WQ_HIGHPRI |
+					WQ_MEM_RECLAIM, num_online_cpus());
+	if (!blk_crypto_wq)
+		goto fail_free_ksm;
+
+	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
+				      sizeof(blk_crypto_keyslots[0]),
+				      GFP_KERNEL);
+	if (!blk_crypto_keyslots)
+		goto fail_free_wq;
+
+	blk_crypto_bounce_page_pool =
+		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
+	if (!blk_crypto_bounce_page_pool)
+		goto fail_free_keyslots;
+
+	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
+	if (!bio_fallback_crypt_ctx_cache)
+		goto fail_free_bounce_page_pool;
+
+	bio_fallback_crypt_ctx_pool =
+		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
+					 bio_fallback_crypt_ctx_cache);
+	if (!bio_fallback_crypt_ctx_pool)
+		goto fail_free_crypt_ctx_cache;
+
+	blk_crypto_fallback_inited = true;
+
+	return 0;
+fail_free_crypt_ctx_cache:
+	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
+fail_free_bounce_page_pool:
+	mempool_destroy(blk_crypto_bounce_page_pool);
+fail_free_keyslots:
+	kfree(blk_crypto_keyslots);
+fail_free_wq:
+	destroy_workqueue(blk_crypto_wq);
+fail_free_ksm:
+	blk_ksm_destroy(&blk_crypto_ksm);
 out:
-	blk_crypto_free_fallback_crypt_ctx(bio);
-	return false;
+	return err;
 }
 
 /*
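
Caller's-eye sketch (hypothetical, not part of the patch): blk_crypto_fallback_bio_prep() is meant to be called by the blk-crypto core when the target queue cannot handle the bio's key itself. On a WRITE it swaps *bio_ptr for an encrypted bounce bio; on a READ it leaves the bio's data path alone but arms it for decryption at completion. Either way the caller just submits whatever *bio_ptr points to afterwards. The surrounding function name is made up:

static bool submit_with_fallback(struct bio **bio_ptr)
{
	if (!blk_crypto_fallback_bio_prep(bio_ptr))
		return false;	/* (*bio_ptr)->bi_status has been set */

	submit_bio_noacct(*bio_ptr);
	return true;
}
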
...
 	return 0;
 
 	mutex_lock(&tfms_init_lock);
-	if (likely(tfms_inited[mode_num]))
+	if (tfms_inited[mode_num])
+		goto out;
+
+	err = blk_crypto_fallback_init();
+	if (err)
 		goto out;
 
 	for (i = 0; i < blk_crypto_num_keyslots; i++) {
...
 		}
 
 		crypto_skcipher_set_flags(slotp->tfms[mode_num],
-					  CRYPTO_TFM_REQ_WEAK_KEY);
+					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 	}
 
 	/*
...
 out:
 	mutex_unlock(&tfms_init_lock);
 	return err;
-}
-
-int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
-{
-	return keyslot_manager_evict_key(blk_crypto_ksm, key);
-}
-
-int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-	struct bio_fallback_crypt_ctx *f_ctx;
-
-	if (bc->bc_key->is_hw_wrapped) {
-		pr_warn_once("HW wrapped key cannot be used with fallback.\n");
-		bio->bi_status = BLK_STS_NOTSUPP;
-		return -EOPNOTSUPP;
-	}
-
-	if (!tfms_inited[bc->bc_key->crypto_mode]) {
-		bio->bi_status = BLK_STS_IOERR;
-		return -EIO;
-	}
-
-	if (bio_data_dir(bio) == WRITE)
-		return blk_crypto_encrypt_bio(bio_ptr);
-
-	/*
-	 * Mark bio as fallback crypted and replace the bio_crypt_ctx with
-	 * another one contained in a bio_fallback_crypt_ctx, so that the
-	 * fallback has space to store the info it needs for decryption.
-	 */
-	bc->bc_ksm = blk_crypto_ksm;
-	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
-	f_ctx->crypt_ctx = *bc;
-	memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
-	f_ctx->crypt_iter = bio->bi_iter;
-
-	bio_crypt_free_ctx(bio);
-	bio->bi_crypt_context = &f_ctx->crypt_ctx;
-
-	return 0;
-}
-
-int __init blk_crypto_fallback_init(void)
-{
-	int i;
-	unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
-
-	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
-
-	/* All blk-crypto modes have a crypto API fallback. */
-	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		crypto_mode_supported[i] = 0xFFFFFFFF;
-	crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
-
-	blk_crypto_ksm = keyslot_manager_create(
-			NULL, blk_crypto_num_keyslots,
-			&blk_crypto_ksm_ll_ops,
-			BLK_CRYPTO_FEATURE_STANDARD_KEYS,
-			crypto_mode_supported, NULL);
-	if (!blk_crypto_ksm)
-		return -ENOMEM;
-
-	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
-					WQ_UNBOUND | WQ_HIGHPRI |
-					WQ_MEM_RECLAIM, num_online_cpus());
-	if (!blk_crypto_wq)
-		return -ENOMEM;
-
-	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
-				      sizeof(blk_crypto_keyslots[0]),
-				      GFP_KERNEL);
-	if (!blk_crypto_keyslots)
-		return -ENOMEM;
-
-	blk_crypto_bounce_page_pool =
-		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
-	if (!blk_crypto_bounce_page_pool)
-		return -ENOMEM;
-
-	blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
-						   SLAB_RECLAIM_ACCOUNT);
-	if (!blk_crypto_decrypt_work_cache)
-		return -ENOMEM;
-
-	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
-	if (!bio_fallback_crypt_ctx_cache)
-		return -ENOMEM;
-
-	bio_fallback_crypt_ctx_pool =
-		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
-					 bio_fallback_crypt_ctx_cache);
-	if (!bio_fallback_crypt_ctx_pool)
-		return -ENOMEM;
-
-	return 0;
 }