2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/block/blk-crypto-fallback.c
@@ -12,6 +12,7 @@
 #include <crypto/skcipher.h>
 #include <linux/blk-cgroup.h>
 #include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
 #include <linux/crypto.h>
 #include <linux/keyslot-manager.h>
 #include <linux/mempool.h>
@@ -44,10 +45,18 @@
  * resubmitted
  */
         struct bvec_iter crypt_iter;
-        u64 fallback_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+        union {
+                struct {
+                        struct work_struct work;
+                        struct bio *bio;
+                };
+                struct {
+                        void *bi_private_orig;
+                        bio_end_io_t *bi_end_io_orig;
+                };
+        };
 };

-/* The following few vars are only used during the crypto API fallback */
 static struct kmem_cache *bio_fallback_crypt_ctx_cache;
 static mempool_t *bio_fallback_crypt_ctx_pool;

@@ -63,27 +72,14 @@
 static DEFINE_MUTEX(tfms_init_lock);
 static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

-struct blk_crypto_decrypt_work {
-        struct work_struct work;
-        struct bio *bio;
-};
-
 static struct blk_crypto_keyslot {
-        struct crypto_skcipher *tfm;
         enum blk_crypto_mode_num crypto_mode;
         struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;

-/* The following few vars are only used during the crypto API fallback */
-static struct keyslot_manager *blk_crypto_ksm;
+static struct blk_keyslot_manager blk_crypto_ksm;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
-static struct kmem_cache *blk_crypto_decrypt_work_cache;
-
-bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
-{
-        return bc && bc->bc_ksm == blk_crypto_ksm;
-}

 /*
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -106,21 +102,19 @@
         slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
 }

-static int blk_crypto_keyslot_program(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
                                       const struct blk_crypto_key *key,
                                       unsigned int slot)
 {
         struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
-        const enum blk_crypto_mode_num crypto_mode = key->crypto_mode;
+        const enum blk_crypto_mode_num crypto_mode =
+                        key->crypto_cfg.crypto_mode;
         int err;

         if (crypto_mode != slotp->crypto_mode &&
-            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) {
+            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                 blk_crypto_evict_keyslot(slot);
-        }

-        if (!slotp->tfms[crypto_mode])
-                return -ENOMEM;
         slotp->crypto_mode = crypto_mode;
         err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                      key->size);
@@ -131,7 +125,7 @@
         return 0;
 }

-static int blk_crypto_keyslot_evict(struct keyslot_manager *ksm,
+static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
                                     const struct blk_crypto_key *key,
                                     unsigned int slot)
 {
@@ -141,16 +135,15 @@

 /*
  * The crypto API fallback KSM ops - only used for a bio when it specifies a
- * blk_crypto_mode for which we failed to get a keyslot in the device's inline
- * encryption hardware (which probably means the device doesn't have inline
- * encryption hardware that supports that crypto mode).
+ * blk_crypto_key that was not supported by the device's inline encryption
+ * hardware.
  */
-static const struct keyslot_mgmt_ll_ops blk_crypto_ksm_ll_ops = {
+static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
         .keyslot_program = blk_crypto_keyslot_program,
         .keyslot_evict = blk_crypto_keyslot_evict,
 };

-static void blk_crypto_encrypt_endio(struct bio *enc_bio)
+static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
         struct bio *src_bio = enc_bio->bi_private;
         int i;
@@ -184,43 +177,38 @@
         bio_for_each_segment(bv, bio_src, iter)
                 bio->bi_io_vec[bio->bi_vcnt++] = bv;

-        if (bio_integrity(bio_src) &&
-            bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) {
-                bio_put(bio);
-                return NULL;
-        }
-
-        bio_clone_blkcg_association(bio, bio_src);
+        bio_clone_blkg_association(bio, bio_src);
+        blkcg_bio_issue_init(bio);

         bio_clone_skip_dm_default_key(bio, bio_src);

         return bio;
 }

-static int blk_crypto_alloc_cipher_req(struct bio *src_bio,
-                                       struct skcipher_request **ciph_req_ret,
-                                       struct crypto_wait *wait)
+static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
+                                        struct skcipher_request **ciph_req_ret,
+                                        struct crypto_wait *wait)
 {
         struct skcipher_request *ciph_req;
         const struct blk_crypto_keyslot *slotp;
+        int keyslot_idx = blk_ksm_get_slot_idx(slot);

-        slotp = &blk_crypto_keyslots[src_bio->bi_crypt_context->bc_keyslot];
+        slotp = &blk_crypto_keyslots[keyslot_idx];
         ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                           GFP_NOIO);
-        if (!ciph_req) {
-                src_bio->bi_status = BLK_STS_RESOURCE;
-                return -ENOMEM;
-        }
+        if (!ciph_req)
+                return false;

         skcipher_request_set_callback(ciph_req,
                                       CRYPTO_TFM_REQ_MAY_BACKLOG |
                                       CRYPTO_TFM_REQ_MAY_SLEEP,
                                       crypto_req_done, wait);
         *ciph_req_ret = ciph_req;
-        return 0;
+
+        return true;
 }

-static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
+static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 {
         struct bio *bio = *bio_ptr;
         unsigned int i = 0;
@@ -239,13 +227,14 @@
                 split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
                 if (!split_bio) {
                         bio->bi_status = BLK_STS_RESOURCE;
-                        return -ENOMEM;
+                        return false;
                 }
                 bio_chain(split_bio, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 *bio_ptr = split_bio;
         }
-        return 0;
+
+        return true;
 }

 union blk_crypto_iv {
@@ -266,52 +255,54 @@
  * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
  * and replace *bio_ptr with the bounce bio. May split input bio if it's too
- * large.
+ * large. Returns true on success. Returns false and sets bio->bi_status on
+ * error.
  */
-static int blk_crypto_encrypt_bio(struct bio **bio_ptr)
+static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 {
-        struct bio *src_bio;
+        struct bio *src_bio, *enc_bio;
+        struct bio_crypt_ctx *bc;
+        struct blk_ksm_keyslot *slot;
+        int data_unit_size;
         struct skcipher_request *ciph_req = NULL;
         DECLARE_CRYPTO_WAIT(wait);
         u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
-        union blk_crypto_iv iv;
         struct scatterlist src, dst;
-        struct bio *enc_bio;
+        union blk_crypto_iv iv;
         unsigned int i, j;
-        int data_unit_size;
-        struct bio_crypt_ctx *bc;
-        int err = 0;
+        bool ret = false;
+        blk_status_t blk_st;

         /* Split the bio if it's too big for single page bvec */
-        err = blk_crypto_split_bio_if_needed(bio_ptr);
-        if (err)
-                return err;
+        if (!blk_crypto_split_bio_if_needed(bio_ptr))
+                return false;

         src_bio = *bio_ptr;
         bc = src_bio->bi_crypt_context;
-        data_unit_size = bc->bc_key->data_unit_size;
+        data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

         /* Allocate bounce bio for encryption */
         enc_bio = blk_crypto_clone_bio(src_bio);
         if (!enc_bio) {
                 src_bio->bi_status = BLK_STS_RESOURCE;
-                return -ENOMEM;
+                return false;
         }

         /*
          * Use the crypto API fallback keyslot manager to get a crypto_skcipher
          * for the algorithm and key specified for this bio.
          */
-        err = bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm);
-        if (err) {
-                src_bio->bi_status = BLK_STS_IOERR;
+        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+        if (blk_st != BLK_STS_OK) {
+                src_bio->bi_status = blk_st;
                 goto out_put_enc_bio;
         }

         /* and then allocate an skcipher_request for it */
-        err = blk_crypto_alloc_cipher_req(src_bio, &ciph_req, &wait);
-        if (err)
+        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+                src_bio->bi_status = BLK_STS_RESOURCE;
                 goto out_release_keyslot;
+        }

         memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
         sg_init_table(&src, 1);
@@ -331,7 +322,6 @@

                 if (!ciphertext_page) {
                         src_bio->bi_status = BLK_STS_RESOURCE;
-                        err = -ENOMEM;
                         goto out_free_bounce_pages;
                 }

@@ -343,11 +333,10 @@
                 /* Encrypt each data unit in this page */
                 for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                         blk_crypto_dun_to_iv(curr_dun, &iv);
-                        err = crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
-                                              &wait);
-                        if (err) {
+                        if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
+                                            &wait)) {
                                 i++;
-                                src_bio->bi_status = BLK_STS_RESOURCE;
+                                src_bio->bi_status = BLK_STS_IOERR;
                                 goto out_free_bounce_pages;
                         }
                         bio_crypt_dun_increment(curr_dun, 1);
@@ -357,11 +346,11 @@
         }

         enc_bio->bi_private = src_bio;
-        enc_bio->bi_end_io = blk_crypto_encrypt_endio;
+        enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
         *bio_ptr = enc_bio;
+        ret = true;

         enc_bio = NULL;
-        err = 0;
         goto out_free_ciph_req;

 out_free_bounce_pages:
@@ -371,61 +360,53 @@
 out_free_ciph_req:
         skcipher_request_free(ciph_req);
 out_release_keyslot:
-        bio_crypt_ctx_release_keyslot(bc);
+        blk_ksm_put_slot(slot);
 out_put_enc_bio:
         if (enc_bio)
                 bio_put(enc_bio);

-        return err;
-}
-
-static void blk_crypto_free_fallback_crypt_ctx(struct bio *bio)
-{
-        mempool_free(container_of(bio->bi_crypt_context,
-                                  struct bio_fallback_crypt_ctx,
-                                  crypt_ctx),
-                     bio_fallback_crypt_ctx_pool);
-        bio->bi_crypt_context = NULL;
+        return ret;
 }

 /*
  * The crypto API fallback's main decryption routine.
- * Decrypts input bio in place.
+ * Decrypts input bio in place, and calls bio_endio on the bio.
  */
-static void blk_crypto_decrypt_bio(struct work_struct *work)
+static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 {
-        struct blk_crypto_decrypt_work *decrypt_work =
-                container_of(work, struct blk_crypto_decrypt_work, work);
-        struct bio *bio = decrypt_work->bio;
+        struct bio_fallback_crypt_ctx *f_ctx =
+                container_of(work, struct bio_fallback_crypt_ctx, work);
+        struct bio *bio = f_ctx->bio;
+        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
+        struct blk_ksm_keyslot *slot;
         struct skcipher_request *ciph_req = NULL;
         DECLARE_CRYPTO_WAIT(wait);
-        struct bio_vec bv;
-        struct bvec_iter iter;
         u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
         union blk_crypto_iv iv;
         struct scatterlist sg;
-        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-        struct bio_fallback_crypt_ctx *f_ctx =
-                container_of(bc, struct bio_fallback_crypt_ctx, crypt_ctx);
-        const int data_unit_size = bc->bc_key->data_unit_size;
+        struct bio_vec bv;
+        struct bvec_iter iter;
+        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
         unsigned int i;
-        int err;
+        blk_status_t blk_st;

         /*
          * Use the crypto API fallback keyslot manager to get a crypto_skcipher
          * for the algorithm and key specified for this bio.
          */
-        if (bio_crypt_ctx_acquire_keyslot(bc, blk_crypto_ksm)) {
-                bio->bi_status = BLK_STS_RESOURCE;
+        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
+        if (blk_st != BLK_STS_OK) {
+                bio->bi_status = blk_st;
                 goto out_no_keyslot;
         }

         /* and then allocate an skcipher_request for it */
-        err = blk_crypto_alloc_cipher_req(bio, &ciph_req, &wait);
-        if (err)
+        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+                bio->bi_status = BLK_STS_RESOURCE;
                 goto out;
+        }

-        memcpy(curr_dun, f_ctx->fallback_dun, sizeof(curr_dun));
+        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
         sg_init_table(&sg, 1);
         skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                    iv.bytes);
@@ -451,40 +432,168 @@

 out:
         skcipher_request_free(ciph_req);
-        bio_crypt_ctx_release_keyslot(bc);
+        blk_ksm_put_slot(slot);
 out_no_keyslot:
-        kmem_cache_free(blk_crypto_decrypt_work_cache, decrypt_work);
-        blk_crypto_free_fallback_crypt_ctx(bio);
+        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
         bio_endio(bio);
 }

-/*
- * Queue bio for decryption.
- * Returns true iff bio was queued for decryption.
+/**
+ * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
+ *
+ * @bio: the bio to queue
+ *
+ * Restore bi_private and bi_end_io, and queue the bio for decryption into a
+ * workqueue, since this function will be called from an atomic context.
  */
-bool blk_crypto_queue_decrypt_bio(struct bio *bio)
+static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
 {
-        struct blk_crypto_decrypt_work *decrypt_work;
+        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;
+
+        bio->bi_private = f_ctx->bi_private_orig;
+        bio->bi_end_io = f_ctx->bi_end_io_orig;

         /* If there was an IO error, don't queue for decrypt. */
-        if (bio->bi_status)
-                goto out;
-
-        decrypt_work = kmem_cache_zalloc(blk_crypto_decrypt_work_cache,
-                                         GFP_ATOMIC);
-        if (!decrypt_work) {
-                bio->bi_status = BLK_STS_RESOURCE;
-                goto out;
+        if (bio->bi_status) {
+                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
+                bio_endio(bio);
+                return;
         }

-        INIT_WORK(&decrypt_work->work, blk_crypto_decrypt_bio);
-        decrypt_work->bio = bio;
-        queue_work(blk_crypto_wq, &decrypt_work->work);
+        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
+        f_ctx->bio = bio;
+        queue_work(blk_crypto_wq, &f_ctx->work);
+}
+
+/**
+ * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
+ *
+ * @bio_ptr: pointer to the bio to prepare
+ *
+ * If bio is doing a WRITE operation, this splits the bio into two parts if it's
+ * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio
+ * for the first part, encrypts it, and update bio_ptr to point to the bounce
+ * bio.
+ *
+ * For a READ operation, we mark the bio for decryption by using bi_private and
+ * bi_end_io.
+ *
+ * In either case, this function will make the bio look like a regular bio (i.e.
+ * as if no encryption context was ever specified) for the purposes of the rest
+ * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
+ * currently supported together).
+ *
+ * Return: true on success. Sets bio->bi_status and returns false on error.
+ */
+bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+{
+        struct bio *bio = *bio_ptr;
+        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
+        struct bio_fallback_crypt_ctx *f_ctx;
+
+        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
+                /* User didn't call blk_crypto_start_using_key() first */
+                bio->bi_status = BLK_STS_IOERR;
+                return false;
+        }
+
+        if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
+                                          &bc->bc_key->crypto_cfg)) {
+                bio->bi_status = BLK_STS_NOTSUPP;
+                return false;
+        }
+
+        if (bio_data_dir(bio) == WRITE)
+                return blk_crypto_fallback_encrypt_bio(bio_ptr);
+
+        /*
+         * bio READ case: Set up a f_ctx in the bio's bi_private and set the
+         * bi_end_io appropriately to trigger decryption when the bio is ended.
+         */
+        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
+        f_ctx->crypt_ctx = *bc;
+        f_ctx->crypt_iter = bio->bi_iter;
+        f_ctx->bi_private_orig = bio->bi_private;
+        f_ctx->bi_end_io_orig = bio->bi_end_io;
+        bio->bi_private = (void *)f_ctx;
+        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
+        bio_crypt_free_ctx(bio);

         return true;
+}
+
+int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+{
+        return blk_ksm_evict_key(&blk_crypto_ksm, key);
+}
+
+static bool blk_crypto_fallback_inited;
+static int blk_crypto_fallback_init(void)
+{
+        int i;
+        int err;
+
+        if (blk_crypto_fallback_inited)
+                return 0;
+
+        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+
+        err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+        if (err)
+                goto out;
+        err = -ENOMEM;
+
+        blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
+        blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+        blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
+
+        /* All blk-crypto modes have a crypto API fallback. */
+        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+                blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
+        blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+
+        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+                                        WQ_UNBOUND | WQ_HIGHPRI |
+                                        WQ_MEM_RECLAIM, num_online_cpus());
+        if (!blk_crypto_wq)
+                goto fail_free_ksm;
+
+        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
+                                      sizeof(blk_crypto_keyslots[0]),
+                                      GFP_KERNEL);
+        if (!blk_crypto_keyslots)
+                goto fail_free_wq;
+
+        blk_crypto_bounce_page_pool =
+                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
+        if (!blk_crypto_bounce_page_pool)
+                goto fail_free_keyslots;
+
+        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
+        if (!bio_fallback_crypt_ctx_cache)
+                goto fail_free_bounce_page_pool;
+
+        bio_fallback_crypt_ctx_pool =
+                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
+                                         bio_fallback_crypt_ctx_cache);
+        if (!bio_fallback_crypt_ctx_pool)
+                goto fail_free_crypt_ctx_cache;
+
+        blk_crypto_fallback_inited = true;
+
+        return 0;
+fail_free_crypt_ctx_cache:
+        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
+fail_free_bounce_page_pool:
+        mempool_destroy(blk_crypto_bounce_page_pool);
+fail_free_keyslots:
+        kfree(blk_crypto_keyslots);
+fail_free_wq:
+        destroy_workqueue(blk_crypto_wq);
+fail_free_ksm:
+        blk_ksm_destroy(&blk_crypto_ksm);
 out:
-        blk_crypto_free_fallback_crypt_ctx(bio);
-        return false;
+        return err;
 }

 /*
@@ -507,7 +616,11 @@
                 return 0;

         mutex_lock(&tfms_init_lock);
-        if (likely(tfms_inited[mode_num]))
+        if (tfms_inited[mode_num])
+                goto out;
+
+        err = blk_crypto_fallback_init();
+        if (err)
                 goto out;

         for (i = 0; i < blk_crypto_num_keyslots; i++) {
@@ -525,7 +638,7 @@
                 }

                 crypto_skcipher_set_flags(slotp->tfms[mode_num],
-                                          CRYPTO_TFM_REQ_WEAK_KEY);
+                                          CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
         }

         /*
@@ -544,101 +657,4 @@
 out:
         mutex_unlock(&tfms_init_lock);
         return err;
-}
-
-int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
-{
-        return keyslot_manager_evict_key(blk_crypto_ksm, key);
-}
-
-int blk_crypto_fallback_submit_bio(struct bio **bio_ptr)
-{
-        struct bio *bio = *bio_ptr;
-        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
-        struct bio_fallback_crypt_ctx *f_ctx;
-
-        if (bc->bc_key->is_hw_wrapped) {
-                pr_warn_once("HW wrapped key cannot be used with fallback.\n");
-                bio->bi_status = BLK_STS_NOTSUPP;
-                return -EOPNOTSUPP;
-        }
-
-        if (!tfms_inited[bc->bc_key->crypto_mode]) {
-                bio->bi_status = BLK_STS_IOERR;
-                return -EIO;
-        }
-
-        if (bio_data_dir(bio) == WRITE)
-                return blk_crypto_encrypt_bio(bio_ptr);
-
-        /*
-         * Mark bio as fallback crypted and replace the bio_crypt_ctx with
-         * another one contained in a bio_fallback_crypt_ctx, so that the
-         * fallback has space to store the info it needs for decryption.
-         */
-        bc->bc_ksm = blk_crypto_ksm;
-        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
-        f_ctx->crypt_ctx = *bc;
-        memcpy(f_ctx->fallback_dun, bc->bc_dun, sizeof(f_ctx->fallback_dun));
-        f_ctx->crypt_iter = bio->bi_iter;
-
-        bio_crypt_free_ctx(bio);
-        bio->bi_crypt_context = &f_ctx->crypt_ctx;
-
-        return 0;
-}
-
-int __init blk_crypto_fallback_init(void)
-{
-        int i;
-        unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
-
-        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
-
-        /* All blk-crypto modes have a crypto API fallback. */
-        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-                crypto_mode_supported[i] = 0xFFFFFFFF;
-        crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
-
-        blk_crypto_ksm = keyslot_manager_create(
-                        NULL, blk_crypto_num_keyslots,
-                        &blk_crypto_ksm_ll_ops,
-                        BLK_CRYPTO_FEATURE_STANDARD_KEYS,
-                        crypto_mode_supported, NULL);
-        if (!blk_crypto_ksm)
-                return -ENOMEM;
-
-        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
-                                        WQ_UNBOUND | WQ_HIGHPRI |
-                                        WQ_MEM_RECLAIM, num_online_cpus());
-        if (!blk_crypto_wq)
-                return -ENOMEM;
-
-        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
-                                      sizeof(blk_crypto_keyslots[0]),
-                                      GFP_KERNEL);
-        if (!blk_crypto_keyslots)
-                return -ENOMEM;
-
-        blk_crypto_bounce_page_pool =
-                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
-        if (!blk_crypto_bounce_page_pool)
-                return -ENOMEM;
-
-        blk_crypto_decrypt_work_cache = KMEM_CACHE(blk_crypto_decrypt_work,
-                                                   SLAB_RECLAIM_ACCOUNT);
-        if (!blk_crypto_decrypt_work_cache)
-                return -ENOMEM;
-
-        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
-        if (!bio_fallback_crypt_ctx_cache)
-                return -ENOMEM;
-
-        bio_fallback_crypt_ctx_pool =
-                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
-                                         bio_fallback_crypt_ctx_cache);
-        if (!bio_fallback_crypt_ctx_pool)
-                return -ENOMEM;
-
-        return 0;
 }
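
Note (not part of the patch): the new blk_crypto_fallback_bio_prep() entry point is
reached from the blk-crypto core rather than called directly by filesystems. The
sketch below only approximates the caller in block/blk-crypto.c for the kernel
generation this change targets; it is an illustration under that assumption (data
and alignment checks are omitted, and names such as bio->bi_disk->queue->ksm may
differ in other kernel versions), not code added by this diff.

/*
 * Approximate caller sketch (assumed, not from this diff): the core only falls
 * back to the crypto API when the request queue's keyslot manager cannot
 * handle the bio's crypto configuration.
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

        /* Hardware path: the device's keyslot manager supports this config. */
        if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
                                         &bc_key->crypto_cfg))
                return true;

        /* Software path: encrypt/decrypt via the crypto API fallback. */
        if (blk_crypto_fallback_bio_prep(bio_ptr))
                return true;

        /* The fallback set bio->bi_status on failure; end the bio here. */
        bio_endio(bio);
        return false;
}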