.. | .. |
---|
1 | 1 | /* |
---|
2 | 2 | * Copyright (C) 2003 Jana Saout <jana@saout.de> |
---|
3 | 3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> |
---|
4 | | - * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved. |
---|
5 | | - * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com> |
---|
| 4 | + * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved. |
---|
| 5 | + * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com> |
---|
6 | 6 | * |
---|
7 | 7 | * This file is released under the GPL. |
---|
8 | 8 | */ |
---|
.. | .. |
---|
34 | 34 | #include <crypto/aead.h> |
---|
35 | 35 | #include <crypto/authenc.h> |
---|
36 | 36 | #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ |
---|
| 37 | +#include <linux/key-type.h> |
---|
37 | 38 | #include <keys/user-type.h> |
---|
| 39 | +#include <keys/encrypted-type.h> |
---|
38 | 40 | |
---|
39 | 41 | #include <linux/device-mapper.h> |
---|
40 | 42 | |
---|
.. | .. |
---|
65 | 67 | struct crypt_config *cc; |
---|
66 | 68 | struct bio *base_bio; |
---|
67 | 69 | u8 *integrity_metadata; |
---|
68 | | - bool integrity_metadata_from_pool; |
---|
| 70 | + bool integrity_metadata_from_pool:1; |
---|
| 71 | + bool in_tasklet:1; |
---|
| 72 | + |
---|
69 | 73 | struct work_struct work; |
---|
| 74 | + struct tasklet_struct tasklet; |
---|
70 | 75 | |
---|
71 | 76 | struct convert_context ctx; |
---|
72 | 77 | |
---|
.. | .. |
---|
98 | 103 | struct dm_crypt_request *dmreq); |
---|
99 | 104 | }; |
---|
100 | 105 | |
---|
101 | | -struct iv_essiv_private { |
---|
102 | | - struct crypto_shash *hash_tfm; |
---|
103 | | - u8 *salt; |
---|
104 | | -}; |
---|
105 | | - |
---|
106 | 106 | struct iv_benbi_private { |
---|
107 | 107 | int shift; |
---|
108 | 108 | }; |
---|
.. | .. |
---|
120 | 120 | u8 *whitening; |
---|
121 | 121 | }; |
---|
122 | 122 | |
---|
| 123 | +#define ELEPHANT_MAX_KEY_SIZE 32 |
---|
| 124 | +struct iv_elephant_private { |
---|
| 125 | + struct crypto_skcipher *tfm; |
---|
| 126 | +}; |
---|
| 127 | + |
---|
123 | 128 | /* |
---|
124 | 129 | * Crypt: maps a linear range of a block device |
---|
125 | 130 | * and encrypts / decrypts at the same time. |
---|
126 | 131 | */ |
---|
127 | 132 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, |
---|
128 | | - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; |
---|
| 133 | + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, |
---|
| 134 | + DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE, |
---|
| 135 | + DM_CRYPT_WRITE_INLINE }; |
---|
129 | 136 | |
---|
130 | 137 | enum cipher_flags { |
---|
131 | 138 | CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ |
---|
132 | 139 | CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ |
---|
| 140 | + CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ |
---|
133 | 141 | }; |
---|
134 | 142 | |
---|
135 | 143 | /* |
---|
.. | .. |
---|
148 | 156 | struct task_struct *write_thread; |
---|
149 | 157 | struct rb_root write_tree; |
---|
150 | 158 | |
---|
151 | | - char *cipher; |
---|
152 | 159 | char *cipher_string; |
---|
153 | 160 | char *cipher_auth; |
---|
154 | 161 | char *key_string; |
---|
155 | 162 | |
---|
156 | 163 | const struct crypt_iv_operations *iv_gen_ops; |
---|
157 | 164 | union { |
---|
158 | | - struct iv_essiv_private essiv; |
---|
159 | 165 | struct iv_benbi_private benbi; |
---|
160 | 166 | struct iv_lmk_private lmk; |
---|
161 | 167 | struct iv_tcw_private tcw; |
---|
| 168 | + struct iv_elephant_private elephant; |
---|
162 | 169 | } iv_gen_private; |
---|
163 | 170 | u64 iv_offset; |
---|
164 | 171 | unsigned int iv_size; |
---|
165 | 172 | unsigned short int sector_size; |
---|
166 | 173 | unsigned char sector_shift; |
---|
167 | 174 | |
---|
168 | | - /* ESSIV: struct crypto_cipher *essiv_tfm */ |
---|
169 | | - void *iv_private; |
---|
170 | 175 | union { |
---|
171 | 176 | struct crypto_skcipher **tfms; |
---|
172 | 177 | struct crypto_aead **tfms_aead; |
---|
.. | .. |
---|
214 | 219 | struct mutex bio_alloc_lock; |
---|
215 | 220 | |
---|
216 | 221 | u8 *authenc_key; /* space for keys in authenc() format (if used) */ |
---|
217 | | - u8 key[0]; |
---|
| 222 | + u8 key[]; |
---|
218 | 223 | }; |
---|
219 | 224 | |
---|
220 | 225 | #define MIN_IOS 64 |
---|
.. | .. |
---|
231 | 236 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
---|
232 | 237 | static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, |
---|
233 | 238 | struct scatterlist *sg); |
---|
| 239 | + |
---|
| 240 | +static bool crypt_integrity_aead(struct crypt_config *cc); |
---|
234 | 241 | |
---|
235 | 242 | /* |
---|
236 | 243 | * Use this to access cipher attributes that are independent of the key. |
---|
.. | .. |
---|
291 | 298 | * Note that this encryption scheme is vulnerable to watermarking attacks |
---|
292 | 299 | * and should be used for old compatible containers access only. |
---|
293 | 300 | * |
---|
294 | | - * plumb: unimplemented, see: |
---|
295 | | - * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 |
---|
| 301 | + * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode) |
---|
| 302 | + * The IV is encrypted little-endian byte-offset (with the same key |
---|
| 303 | + * and cipher as the volume). |
---|
| 304 | + * |
---|
| 305 | + * elephant: The extended version of eboiv with additional Elephant diffuser |
---|
| 306 | + * used with Bitlocker CBC mode. |
---|
| 307 | + * This mode was used in older Windows systems |
---|
| 308 | + * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf |
---|
296 | 309 | */ |
---|
297 | 310 | |
---|
298 | 311 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, |
---|
.. | .. |
---|
323 | 336 | return 0; |
---|
324 | 337 | } |
---|
325 | 338 | |
---|
326 | | -/* Initialise ESSIV - compute salt but no local memory allocations */ |
---|
327 | | -static int crypt_iv_essiv_init(struct crypt_config *cc) |
---|
328 | | -{ |
---|
329 | | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
---|
330 | | - SHASH_DESC_ON_STACK(desc, essiv->hash_tfm); |
---|
331 | | - struct crypto_cipher *essiv_tfm; |
---|
332 | | - int err; |
---|
333 | | - |
---|
334 | | - desc->tfm = essiv->hash_tfm; |
---|
335 | | - desc->flags = 0; |
---|
336 | | - |
---|
337 | | - err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); |
---|
338 | | - shash_desc_zero(desc); |
---|
339 | | - if (err) |
---|
340 | | - return err; |
---|
341 | | - |
---|
342 | | - essiv_tfm = cc->iv_private; |
---|
343 | | - |
---|
344 | | - err = crypto_cipher_setkey(essiv_tfm, essiv->salt, |
---|
345 | | - crypto_shash_digestsize(essiv->hash_tfm)); |
---|
346 | | - if (err) |
---|
347 | | - return err; |
---|
348 | | - |
---|
349 | | - return 0; |
---|
350 | | -} |
---|
351 | | - |
---|
352 | | -/* Wipe salt and reset key derived from volume key */ |
---|
353 | | -static int crypt_iv_essiv_wipe(struct crypt_config *cc) |
---|
354 | | -{ |
---|
355 | | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
---|
356 | | - unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm); |
---|
357 | | - struct crypto_cipher *essiv_tfm; |
---|
358 | | - int r, err = 0; |
---|
359 | | - |
---|
360 | | - memset(essiv->salt, 0, salt_size); |
---|
361 | | - |
---|
362 | | - essiv_tfm = cc->iv_private; |
---|
363 | | - r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); |
---|
364 | | - if (r) |
---|
365 | | - err = r; |
---|
366 | | - |
---|
367 | | - return err; |
---|
368 | | -} |
---|
369 | | - |
---|
370 | | -/* Allocate the cipher for ESSIV */ |
---|
371 | | -static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc, |
---|
372 | | - struct dm_target *ti, |
---|
373 | | - const u8 *salt, |
---|
374 | | - unsigned int saltsize) |
---|
375 | | -{ |
---|
376 | | - struct crypto_cipher *essiv_tfm; |
---|
377 | | - int err; |
---|
378 | | - |
---|
379 | | - /* Setup the essiv_tfm with the given salt */ |
---|
380 | | - essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
---|
381 | | - if (IS_ERR(essiv_tfm)) { |
---|
382 | | - ti->error = "Error allocating crypto tfm for ESSIV"; |
---|
383 | | - return essiv_tfm; |
---|
384 | | - } |
---|
385 | | - |
---|
386 | | - if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) { |
---|
387 | | - ti->error = "Block size of ESSIV cipher does " |
---|
388 | | - "not match IV size of block cipher"; |
---|
389 | | - crypto_free_cipher(essiv_tfm); |
---|
390 | | - return ERR_PTR(-EINVAL); |
---|
391 | | - } |
---|
392 | | - |
---|
393 | | - err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
---|
394 | | - if (err) { |
---|
395 | | - ti->error = "Failed to set key for ESSIV cipher"; |
---|
396 | | - crypto_free_cipher(essiv_tfm); |
---|
397 | | - return ERR_PTR(err); |
---|
398 | | - } |
---|
399 | | - |
---|
400 | | - return essiv_tfm; |
---|
401 | | -} |
---|
402 | | - |
---|
403 | | -static void crypt_iv_essiv_dtr(struct crypt_config *cc) |
---|
404 | | -{ |
---|
405 | | - struct crypto_cipher *essiv_tfm; |
---|
406 | | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; |
---|
407 | | - |
---|
408 | | - crypto_free_shash(essiv->hash_tfm); |
---|
409 | | - essiv->hash_tfm = NULL; |
---|
410 | | - |
---|
411 | | - kzfree(essiv->salt); |
---|
412 | | - essiv->salt = NULL; |
---|
413 | | - |
---|
414 | | - essiv_tfm = cc->iv_private; |
---|
415 | | - |
---|
416 | | - if (essiv_tfm) |
---|
417 | | - crypto_free_cipher(essiv_tfm); |
---|
418 | | - |
---|
419 | | - cc->iv_private = NULL; |
---|
420 | | -} |
---|
421 | | - |
---|
422 | | -static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
---|
423 | | - const char *opts) |
---|
424 | | -{ |
---|
425 | | - struct crypto_cipher *essiv_tfm = NULL; |
---|
426 | | - struct crypto_shash *hash_tfm = NULL; |
---|
427 | | - u8 *salt = NULL; |
---|
428 | | - int err; |
---|
429 | | - |
---|
430 | | - if (!opts) { |
---|
431 | | - ti->error = "Digest algorithm missing for ESSIV mode"; |
---|
432 | | - return -EINVAL; |
---|
433 | | - } |
---|
434 | | - |
---|
435 | | - /* Allocate hash algorithm */ |
---|
436 | | - hash_tfm = crypto_alloc_shash(opts, 0, 0); |
---|
437 | | - if (IS_ERR(hash_tfm)) { |
---|
438 | | - ti->error = "Error initializing ESSIV hash"; |
---|
439 | | - err = PTR_ERR(hash_tfm); |
---|
440 | | - goto bad; |
---|
441 | | - } |
---|
442 | | - |
---|
443 | | - salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL); |
---|
444 | | - if (!salt) { |
---|
445 | | - ti->error = "Error kmallocing salt storage in ESSIV"; |
---|
446 | | - err = -ENOMEM; |
---|
447 | | - goto bad; |
---|
448 | | - } |
---|
449 | | - |
---|
450 | | - cc->iv_gen_private.essiv.salt = salt; |
---|
451 | | - cc->iv_gen_private.essiv.hash_tfm = hash_tfm; |
---|
452 | | - |
---|
453 | | - essiv_tfm = alloc_essiv_cipher(cc, ti, salt, |
---|
454 | | - crypto_shash_digestsize(hash_tfm)); |
---|
455 | | - if (IS_ERR(essiv_tfm)) { |
---|
456 | | - crypt_iv_essiv_dtr(cc); |
---|
457 | | - return PTR_ERR(essiv_tfm); |
---|
458 | | - } |
---|
459 | | - cc->iv_private = essiv_tfm; |
---|
460 | | - |
---|
461 | | - return 0; |
---|
462 | | - |
---|
463 | | -bad: |
---|
464 | | - if (hash_tfm && !IS_ERR(hash_tfm)) |
---|
465 | | - crypto_free_shash(hash_tfm); |
---|
466 | | - kfree(salt); |
---|
467 | | - return err; |
---|
468 | | -} |
---|
469 | | - |
---|
470 | 339 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, |
---|
471 | 340 | struct dm_crypt_request *dmreq) |
---|
472 | 341 | { |
---|
473 | | - struct crypto_cipher *essiv_tfm = cc->iv_private; |
---|
474 | | - |
---|
| 342 | + /* |
---|
| 343 | + * ESSIV encryption of the IV is now handled by the crypto API, |
---|
| 344 | + * so just pass the plain sector number here. |
---|
| 345 | + */ |
---|
475 | 346 | memset(iv, 0, cc->iv_size); |
---|
476 | 347 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
---|
477 | | - crypto_cipher_encrypt_one(essiv_tfm, iv, iv); |
---|
478 | 348 | |
---|
479 | 349 | return 0; |
---|
480 | 350 | } |
---|
.. | .. |
---|
485 | 355 | unsigned bs; |
---|
486 | 356 | int log; |
---|
487 | 357 | |
---|
488 | | - if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) |
---|
| 358 | + if (crypt_integrity_aead(cc)) |
---|
489 | 359 | bs = crypto_aead_blocksize(any_tfm_aead(cc)); |
---|
490 | 360 | else |
---|
491 | 361 | bs = crypto_skcipher_blocksize(any_tfm(cc)); |
---|
.. | .. |
---|
542 | 412 | crypto_free_shash(lmk->hash_tfm); |
---|
543 | 413 | lmk->hash_tfm = NULL; |
---|
544 | 414 | |
---|
545 | | - kzfree(lmk->seed); |
---|
| 415 | + kfree_sensitive(lmk->seed); |
---|
546 | 416 | lmk->seed = NULL; |
---|
547 | 417 | } |
---|
548 | 418 | |
---|
.. | .. |
---|
556 | 426 | return -EINVAL; |
---|
557 | 427 | } |
---|
558 | 428 | |
---|
559 | | - lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); |
---|
| 429 | + lmk->hash_tfm = crypto_alloc_shash("md5", 0, |
---|
| 430 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
560 | 431 | if (IS_ERR(lmk->hash_tfm)) { |
---|
561 | 432 | ti->error = "Error initializing LMK hash"; |
---|
562 | 433 | return PTR_ERR(lmk->hash_tfm); |
---|
.. | .. |
---|
612 | 483 | int i, r; |
---|
613 | 484 | |
---|
614 | 485 | desc->tfm = lmk->hash_tfm; |
---|
615 | | - desc->flags = 0; |
---|
616 | 486 | |
---|
617 | 487 | r = crypto_shash_init(desc); |
---|
618 | 488 | if (r) |
---|
.. | .. |
---|
694 | 564 | { |
---|
695 | 565 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; |
---|
696 | 566 | |
---|
697 | | - kzfree(tcw->iv_seed); |
---|
| 567 | + kfree_sensitive(tcw->iv_seed); |
---|
698 | 568 | tcw->iv_seed = NULL; |
---|
699 | | - kzfree(tcw->whitening); |
---|
| 569 | + kfree_sensitive(tcw->whitening); |
---|
700 | 570 | tcw->whitening = NULL; |
---|
701 | 571 | |
---|
702 | 572 | if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) |
---|
.. | .. |
---|
719 | 589 | return -EINVAL; |
---|
720 | 590 | } |
---|
721 | 591 | |
---|
722 | | - tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); |
---|
| 592 | + tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, |
---|
| 593 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
723 | 594 | if (IS_ERR(tcw->crc32_tfm)) { |
---|
724 | 595 | ti->error = "Error initializing CRC32 in TCW"; |
---|
725 | 596 | return PTR_ERR(tcw->crc32_tfm); |
---|
.. | .. |
---|
774 | 645 | |
---|
775 | 646 | /* calculate crc32 for every 32bit part and xor it */ |
---|
776 | 647 | desc->tfm = tcw->crc32_tfm; |
---|
777 | | - desc->flags = 0; |
---|
778 | 648 | for (i = 0; i < 4; i++) { |
---|
779 | 649 | r = crypto_shash_init(desc); |
---|
780 | 650 | if (r) |
---|
.. | .. |
---|
850 | 720 | return 0; |
---|
851 | 721 | } |
---|
852 | 722 | |
---|
| 723 | +static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
---|
| 724 | + const char *opts) |
---|
| 725 | +{ |
---|
| 726 | + if (crypt_integrity_aead(cc)) { |
---|
| 727 | + ti->error = "AEAD transforms not supported for EBOIV"; |
---|
| 728 | + return -EINVAL; |
---|
| 729 | + } |
---|
| 730 | + |
---|
| 731 | + if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { |
---|
| 732 | + ti->error = "Block size of EBOIV cipher does " |
---|
| 733 | + "not match IV size of block cipher"; |
---|
| 734 | + return -EINVAL; |
---|
| 735 | + } |
---|
| 736 | + |
---|
| 737 | + return 0; |
---|
| 738 | +} |
---|
| 739 | + |
---|
| 740 | +static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, |
---|
| 741 | + struct dm_crypt_request *dmreq) |
---|
| 742 | +{ |
---|
| 743 | + u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); |
---|
| 744 | + struct skcipher_request *req; |
---|
| 745 | + struct scatterlist src, dst; |
---|
| 746 | + DECLARE_CRYPTO_WAIT(wait); |
---|
| 747 | + int err; |
---|
| 748 | + |
---|
| 749 | + req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); |
---|
| 750 | + if (!req) |
---|
| 751 | + return -ENOMEM; |
---|
| 752 | + |
---|
| 753 | + memset(buf, 0, cc->iv_size); |
---|
| 754 | + *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); |
---|
| 755 | + |
---|
| 756 | + sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size); |
---|
| 757 | + sg_init_one(&dst, iv, cc->iv_size); |
---|
| 758 | + skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); |
---|
| 759 | + skcipher_request_set_callback(req, 0, crypto_req_done, &wait); |
---|
| 760 | + err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
---|
| 761 | + skcipher_request_free(req); |
---|
| 762 | + |
---|
| 763 | + return err; |
---|
| 764 | +} |
---|
| 765 | + |
---|
| 766 | +static void crypt_iv_elephant_dtr(struct crypt_config *cc) |
---|
| 767 | +{ |
---|
| 768 | + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; |
---|
| 769 | + |
---|
| 770 | + crypto_free_skcipher(elephant->tfm); |
---|
| 771 | + elephant->tfm = NULL; |
---|
| 772 | +} |
---|
| 773 | + |
---|
| 774 | +static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti, |
---|
| 775 | + const char *opts) |
---|
| 776 | +{ |
---|
| 777 | + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; |
---|
| 778 | + int r; |
---|
| 779 | + |
---|
| 780 | + elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, |
---|
| 781 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
| 782 | + if (IS_ERR(elephant->tfm)) { |
---|
| 783 | + r = PTR_ERR(elephant->tfm); |
---|
| 784 | + elephant->tfm = NULL; |
---|
| 785 | + return r; |
---|
| 786 | + } |
---|
| 787 | + |
---|
| 788 | + r = crypt_iv_eboiv_ctr(cc, ti, NULL); |
---|
| 789 | + if (r) |
---|
| 790 | + crypt_iv_elephant_dtr(cc); |
---|
| 791 | + return r; |
---|
| 792 | +} |
---|
| 793 | + |
---|
| 794 | +static void diffuser_disk_to_cpu(u32 *d, size_t n) |
---|
| 795 | +{ |
---|
| 796 | +#ifndef __LITTLE_ENDIAN |
---|
| 797 | + int i; |
---|
| 798 | + |
---|
| 799 | + for (i = 0; i < n; i++) |
---|
| 800 | + d[i] = le32_to_cpu((__le32)d[i]); |
---|
| 801 | +#endif |
---|
| 802 | +} |
---|
| 803 | + |
---|
| 804 | +static void diffuser_cpu_to_disk(__le32 *d, size_t n) |
---|
| 805 | +{ |
---|
| 806 | +#ifndef __LITTLE_ENDIAN |
---|
| 807 | + int i; |
---|
| 808 | + |
---|
| 809 | + for (i = 0; i < n; i++) |
---|
| 810 | + d[i] = cpu_to_le32((u32)d[i]); |
---|
| 811 | +#endif |
---|
| 812 | +} |
---|
| 813 | + |
---|
| 814 | +static void diffuser_a_decrypt(u32 *d, size_t n) |
---|
| 815 | +{ |
---|
| 816 | + int i, i1, i2, i3; |
---|
| 817 | + |
---|
| 818 | + for (i = 0; i < 5; i++) { |
---|
| 819 | + i1 = 0; |
---|
| 820 | + i2 = n - 2; |
---|
| 821 | + i3 = n - 5; |
---|
| 822 | + |
---|
| 823 | + while (i1 < (n - 1)) { |
---|
| 824 | + d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23); |
---|
| 825 | + i1++; i2++; i3++; |
---|
| 826 | + |
---|
| 827 | + if (i3 >= n) |
---|
| 828 | + i3 -= n; |
---|
| 829 | + |
---|
| 830 | + d[i1] += d[i2] ^ d[i3]; |
---|
| 831 | + i1++; i2++; i3++; |
---|
| 832 | + |
---|
| 833 | + if (i2 >= n) |
---|
| 834 | + i2 -= n; |
---|
| 835 | + |
---|
| 836 | + d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19); |
---|
| 837 | + i1++; i2++; i3++; |
---|
| 838 | + |
---|
| 839 | + d[i1] += d[i2] ^ d[i3]; |
---|
| 840 | + i1++; i2++; i3++; |
---|
| 841 | + } |
---|
| 842 | + } |
---|
| 843 | +} |
---|
| 844 | + |
---|
| 845 | +static void diffuser_a_encrypt(u32 *d, size_t n) |
---|
| 846 | +{ |
---|
| 847 | + int i, i1, i2, i3; |
---|
| 848 | + |
---|
| 849 | + for (i = 0; i < 5; i++) { |
---|
| 850 | + i1 = n - 1; |
---|
| 851 | + i2 = n - 2 - 1; |
---|
| 852 | + i3 = n - 5 - 1; |
---|
| 853 | + |
---|
| 854 | + while (i1 > 0) { |
---|
| 855 | + d[i1] -= d[i2] ^ d[i3]; |
---|
| 856 | + i1--; i2--; i3--; |
---|
| 857 | + |
---|
| 858 | + d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19); |
---|
| 859 | + i1--; i2--; i3--; |
---|
| 860 | + |
---|
| 861 | + if (i2 < 0) |
---|
| 862 | + i2 += n; |
---|
| 863 | + |
---|
| 864 | + d[i1] -= d[i2] ^ d[i3]; |
---|
| 865 | + i1--; i2--; i3--; |
---|
| 866 | + |
---|
| 867 | + if (i3 < 0) |
---|
| 868 | + i3 += n; |
---|
| 869 | + |
---|
| 870 | + d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23); |
---|
| 871 | + i1--; i2--; i3--; |
---|
| 872 | + } |
---|
| 873 | + } |
---|
| 874 | +} |
---|
| 875 | + |
---|
| 876 | +static void diffuser_b_decrypt(u32 *d, size_t n) |
---|
| 877 | +{ |
---|
| 878 | + int i, i1, i2, i3; |
---|
| 879 | + |
---|
| 880 | + for (i = 0; i < 3; i++) { |
---|
| 881 | + i1 = 0; |
---|
| 882 | + i2 = 2; |
---|
| 883 | + i3 = 5; |
---|
| 884 | + |
---|
| 885 | + while (i1 < (n - 1)) { |
---|
| 886 | + d[i1] += d[i2] ^ d[i3]; |
---|
| 887 | + i1++; i2++; i3++; |
---|
| 888 | + |
---|
| 889 | + d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22); |
---|
| 890 | + i1++; i2++; i3++; |
---|
| 891 | + |
---|
| 892 | + if (i2 >= n) |
---|
| 893 | + i2 -= n; |
---|
| 894 | + |
---|
| 895 | + d[i1] += d[i2] ^ d[i3]; |
---|
| 896 | + i1++; i2++; i3++; |
---|
| 897 | + |
---|
| 898 | + if (i3 >= n) |
---|
| 899 | + i3 -= n; |
---|
| 900 | + |
---|
| 901 | + d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7); |
---|
| 902 | + i1++; i2++; i3++; |
---|
| 903 | + } |
---|
| 904 | + } |
---|
| 905 | +} |
---|
| 906 | + |
---|
| 907 | +static void diffuser_b_encrypt(u32 *d, size_t n) |
---|
| 908 | +{ |
---|
| 909 | + int i, i1, i2, i3; |
---|
| 910 | + |
---|
| 911 | + for (i = 0; i < 3; i++) { |
---|
| 912 | + i1 = n - 1; |
---|
| 913 | + i2 = 2 - 1; |
---|
| 914 | + i3 = 5 - 1; |
---|
| 915 | + |
---|
| 916 | + while (i1 > 0) { |
---|
| 917 | + d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7); |
---|
| 918 | + i1--; i2--; i3--; |
---|
| 919 | + |
---|
| 920 | + if (i3 < 0) |
---|
| 921 | + i3 += n; |
---|
| 922 | + |
---|
| 923 | + d[i1] -= d[i2] ^ d[i3]; |
---|
| 924 | + i1--; i2--; i3--; |
---|
| 925 | + |
---|
| 926 | + if (i2 < 0) |
---|
| 927 | + i2 += n; |
---|
| 928 | + |
---|
| 929 | + d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22); |
---|
| 930 | + i1--; i2--; i3--; |
---|
| 931 | + |
---|
| 932 | + d[i1] -= d[i2] ^ d[i3]; |
---|
| 933 | + i1--; i2--; i3--; |
---|
| 934 | + } |
---|
| 935 | + } |
---|
| 936 | +} |
---|
| 937 | + |
---|
| 938 | +static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq) |
---|
| 939 | +{ |
---|
| 940 | + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; |
---|
| 941 | + u8 *es, *ks, *data, *data2, *data_offset; |
---|
| 942 | + struct skcipher_request *req; |
---|
| 943 | + struct scatterlist *sg, *sg2, src, dst; |
---|
| 944 | + DECLARE_CRYPTO_WAIT(wait); |
---|
| 945 | + int i, r; |
---|
| 946 | + |
---|
| 947 | + req = skcipher_request_alloc(elephant->tfm, GFP_NOIO); |
---|
| 948 | + es = kzalloc(16, GFP_NOIO); /* Key for AES */ |
---|
| 949 | + ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */ |
---|
| 950 | + |
---|
| 951 | + if (!req || !es || !ks) { |
---|
| 952 | + r = -ENOMEM; |
---|
| 953 | + goto out; |
---|
| 954 | + } |
---|
| 955 | + |
---|
| 956 | + *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size); |
---|
| 957 | + |
---|
| 958 | + /* E(Ks, e(s)) */ |
---|
| 959 | + sg_init_one(&src, es, 16); |
---|
| 960 | + sg_init_one(&dst, ks, 16); |
---|
| 961 | + skcipher_request_set_crypt(req, &src, &dst, 16, NULL); |
---|
| 962 | + skcipher_request_set_callback(req, 0, crypto_req_done, &wait); |
---|
| 963 | + r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
---|
| 964 | + if (r) |
---|
| 965 | + goto out; |
---|
| 966 | + |
---|
| 967 | + /* E(Ks, e'(s)) */ |
---|
| 968 | + es[15] = 0x80; |
---|
| 969 | + sg_init_one(&dst, &ks[16], 16); |
---|
| 970 | + r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); |
---|
| 971 | + if (r) |
---|
| 972 | + goto out; |
---|
| 973 | + |
---|
| 974 | + sg = crypt_get_sg_data(cc, dmreq->sg_out); |
---|
| 975 | + data = kmap_atomic(sg_page(sg)); |
---|
| 976 | + data_offset = data + sg->offset; |
---|
| 977 | + |
---|
| 978 | + /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ |
---|
| 979 | + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { |
---|
| 980 | + sg2 = crypt_get_sg_data(cc, dmreq->sg_in); |
---|
| 981 | + data2 = kmap_atomic(sg_page(sg2)); |
---|
| 982 | + memcpy(data_offset, data2 + sg2->offset, cc->sector_size); |
---|
| 983 | + kunmap_atomic(data2); |
---|
| 984 | + } |
---|
| 985 | + |
---|
| 986 | + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { |
---|
| 987 | + diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 988 | + diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 989 | + diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 990 | + diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 991 | + } |
---|
| 992 | + |
---|
| 993 | + for (i = 0; i < (cc->sector_size / 32); i++) |
---|
| 994 | + crypto_xor(data_offset + i * 32, ks, 32); |
---|
| 995 | + |
---|
| 996 | + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { |
---|
| 997 | + diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 998 | + diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 999 | + diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 1000 | + diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); |
---|
| 1001 | + } |
---|
| 1002 | + |
---|
| 1003 | + kunmap_atomic(data); |
---|
| 1004 | +out: |
---|
| 1005 | + kfree_sensitive(ks); |
---|
| 1006 | + kfree_sensitive(es); |
---|
| 1007 | + skcipher_request_free(req); |
---|
| 1008 | + return r; |
---|
| 1009 | +} |
---|
| 1010 | + |
---|
| 1011 | +static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv, |
---|
| 1012 | + struct dm_crypt_request *dmreq) |
---|
| 1013 | +{ |
---|
| 1014 | + int r; |
---|
| 1015 | + |
---|
| 1016 | + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { |
---|
| 1017 | + r = crypt_iv_elephant(cc, dmreq); |
---|
| 1018 | + if (r) |
---|
| 1019 | + return r; |
---|
| 1020 | + } |
---|
| 1021 | + |
---|
| 1022 | + return crypt_iv_eboiv_gen(cc, iv, dmreq); |
---|
| 1023 | +} |
---|
| 1024 | + |
---|
| 1025 | +static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv, |
---|
| 1026 | + struct dm_crypt_request *dmreq) |
---|
| 1027 | +{ |
---|
| 1028 | + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) |
---|
| 1029 | + return crypt_iv_elephant(cc, dmreq); |
---|
| 1030 | + |
---|
| 1031 | + return 0; |
---|
| 1032 | +} |
---|
| 1033 | + |
---|
| 1034 | +static int crypt_iv_elephant_init(struct crypt_config *cc) |
---|
| 1035 | +{ |
---|
| 1036 | + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; |
---|
| 1037 | + int key_offset = cc->key_size - cc->key_extra_size; |
---|
| 1038 | + |
---|
| 1039 | + return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size); |
---|
| 1040 | +} |
---|
| 1041 | + |
---|
| 1042 | +static int crypt_iv_elephant_wipe(struct crypt_config *cc) |
---|
| 1043 | +{ |
---|
| 1044 | + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; |
---|
| 1045 | + u8 key[ELEPHANT_MAX_KEY_SIZE]; |
---|
| 1046 | + |
---|
| 1047 | + memset(key, 0, cc->key_extra_size); |
---|
| 1048 | + return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size); |
---|
| 1049 | +} |
---|
| 1050 | + |
---|
853 | 1051 | static const struct crypt_iv_operations crypt_iv_plain_ops = { |
---|
854 | 1052 | .generator = crypt_iv_plain_gen |
---|
855 | 1053 | }; |
---|
.. | .. |
---|
863 | 1061 | }; |
---|
864 | 1062 | |
---|
865 | 1063 | static const struct crypt_iv_operations crypt_iv_essiv_ops = { |
---|
866 | | - .ctr = crypt_iv_essiv_ctr, |
---|
867 | | - .dtr = crypt_iv_essiv_dtr, |
---|
868 | | - .init = crypt_iv_essiv_init, |
---|
869 | | - .wipe = crypt_iv_essiv_wipe, |
---|
870 | 1064 | .generator = crypt_iv_essiv_gen |
---|
871 | 1065 | }; |
---|
872 | 1066 | |
---|
.. | .. |
---|
900 | 1094 | |
---|
901 | 1095 | static struct crypt_iv_operations crypt_iv_random_ops = { |
---|
902 | 1096 | .generator = crypt_iv_random_gen |
---|
| 1097 | +}; |
---|
| 1098 | + |
---|
| 1099 | +static struct crypt_iv_operations crypt_iv_eboiv_ops = { |
---|
| 1100 | + .ctr = crypt_iv_eboiv_ctr, |
---|
| 1101 | + .generator = crypt_iv_eboiv_gen |
---|
| 1102 | +}; |
---|
| 1103 | + |
---|
| 1104 | +static struct crypt_iv_operations crypt_iv_elephant_ops = { |
---|
| 1105 | + .ctr = crypt_iv_elephant_ctr, |
---|
| 1106 | + .dtr = crypt_iv_elephant_dtr, |
---|
| 1107 | + .init = crypt_iv_elephant_init, |
---|
| 1108 | + .wipe = crypt_iv_elephant_wipe, |
---|
| 1109 | + .generator = crypt_iv_elephant_gen, |
---|
| 1110 | + .post = crypt_iv_elephant_post |
---|
903 | 1111 | }; |
---|
904 | 1112 | |
---|
905 | 1113 | /* |
---|
.. | .. |
---|
1041 | 1249 | return iv_of_dmreq(cc, dmreq) + cc->iv_size; |
---|
1042 | 1250 | } |
---|
1043 | 1251 | |
---|
1044 | | -static uint64_t *org_sector_of_dmreq(struct crypt_config *cc, |
---|
| 1252 | +static __le64 *org_sector_of_dmreq(struct crypt_config *cc, |
---|
1045 | 1253 | struct dm_crypt_request *dmreq) |
---|
1046 | 1254 | { |
---|
1047 | 1255 | u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; |
---|
1048 | | - return (uint64_t*) ptr; |
---|
| 1256 | + return (__le64 *) ptr; |
---|
1049 | 1257 | } |
---|
1050 | 1258 | |
---|
1051 | 1259 | static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, |
---|
.. | .. |
---|
1081 | 1289 | struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); |
---|
1082 | 1290 | struct dm_crypt_request *dmreq; |
---|
1083 | 1291 | u8 *iv, *org_iv, *tag_iv, *tag; |
---|
1084 | | - uint64_t *sector; |
---|
| 1292 | + __le64 *sector; |
---|
1085 | 1293 | int r = 0; |
---|
1086 | 1294 | |
---|
1087 | 1295 | BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); |
---|
.. | .. |
---|
1153 | 1361 | r = crypto_aead_decrypt(req); |
---|
1154 | 1362 | } |
---|
1155 | 1363 | |
---|
1156 | | - if (r == -EBADMSG) |
---|
1157 | | - DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", |
---|
| 1364 | + if (r == -EBADMSG) { |
---|
| 1365 | + char b[BDEVNAME_SIZE]; |
---|
| 1366 | + DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), |
---|
1158 | 1367 | (unsigned long long)le64_to_cpu(*sector)); |
---|
| 1368 | + } |
---|
1159 | 1369 | |
---|
1160 | 1370 | if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) |
---|
1161 | 1371 | r = cc->iv_gen_ops->post(cc, org_iv, dmreq); |
---|
.. | .. |
---|
1176 | 1386 | struct scatterlist *sg_in, *sg_out; |
---|
1177 | 1387 | struct dm_crypt_request *dmreq; |
---|
1178 | 1388 | u8 *iv, *org_iv, *tag_iv; |
---|
1179 | | - uint64_t *sector; |
---|
| 1389 | + __le64 *sector; |
---|
1180 | 1390 | int r = 0; |
---|
1181 | 1391 | |
---|
1182 | 1392 | /* Reject unexpected unaligned bio. */ |
---|
.. | .. |
---|
1216 | 1426 | r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); |
---|
1217 | 1427 | if (r < 0) |
---|
1218 | 1428 | return r; |
---|
| 1429 | + /* Data can be already preprocessed in generator */ |
---|
| 1430 | + if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags)) |
---|
| 1431 | + sg_in = sg_out; |
---|
1219 | 1432 | /* Store generated IV in integrity metadata */ |
---|
1220 | 1433 | if (cc->integrity_iv_size) |
---|
1221 | 1434 | memcpy(tag_iv, org_iv, cc->integrity_iv_size); |
---|
.. | .. |
---|
1243 | 1456 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
---|
1244 | 1457 | int error); |
---|
1245 | 1458 | |
---|
1246 | | -static void crypt_alloc_req_skcipher(struct crypt_config *cc, |
---|
| 1459 | +static int crypt_alloc_req_skcipher(struct crypt_config *cc, |
---|
1247 | 1460 | struct convert_context *ctx) |
---|
1248 | 1461 | { |
---|
1249 | 1462 | unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); |
---|
1250 | 1463 | |
---|
1251 | | - if (!ctx->r.req) |
---|
1252 | | - ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO); |
---|
| 1464 | + if (!ctx->r.req) { |
---|
| 1465 | + ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); |
---|
| 1466 | + if (!ctx->r.req) |
---|
| 1467 | + return -ENOMEM; |
---|
| 1468 | + } |
---|
1253 | 1469 | |
---|
1254 | 1470 | skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]); |
---|
1255 | 1471 | |
---|
.. | .. |
---|
1260 | 1476 | skcipher_request_set_callback(ctx->r.req, |
---|
1261 | 1477 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
---|
1262 | 1478 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); |
---|
| 1479 | + |
---|
| 1480 | + return 0; |
---|
1263 | 1481 | } |
---|
1264 | 1482 | |
---|
1265 | | -static void crypt_alloc_req_aead(struct crypt_config *cc, |
---|
| 1483 | +static int crypt_alloc_req_aead(struct crypt_config *cc, |
---|
1266 | 1484 | struct convert_context *ctx) |
---|
1267 | 1485 | { |
---|
1268 | | - if (!ctx->r.req_aead) |
---|
1269 | | - ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO); |
---|
| 1486 | + if (!ctx->r.req_aead) { |
---|
| 1487 | + ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); |
---|
| 1488 | + if (!ctx->r.req_aead) |
---|
| 1489 | + return -ENOMEM; |
---|
| 1490 | + } |
---|
1270 | 1491 | |
---|
1271 | 1492 | aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]); |
---|
1272 | 1493 | |
---|
.. | .. |
---|
1277 | 1498 | aead_request_set_callback(ctx->r.req_aead, |
---|
1278 | 1499 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
---|
1279 | 1500 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); |
---|
| 1501 | + |
---|
| 1502 | + return 0; |
---|
1280 | 1503 | } |
---|
1281 | 1504 | |
---|
1282 | | -static void crypt_alloc_req(struct crypt_config *cc, |
---|
| 1505 | +static int crypt_alloc_req(struct crypt_config *cc, |
---|
1283 | 1506 | struct convert_context *ctx) |
---|
1284 | 1507 | { |
---|
1285 | 1508 | if (crypt_integrity_aead(cc)) |
---|
1286 | | - crypt_alloc_req_aead(cc, ctx); |
---|
| 1509 | + return crypt_alloc_req_aead(cc, ctx); |
---|
1287 | 1510 | else |
---|
1288 | | - crypt_alloc_req_skcipher(cc, ctx); |
---|
| 1511 | + return crypt_alloc_req_skcipher(cc, ctx); |
---|
1289 | 1512 | } |
---|
1290 | 1513 | |
---|
1291 | 1514 | static void crypt_free_req_skcipher(struct crypt_config *cc, |
---|
.. | .. |
---|
1318 | 1541 | * Encrypt / decrypt data from one bio to another one (can be the same one) |
---|
1319 | 1542 | */ |
---|
1320 | 1543 | static blk_status_t crypt_convert(struct crypt_config *cc, |
---|
1321 | | - struct convert_context *ctx) |
---|
| 1544 | + struct convert_context *ctx, bool atomic, bool reset_pending) |
---|
1322 | 1545 | { |
---|
1323 | 1546 | unsigned int tag_offset = 0; |
---|
1324 | 1547 | unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; |
---|
1325 | 1548 | int r; |
---|
1326 | 1549 | |
---|
1327 | | - atomic_set(&ctx->cc_pending, 1); |
---|
| 1550 | + /* |
---|
| 1551 | + * if reset_pending is set we are dealing with the bio for the first time, |
---|
| 1552 | + * else we're continuing to work on the previous bio, so don't mess with |
---|
| 1553 | + * the cc_pending counter |
---|
| 1554 | + */ |
---|
| 1555 | + if (reset_pending) |
---|
| 1556 | + atomic_set(&ctx->cc_pending, 1); |
---|
1328 | 1557 | |
---|
1329 | 1558 | while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { |
---|
1330 | 1559 | |
---|
1331 | | - crypt_alloc_req(cc, ctx); |
---|
| 1560 | + r = crypt_alloc_req(cc, ctx); |
---|
| 1561 | + if (r) { |
---|
| 1562 | + complete(&ctx->restart); |
---|
| 1563 | + return BLK_STS_DEV_RESOURCE; |
---|
| 1564 | + } |
---|
| 1565 | + |
---|
1332 | 1566 | atomic_inc(&ctx->cc_pending); |
---|
1333 | 1567 | |
---|
1334 | 1568 | if (crypt_integrity_aead(cc)) |
---|
.. | .. |
---|
1342 | 1576 | * but the driver request queue is full, let's wait. |
---|
1343 | 1577 | */ |
---|
1344 | 1578 | case -EBUSY: |
---|
1345 | | - wait_for_completion(&ctx->restart); |
---|
| 1579 | + if (in_interrupt()) { |
---|
| 1580 | + if (try_wait_for_completion(&ctx->restart)) { |
---|
| 1581 | + /* |
---|
| 1582 | + * we don't have to block to wait for completion, |
---|
| 1583 | + * so proceed |
---|
| 1584 | + */ |
---|
| 1585 | + } else { |
---|
| 1586 | + /* |
---|
| 1587 | + * we can't wait for completion without blocking |
---|
| 1588 | + * exit and continue processing in a workqueue |
---|
| 1589 | + */ |
---|
| 1590 | + ctx->r.req = NULL; |
---|
| 1591 | + ctx->cc_sector += sector_step; |
---|
| 1592 | + tag_offset++; |
---|
| 1593 | + return BLK_STS_DEV_RESOURCE; |
---|
| 1594 | + } |
---|
| 1595 | + } else { |
---|
| 1596 | + wait_for_completion(&ctx->restart); |
---|
| 1597 | + } |
---|
1346 | 1598 | reinit_completion(&ctx->restart); |
---|
1347 | | - /* fall through */ |
---|
| 1599 | + fallthrough; |
---|
1348 | 1600 | /* |
---|
1349 | 1601 | * The request is queued and processed asynchronously, |
---|
1350 | 1602 | * completion function kcryptd_async_done() will be called. |
---|
.. | .. |
---|
1361 | 1613 | atomic_dec(&ctx->cc_pending); |
---|
1362 | 1614 | ctx->cc_sector += sector_step; |
---|
1363 | 1615 | tag_offset++; |
---|
1364 | | - cond_resched(); |
---|
| 1616 | + if (!atomic) |
---|
| 1617 | + cond_resched(); |
---|
1365 | 1618 | continue; |
---|
1366 | 1619 | /* |
---|
1367 | 1620 | * There was a data integrity error. |
---|
.. | .. |
---|
1452 | 1705 | |
---|
1453 | 1706 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
---|
1454 | 1707 | { |
---|
1455 | | - unsigned int i; |
---|
1456 | 1708 | struct bio_vec *bv; |
---|
| 1709 | + struct bvec_iter_all iter_all; |
---|
1457 | 1710 | |
---|
1458 | | - bio_for_each_segment_all(bv, clone, i) { |
---|
| 1711 | + bio_for_each_segment_all(bv, clone, iter_all) { |
---|
1459 | 1712 | BUG_ON(!bv->bv_page); |
---|
1460 | 1713 | mempool_free(bv->bv_page, &cc->page_pool); |
---|
1461 | 1714 | } |
---|
.. | .. |
---|
1471 | 1724 | io->ctx.r.req = NULL; |
---|
1472 | 1725 | io->integrity_metadata = NULL; |
---|
1473 | 1726 | io->integrity_metadata_from_pool = false; |
---|
| 1727 | + io->in_tasklet = false; |
---|
1474 | 1728 | atomic_set(&io->io_pending, 0); |
---|
1475 | 1729 | } |
---|
1476 | 1730 | |
---|
1477 | 1731 | static void crypt_inc_pending(struct dm_crypt_io *io) |
---|
1478 | 1732 | { |
---|
1479 | 1733 | atomic_inc(&io->io_pending); |
---|
| 1734 | +} |
---|
| 1735 | + |
---|
| 1736 | +static void kcryptd_io_bio_endio(struct work_struct *work) |
---|
| 1737 | +{ |
---|
| 1738 | + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
---|
| 1739 | + bio_endio(io->base_bio); |
---|
1480 | 1740 | } |
---|
1481 | 1741 | |
---|
1482 | 1742 | /* |
---|
.. | .. |
---|
1501 | 1761 | kfree(io->integrity_metadata); |
---|
1502 | 1762 | |
---|
1503 | 1763 | base_bio->bi_status = error; |
---|
| 1764 | + |
---|
| 1765 | + /* |
---|
| 1766 | + * If we are running this function from our tasklet, |
---|
| 1767 | + * we can't call bio_endio() here, because it will call |
---|
| 1768 | + * clone_endio() from dm.c, which in turn will |
---|
| 1769 | + * free the current struct dm_crypt_io structure with |
---|
| 1770 | + * our tasklet. In this case we need to delay bio_endio() |
---|
| 1771 | + * execution to after the tasklet is done and dequeued. |
---|
| 1772 | + */ |
---|
| 1773 | + if (io->in_tasklet) { |
---|
| 1774 | + INIT_WORK(&io->work, kcryptd_io_bio_endio); |
---|
| 1775 | + queue_work(cc->io_queue, &io->work); |
---|
| 1776 | + return; |
---|
| 1777 | + } |
---|
| 1778 | + |
---|
1504 | 1779 | bio_endio(base_bio); |
---|
1505 | 1780 | } |
---|
1506 | 1781 | |
---|
.. | .. |
---|
1584 | 1859 | return 1; |
---|
1585 | 1860 | } |
---|
1586 | 1861 | |
---|
1587 | | - generic_make_request(clone); |
---|
| 1862 | + submit_bio_noacct(clone); |
---|
1588 | 1863 | return 0; |
---|
1589 | 1864 | } |
---|
1590 | 1865 | |
---|
.. | .. |
---|
1610 | 1885 | { |
---|
1611 | 1886 | struct bio *clone = io->ctx.bio_out; |
---|
1612 | 1887 | |
---|
1613 | | - generic_make_request(clone); |
---|
| 1888 | + submit_bio_noacct(clone); |
---|
1614 | 1889 | } |
---|
1615 | 1890 | |
---|
1616 | 1891 | #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) |
---|
.. | .. |
---|
1661 | 1936 | io = crypt_io_from_node(rb_first(&write_tree)); |
---|
1662 | 1937 | rb_erase(&io->rb_node, &write_tree); |
---|
1663 | 1938 | kcryptd_io_write(io); |
---|
| 1939 | + cond_resched(); |
---|
1664 | 1940 | } while (!RB_EMPTY_ROOT(&write_tree)); |
---|
1665 | 1941 | blk_finish_plug(&plug); |
---|
1666 | 1942 | } |
---|
.. | .. |
---|
1687 | 1963 | |
---|
1688 | 1964 | clone->bi_iter.bi_sector = cc->start + io->sector; |
---|
1689 | 1965 | |
---|
1690 | | - if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { |
---|
1691 | | - generic_make_request(clone); |
---|
| 1966 | + if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) || |
---|
| 1967 | + test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) { |
---|
| 1968 | + submit_bio_noacct(clone); |
---|
1692 | 1969 | return; |
---|
1693 | 1970 | } |
---|
1694 | 1971 | |
---|
.. | .. |
---|
1710 | 1987 | spin_unlock_irqrestore(&cc->write_thread_lock, flags); |
---|
1711 | 1988 | } |
---|
1712 | 1989 | |
---|
| 1990 | +static bool kcryptd_crypt_write_inline(struct crypt_config *cc, |
---|
| 1991 | + struct convert_context *ctx) |
---|
| 1992 | + |
---|
| 1993 | +{ |
---|
| 1994 | + if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags)) |
---|
| 1995 | + return false; |
---|
| 1996 | + |
---|
| 1997 | + /* |
---|
| 1998 | + * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering |
---|
| 1999 | + * constraints so they do not need to be issued inline by |
---|
| 2000 | + * kcryptd_crypt_write_convert(). |
---|
| 2001 | + */ |
---|
| 2002 | + switch (bio_op(ctx->bio_in)) { |
---|
| 2003 | + case REQ_OP_WRITE: |
---|
| 2004 | + case REQ_OP_WRITE_SAME: |
---|
| 2005 | + case REQ_OP_WRITE_ZEROES: |
---|
| 2006 | + return true; |
---|
| 2007 | + default: |
---|
| 2008 | + return false; |
---|
| 2009 | + } |
---|
| 2010 | +} |
---|
| 2011 | + |
---|
| 2012 | +static void kcryptd_crypt_write_continue(struct work_struct *work) |
---|
| 2013 | +{ |
---|
| 2014 | + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
---|
| 2015 | + struct crypt_config *cc = io->cc; |
---|
| 2016 | + struct convert_context *ctx = &io->ctx; |
---|
| 2017 | + int crypt_finished; |
---|
| 2018 | + sector_t sector = io->sector; |
---|
| 2019 | + blk_status_t r; |
---|
| 2020 | + |
---|
| 2021 | + wait_for_completion(&ctx->restart); |
---|
| 2022 | + reinit_completion(&ctx->restart); |
---|
| 2023 | + |
---|
| 2024 | + r = crypt_convert(cc, &io->ctx, true, false); |
---|
| 2025 | + if (r) |
---|
| 2026 | + io->error = r; |
---|
| 2027 | + crypt_finished = atomic_dec_and_test(&ctx->cc_pending); |
---|
| 2028 | + if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) { |
---|
| 2029 | + /* Wait for completion signaled by kcryptd_async_done() */ |
---|
| 2030 | + wait_for_completion(&ctx->restart); |
---|
| 2031 | + crypt_finished = 1; |
---|
| 2032 | + } |
---|
| 2033 | + |
---|
| 2034 | + /* Encryption was already finished, submit io now */ |
---|
| 2035 | + if (crypt_finished) { |
---|
| 2036 | + kcryptd_crypt_write_io_submit(io, 0); |
---|
| 2037 | + io->sector = sector; |
---|
| 2038 | + } |
---|
| 2039 | + |
---|
| 2040 | + crypt_dec_pending(io); |
---|
| 2041 | +} |
---|
| 2042 | + |
---|
1713 | 2043 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
---|
1714 | 2044 | { |
---|
1715 | 2045 | struct crypt_config *cc = io->cc; |
---|
| 2046 | + struct convert_context *ctx = &io->ctx; |
---|
1716 | 2047 | struct bio *clone; |
---|
1717 | 2048 | int crypt_finished; |
---|
1718 | 2049 | sector_t sector = io->sector; |
---|
.. | .. |
---|
1722 | 2053 | * Prevent io from disappearing until this function completes. |
---|
1723 | 2054 | */ |
---|
1724 | 2055 | crypt_inc_pending(io); |
---|
1725 | | - crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); |
---|
| 2056 | + crypt_convert_init(cc, ctx, NULL, io->base_bio, sector); |
---|
1726 | 2057 | |
---|
1727 | 2058 | clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); |
---|
1728 | 2059 | if (unlikely(!clone)) { |
---|
.. | .. |
---|
1736 | 2067 | sector += bio_sectors(clone); |
---|
1737 | 2068 | |
---|
1738 | 2069 | crypt_inc_pending(io); |
---|
1739 | | - r = crypt_convert(cc, &io->ctx); |
---|
| 2070 | + r = crypt_convert(cc, ctx, |
---|
| 2071 | + test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true); |
---|
| 2072 | + /* |
---|
| 2073 | + * Crypto API backlogged the request, because its queue was full |
---|
| 2074 | + * and we're in softirq context, so continue from a workqueue |
---|
| 2075 | + * (TODO: is it actually possible to be in softirq in the write path?) |
---|
| 2076 | + */ |
---|
| 2077 | + if (r == BLK_STS_DEV_RESOURCE) { |
---|
| 2078 | + INIT_WORK(&io->work, kcryptd_crypt_write_continue); |
---|
| 2079 | + queue_work(cc->crypt_queue, &io->work); |
---|
| 2080 | + return; |
---|
| 2081 | + } |
---|
1740 | 2082 | if (r) |
---|
1741 | 2083 | io->error = r; |
---|
1742 | | - crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); |
---|
| 2084 | + crypt_finished = atomic_dec_and_test(&ctx->cc_pending); |
---|
| 2085 | + if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) { |
---|
| 2086 | + /* Wait for completion signaled by kcryptd_async_done() */ |
---|
| 2087 | + wait_for_completion(&ctx->restart); |
---|
| 2088 | + crypt_finished = 1; |
---|
| 2089 | + } |
---|
1743 | 2090 | |
---|
1744 | 2091 | /* Encryption was already finished, submit io now */ |
---|
1745 | 2092 | if (crypt_finished) { |
---|
.. | .. |
---|
1756 | 2103 | crypt_dec_pending(io); |
---|
1757 | 2104 | } |
---|
1758 | 2105 | |
---|
| 2106 | +static void kcryptd_crypt_read_continue(struct work_struct *work) |
---|
| 2107 | +{ |
---|
| 2108 | + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
---|
| 2109 | + struct crypt_config *cc = io->cc; |
---|
| 2110 | + blk_status_t r; |
---|
| 2111 | + |
---|
| 2112 | + wait_for_completion(&io->ctx.restart); |
---|
| 2113 | + reinit_completion(&io->ctx.restart); |
---|
| 2114 | + |
---|
| 2115 | + r = crypt_convert(cc, &io->ctx, true, false); |
---|
| 2116 | + if (r) |
---|
| 2117 | + io->error = r; |
---|
| 2118 | + |
---|
| 2119 | + if (atomic_dec_and_test(&io->ctx.cc_pending)) |
---|
| 2120 | + kcryptd_crypt_read_done(io); |
---|
| 2121 | + |
---|
| 2122 | + crypt_dec_pending(io); |
---|
| 2123 | +} |
---|
| 2124 | + |
---|
1759 | 2125 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
---|
1760 | 2126 | { |
---|
1761 | 2127 | struct crypt_config *cc = io->cc; |
---|
.. | .. |
---|
1766 | 2132 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
---|
1767 | 2133 | io->sector); |
---|
1768 | 2134 | |
---|
1769 | | - r = crypt_convert(cc, &io->ctx); |
---|
| 2135 | + r = crypt_convert(cc, &io->ctx, |
---|
| 2136 | + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); |
---|
| 2137 | + /* |
---|
| 2138 | + * Crypto API backlogged the request, because its queue was full |
---|
| 2139 | + * and we're in softirq context, so continue from a workqueue |
---|
| 2140 | + */ |
---|
| 2141 | + if (r == BLK_STS_DEV_RESOURCE) { |
---|
| 2142 | + INIT_WORK(&io->work, kcryptd_crypt_read_continue); |
---|
| 2143 | + queue_work(cc->crypt_queue, &io->work); |
---|
| 2144 | + return; |
---|
| 2145 | + } |
---|
1770 | 2146 | if (r) |
---|
1771 | 2147 | io->error = r; |
---|
1772 | 2148 | |
---|
.. | .. |
---|
1798 | 2174 | error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); |
---|
1799 | 2175 | |
---|
1800 | 2176 | if (error == -EBADMSG) { |
---|
1801 | | - DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", |
---|
| 2177 | + char b[BDEVNAME_SIZE]; |
---|
| 2178 | + DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), |
---|
1802 | 2179 | (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); |
---|
1803 | 2180 | io->error = BLK_STS_PROTECTION; |
---|
1804 | 2181 | } else if (error < 0) |
---|
.. | .. |
---|
1809 | 2186 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
---|
1810 | 2187 | return; |
---|
1811 | 2188 | |
---|
1812 | | - if (bio_data_dir(io->base_bio) == READ) |
---|
| 2189 | + /* |
---|
| 2190 | + * The request is fully completed: for inline writes, let |
---|
| 2191 | + * kcryptd_crypt_write_convert() do the IO submission. |
---|
| 2192 | + */ |
---|
| 2193 | + if (bio_data_dir(io->base_bio) == READ) { |
---|
1813 | 2194 | kcryptd_crypt_read_done(io); |
---|
1814 | | - else |
---|
1815 | | - kcryptd_crypt_write_io_submit(io, 1); |
---|
| 2195 | + return; |
---|
| 2196 | + } |
---|
| 2197 | + |
---|
| 2198 | + if (kcryptd_crypt_write_inline(cc, ctx)) { |
---|
| 2199 | + complete(&ctx->restart); |
---|
| 2200 | + return; |
---|
| 2201 | + } |
---|
| 2202 | + |
---|
| 2203 | + kcryptd_crypt_write_io_submit(io, 1); |
---|
1816 | 2204 | } |
---|
1817 | 2205 | |
---|
1818 | 2206 | static void kcryptd_crypt(struct work_struct *work) |
---|
.. | .. |
---|
1825 | 2213 | kcryptd_crypt_write_convert(io); |
---|
1826 | 2214 | } |
---|
1827 | 2215 | |
---|
| 2216 | +static void kcryptd_crypt_tasklet(unsigned long work) |
---|
| 2217 | +{ |
---|
| 2218 | + kcryptd_crypt((struct work_struct *)work); |
---|
| 2219 | +} |
---|
| 2220 | + |
---|
1828 | 2221 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
---|
1829 | 2222 | { |
---|
1830 | 2223 | struct crypt_config *cc = io->cc; |
---|
| 2224 | + |
---|
| 2225 | + if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) || |
---|
| 2226 | + (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) { |
---|
| 2227 | + /* |
---|
| 2228 | + * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context. |
---|
| 2229 | + * irqs_disabled(): the kernel may run some IO completion from the idle thread, but |
---|
| 2230 | + * it is being executed with irqs disabled. |
---|
| 2231 | + */ |
---|
| 2232 | + if (in_irq() || irqs_disabled()) { |
---|
| 2233 | + io->in_tasklet = true; |
---|
| 2234 | + tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); |
---|
| 2235 | + tasklet_schedule(&io->tasklet); |
---|
| 2236 | + return; |
---|
| 2237 | + } |
---|
| 2238 | + |
---|
| 2239 | + kcryptd_crypt(&io->work); |
---|
| 2240 | + return; |
---|
| 2241 | + } |
---|
1831 | 2242 | |
---|
1832 | 2243 | INIT_WORK(&io->work, kcryptd_crypt); |
---|
1833 | 2244 | queue_work(cc->crypt_queue, &io->work); |
---|
.. | .. |
---|
1884 | 2295 | return -ENOMEM; |
---|
1885 | 2296 | |
---|
1886 | 2297 | for (i = 0; i < cc->tfms_count; i++) { |
---|
1887 | | - cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); |
---|
| 2298 | + cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, |
---|
| 2299 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
1888 | 2300 | if (IS_ERR(cc->cipher_tfm.tfms[i])) { |
---|
1889 | 2301 | err = PTR_ERR(cc->cipher_tfm.tfms[i]); |
---|
1890 | 2302 | crypt_free_tfms(cc); |
---|
.. | .. |
---|
1897 | 2309 | * algorithm implementation is used. Help people debug performance |
---|
1898 | 2310 | * problems by logging the ->cra_driver_name. |
---|
1899 | 2311 | */ |
---|
1900 | | - DMINFO("%s using implementation \"%s\"", ciphermode, |
---|
| 2312 | + DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
---|
1901 | 2313 | crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name); |
---|
1902 | 2314 | return 0; |
---|
1903 | 2315 | } |
---|
.. | .. |
---|
1910 | 2322 | if (!cc->cipher_tfm.tfms) |
---|
1911 | 2323 | return -ENOMEM; |
---|
1912 | 2324 | |
---|
1913 | | - cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); |
---|
| 2325 | + cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, |
---|
| 2326 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
1914 | 2327 | if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { |
---|
1915 | 2328 | err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); |
---|
1916 | 2329 | crypt_free_tfms(cc); |
---|
1917 | 2330 | return err; |
---|
1918 | 2331 | } |
---|
1919 | 2332 | |
---|
1920 | | - DMINFO("%s using implementation \"%s\"", ciphermode, |
---|
| 2333 | + DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
---|
1921 | 2334 | crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name); |
---|
1922 | 2335 | return 0; |
---|
1923 | 2336 | } |
---|
.. | .. |
---|
2011 | 2424 | return false; |
---|
2012 | 2425 | } |
---|
2013 | 2426 | |
---|
| 2427 | +static int set_key_user(struct crypt_config *cc, struct key *key) |
---|
| 2428 | +{ |
---|
| 2429 | + const struct user_key_payload *ukp; |
---|
| 2430 | + |
---|
| 2431 | + ukp = user_key_payload_locked(key); |
---|
| 2432 | + if (!ukp) |
---|
| 2433 | + return -EKEYREVOKED; |
---|
| 2434 | + |
---|
| 2435 | + if (cc->key_size != ukp->datalen) |
---|
| 2436 | + return -EINVAL; |
---|
| 2437 | + |
---|
| 2438 | + memcpy(cc->key, ukp->data, cc->key_size); |
---|
| 2439 | + |
---|
| 2440 | + return 0; |
---|
| 2441 | +} |
---|
| 2442 | + |
---|
| 2443 | +#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) |
---|
| 2444 | +static int set_key_encrypted(struct crypt_config *cc, struct key *key) |
---|
| 2445 | +{ |
---|
| 2446 | + const struct encrypted_key_payload *ekp; |
---|
| 2447 | + |
---|
| 2448 | + ekp = key->payload.data[0]; |
---|
| 2449 | + if (!ekp) |
---|
| 2450 | + return -EKEYREVOKED; |
---|
| 2451 | + |
---|
| 2452 | + if (cc->key_size != ekp->decrypted_datalen) |
---|
| 2453 | + return -EINVAL; |
---|
| 2454 | + |
---|
| 2455 | + memcpy(cc->key, ekp->decrypted_data, cc->key_size); |
---|
| 2456 | + |
---|
| 2457 | + return 0; |
---|
| 2458 | +} |
---|
| 2459 | +#endif /* CONFIG_ENCRYPTED_KEYS */ |
---|
| 2460 | + |
---|
2014 | 2461 | static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) |
---|
2015 | 2462 | { |
---|
2016 | 2463 | char *new_key_string, *key_desc; |
---|
2017 | 2464 | int ret; |
---|
| 2465 | + struct key_type *type; |
---|
2018 | 2466 | struct key *key; |
---|
2019 | | - const struct user_key_payload *ukp; |
---|
| 2467 | + int (*set_key)(struct crypt_config *cc, struct key *key); |
---|
2020 | 2468 | |
---|
2021 | 2469 | /* |
---|
2022 | 2470 | * Reject key_string with whitespace. dm core currently lacks code for |
---|
.. | .. |
---|
2032 | 2480 | if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) |
---|
2033 | 2481 | return -EINVAL; |
---|
2034 | 2482 | |
---|
2035 | | - if (strncmp(key_string, "logon:", key_desc - key_string + 1) && |
---|
2036 | | - strncmp(key_string, "user:", key_desc - key_string + 1)) |
---|
| 2483 | + if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) { |
---|
| 2484 | + type = &key_type_logon; |
---|
| 2485 | + set_key = set_key_user; |
---|
| 2486 | + } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) { |
---|
| 2487 | + type = &key_type_user; |
---|
| 2488 | + set_key = set_key_user; |
---|
| 2489 | +#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) |
---|
| 2490 | + } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) { |
---|
| 2491 | + type = &key_type_encrypted; |
---|
| 2492 | + set_key = set_key_encrypted; |
---|
| 2493 | +#endif |
---|
| 2494 | + } else { |
---|
2037 | 2495 | return -EINVAL; |
---|
| 2496 | + } |
---|
2038 | 2497 | |
---|
2039 | 2498 | new_key_string = kstrdup(key_string, GFP_KERNEL); |
---|
2040 | 2499 | if (!new_key_string) |
---|
2041 | 2500 | return -ENOMEM; |
---|
2042 | 2501 | |
---|
2043 | | - key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user, |
---|
2044 | | - key_desc + 1, NULL); |
---|
| 2502 | + key = request_key(type, key_desc + 1, NULL); |
---|
2045 | 2503 | if (IS_ERR(key)) { |
---|
2046 | | - kzfree(new_key_string); |
---|
| 2504 | + kfree_sensitive(new_key_string); |
---|
2047 | 2505 | return PTR_ERR(key); |
---|
2048 | 2506 | } |
---|
2049 | 2507 | |
---|
2050 | 2508 | down_read(&key->sem); |
---|
2051 | 2509 | |
---|
2052 | | - ukp = user_key_payload_locked(key); |
---|
2053 | | - if (!ukp) { |
---|
| 2510 | + ret = set_key(cc, key); |
---|
| 2511 | + if (ret < 0) { |
---|
2054 | 2512 | up_read(&key->sem); |
---|
2055 | 2513 | key_put(key); |
---|
2056 | | - kzfree(new_key_string); |
---|
2057 | | - return -EKEYREVOKED; |
---|
| 2514 | + kfree_sensitive(new_key_string); |
---|
| 2515 | + return ret; |
---|
2058 | 2516 | } |
---|
2059 | | - |
---|
2060 | | - if (cc->key_size != ukp->datalen) { |
---|
2061 | | - up_read(&key->sem); |
---|
2062 | | - key_put(key); |
---|
2063 | | - kzfree(new_key_string); |
---|
2064 | | - return -EINVAL; |
---|
2065 | | - } |
---|
2066 | | - |
---|
2067 | | - memcpy(cc->key, ukp->data, cc->key_size); |
---|
2068 | 2517 | |
---|
2069 | 2518 | up_read(&key->sem); |
---|
2070 | 2519 | key_put(key); |
---|
.. | .. |
---|
2076 | 2525 | |
---|
2077 | 2526 | if (!ret) { |
---|
2078 | 2527 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
---|
2079 | | - kzfree(cc->key_string); |
---|
| 2528 | + kfree_sensitive(cc->key_string); |
---|
2080 | 2529 | cc->key_string = new_key_string; |
---|
2081 | 2530 | } else |
---|
2082 | | - kzfree(new_key_string); |
---|
| 2531 | + kfree_sensitive(new_key_string); |
---|
2083 | 2532 | |
---|
2084 | 2533 | return ret; |
---|
2085 | 2534 | } |
---|
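With these changes crypt_set_keyring_key() dispatches on the key-string prefix instead of peeking at the first character: "logon:" and "user:" keys still share set_key_user(), while "encrypted:" keys are accepted only when CONFIG_ENCRYPTED_KEYS (or its module form) is available and are unwrapped through set_key_encrypted(). A minimal userspace sketch of the prefix matching follows (hypothetical helper names; strchr stands in for the separator lookup the driver performs earlier in the function):

    #include <stdio.h>
    #include <string.h>

    /* Sketch only: mirrors the strncmp() dispatch above, not the driver code. */
    static const char *key_type_of(const char *key_string)
    {
            const char *key_desc = strchr(key_string, ':');
            size_t prefix_len;

            /* Same sanity checks as the constructor: non-empty type and description. */
            if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
                    return NULL;
            prefix_len = key_desc - key_string + 1;

            if (!strncmp(key_string, "logon:", prefix_len))
                    return "logon";
            if (!strncmp(key_string, "user:", prefix_len))
                    return "user";
            if (!strncmp(key_string, "encrypted:", prefix_len))
                    return "encrypted";     /* only with CONFIG_ENCRYPTED_KEYS[_MODULE] */
            return NULL;                    /* any other prefix is rejected (-EINVAL) */
    }

    int main(void)
    {
            printf("%s\n", key_type_of("encrypted:cryptkey"));                    /* encrypted */
            printf("%s\n", key_type_of("trusted:foo") ? "accepted" : "rejected"); /* rejected */
            return 0;
    }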
.. | .. |
---|
2116 | 2565 | |
---|
2117 | 2566 | static int get_key_size(char **key_string) |
---|
2118 | 2567 | { |
---|
2119 | | - return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; |
---|
| 2568 | + return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1); |
---|
2120 | 2569 | } |
---|
2121 | 2570 | |
---|
2122 | | -#endif |
---|
| 2571 | +#endif /* CONFIG_KEYS */ |
---|
2123 | 2572 | |
---|
2124 | 2573 | static int crypt_set_key(struct crypt_config *cc, char *key) |
---|
2125 | 2574 | { |
---|
.. | .. |
---|
2140 | 2589 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
---|
2141 | 2590 | |
---|
2142 | 2591 | /* wipe references to any kernel keyring key */ |
---|
2143 | | - kzfree(cc->key_string); |
---|
| 2592 | + kfree_sensitive(cc->key_string); |
---|
2144 | 2593 | cc->key_string = NULL; |
---|
2145 | 2594 | |
---|
2146 | 2595 | /* Decode key from its hex representation. */ |
---|
.. | .. |
---|
2164 | 2613 | |
---|
2165 | 2614 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
---|
2166 | 2615 | get_random_bytes(&cc->key, cc->key_size); |
---|
2167 | | - kzfree(cc->key_string); |
---|
| 2616 | + |
---|
| 2617 | + /* Wipe IV private keys */ |
---|
| 2618 | + if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
---|
| 2619 | + r = cc->iv_gen_ops->wipe(cc); |
---|
| 2620 | + if (r) |
---|
| 2621 | + return r; |
---|
| 2622 | + } |
---|
| 2623 | + |
---|
| 2624 | + kfree_sensitive(cc->key_string); |
---|
2168 | 2625 | cc->key_string = NULL; |
---|
2169 | 2626 | r = crypt_setkey(cc); |
---|
2170 | 2627 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); |
---|
.. | .. |
---|
2174 | 2631 | |
---|
2175 | 2632 | static void crypt_calculate_pages_per_client(void) |
---|
2176 | 2633 | { |
---|
2177 | | - unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100; |
---|
| 2634 | + unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; |
---|
2178 | 2635 | |
---|
2179 | 2636 | if (!dm_crypt_clients_n) |
---|
2180 | 2637 | return; |
---|
.. | .. |
---|
2248 | 2705 | if (cc->dev) |
---|
2249 | 2706 | dm_put_device(ti, cc->dev); |
---|
2250 | 2707 | |
---|
2251 | | - kzfree(cc->cipher); |
---|
2252 | | - kzfree(cc->cipher_string); |
---|
2253 | | - kzfree(cc->key_string); |
---|
2254 | | - kzfree(cc->cipher_auth); |
---|
2255 | | - kzfree(cc->authenc_key); |
---|
| 2708 | + kfree_sensitive(cc->cipher_string); |
---|
| 2709 | + kfree_sensitive(cc->key_string); |
---|
| 2710 | + kfree_sensitive(cc->cipher_auth); |
---|
| 2711 | + kfree_sensitive(cc->authenc_key); |
---|
2256 | 2712 | |
---|
2257 | 2713 | mutex_destroy(&cc->bio_alloc_lock); |
---|
2258 | 2714 | |
---|
2259 | 2715 | /* Must zero key material before freeing */ |
---|
2260 | | - kzfree(cc); |
---|
| 2716 | + kfree_sensitive(cc); |
---|
2261 | 2717 | |
---|
2262 | 2718 | spin_lock(&dm_crypt_clients_lock); |
---|
2263 | 2719 | WARN_ON(!dm_crypt_clients_n); |
---|
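Throughout the patch kzfree() is replaced by kfree_sensitive(), the new name for the same helper: it zeroes the allocation before freeing it, so key material and cipher specifications do not linger in freed memory. A conceptual userspace analogue, as a sketch only (the kernel version determines the size with ksize() and clears it with memzero_explicit() so the store cannot be optimized away):

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for kfree_sensitive(); explicit_bzero() plays the
     * role of memzero_explicit() here. */
    static void free_sensitive(void *p, size_t len)
    {
            if (!p)
                    return;
            explicit_bzero(p, len);
            free(p);
    }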
.. | .. |
---|
2299 | 2755 | cc->iv_gen_ops = &crypt_iv_benbi_ops; |
---|
2300 | 2756 | else if (strcmp(ivmode, "null") == 0) |
---|
2301 | 2757 | cc->iv_gen_ops = &crypt_iv_null_ops; |
---|
2302 | | - else if (strcmp(ivmode, "lmk") == 0) { |
---|
| 2758 | + else if (strcmp(ivmode, "eboiv") == 0) |
---|
| 2759 | + cc->iv_gen_ops = &crypt_iv_eboiv_ops; |
---|
| 2760 | + else if (strcmp(ivmode, "elephant") == 0) { |
---|
| 2761 | + cc->iv_gen_ops = &crypt_iv_elephant_ops; |
---|
| 2762 | + cc->key_parts = 2; |
---|
| 2763 | + cc->key_extra_size = cc->key_size / 2; |
---|
| 2764 | + if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE) |
---|
| 2765 | + return -EINVAL; |
---|
| 2766 | + set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags); |
---|
| 2767 | + } else if (strcmp(ivmode, "lmk") == 0) { |
---|
2303 | 2768 | cc->iv_gen_ops = &crypt_iv_lmk_ops; |
---|
2304 | 2769 | /* |
---|
2305 | 2770 | * Version 2 and 3 is recognised according |
---|
.. | .. |
---|
2328 | 2793 | } |
---|
2329 | 2794 | |
---|
2330 | 2795 | /* |
---|
2331 | | - * Workaround to parse cipher algorithm from crypto API spec. |
---|
2332 | | - * The cc->cipher is currently used only in ESSIV. |
---|
2333 | | - * This should be probably done by crypto-api calls (once available...) |
---|
2334 | | - */ |
---|
2335 | | -static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) |
---|
2336 | | -{ |
---|
2337 | | - const char *alg_name = NULL; |
---|
2338 | | - char *start, *end; |
---|
2339 | | - |
---|
2340 | | - if (crypt_integrity_aead(cc)) { |
---|
2341 | | - alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); |
---|
2342 | | - if (!alg_name) |
---|
2343 | | - return -EINVAL; |
---|
2344 | | - if (crypt_integrity_hmac(cc)) { |
---|
2345 | | - alg_name = strchr(alg_name, ','); |
---|
2346 | | - if (!alg_name) |
---|
2347 | | - return -EINVAL; |
---|
2348 | | - } |
---|
2349 | | - alg_name++; |
---|
2350 | | - } else { |
---|
2351 | | - alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); |
---|
2352 | | - if (!alg_name) |
---|
2353 | | - return -EINVAL; |
---|
2354 | | - } |
---|
2355 | | - |
---|
2356 | | - start = strchr(alg_name, '('); |
---|
2357 | | - end = strchr(alg_name, ')'); |
---|
2358 | | - |
---|
2359 | | - if (!start && !end) { |
---|
2360 | | - cc->cipher = kstrdup(alg_name, GFP_KERNEL); |
---|
2361 | | - return cc->cipher ? 0 : -ENOMEM; |
---|
2362 | | - } |
---|
2363 | | - |
---|
2364 | | - if (!start || !end || ++start >= end) |
---|
2365 | | - return -EINVAL; |
---|
2366 | | - |
---|
2367 | | - cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); |
---|
2368 | | - if (!cc->cipher) |
---|
2369 | | - return -ENOMEM; |
---|
2370 | | - |
---|
2371 | | - strncpy(cc->cipher, start, end - start); |
---|
2372 | | - |
---|
2373 | | - return 0; |
---|
2374 | | -} |
---|
2375 | | - |
---|
2376 | | -/* |
---|
2377 | 2796 | * Workaround to parse HMAC algorithm from AEAD crypto API spec. |
---|
2378 | 2797 | * The HMAC is needed to calculate tag size (HMAC digest size). |
---|
2379 | 2798 | * This should be probably done by crypto-api calls (once available...) |
---|
.. | .. |
---|
2396 | 2815 | return -ENOMEM; |
---|
2397 | 2816 | strncpy(mac_alg, start, end - start); |
---|
2398 | 2817 | |
---|
2399 | | - mac = crypto_alloc_ahash(mac_alg, 0, 0); |
---|
| 2818 | + mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY); |
---|
2400 | 2819 | kfree(mac_alg); |
---|
2401 | 2820 | |
---|
2402 | 2821 | if (IS_ERR(mac)) |
---|
.. | .. |
---|
2416 | 2835 | char **ivmode, char **ivopts) |
---|
2417 | 2836 | { |
---|
2418 | 2837 | struct crypt_config *cc = ti->private; |
---|
2419 | | - char *tmp, *cipher_api; |
---|
| 2838 | + char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME]; |
---|
2420 | 2839 | int ret = -EINVAL; |
---|
2421 | 2840 | |
---|
2422 | 2841 | cc->tfms_count = 1; |
---|
.. | .. |
---|
2442 | 2861 | /* The rest is crypto API spec */ |
---|
2443 | 2862 | cipher_api = tmp; |
---|
2444 | 2863 | |
---|
| 2864 | + /* Alloc AEAD, can be used only in new format. */ |
---|
| 2865 | + if (crypt_integrity_aead(cc)) { |
---|
| 2866 | + ret = crypt_ctr_auth_cipher(cc, cipher_api); |
---|
| 2867 | + if (ret < 0) { |
---|
| 2868 | + ti->error = "Invalid AEAD cipher spec"; |
---|
| 2869 | + return -ENOMEM; |
---|
| 2870 | + } |
---|
| 2871 | + } |
---|
| 2872 | + |
---|
2445 | 2873 | if (*ivmode && !strcmp(*ivmode, "lmk")) |
---|
2446 | 2874 | cc->tfms_count = 64; |
---|
| 2875 | + |
---|
| 2876 | + if (*ivmode && !strcmp(*ivmode, "essiv")) { |
---|
| 2877 | + if (!*ivopts) { |
---|
| 2878 | + ti->error = "Digest algorithm missing for ESSIV mode"; |
---|
| 2879 | + return -EINVAL; |
---|
| 2880 | + } |
---|
| 2881 | + ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)", |
---|
| 2882 | + cipher_api, *ivopts); |
---|
| 2883 | + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { |
---|
| 2884 | + ti->error = "Cannot allocate cipher string"; |
---|
| 2885 | + return -ENOMEM; |
---|
| 2886 | + } |
---|
| 2887 | + cipher_api = buf; |
---|
| 2888 | + } |
---|
2447 | 2889 | |
---|
2448 | 2890 | cc->key_parts = cc->tfms_count; |
---|
2449 | 2891 | |
---|
.. | .. |
---|
2454 | 2896 | return ret; |
---|
2455 | 2897 | } |
---|
2456 | 2898 | |
---|
2457 | | - /* Alloc AEAD, can be used only in new format. */ |
---|
2458 | | - if (crypt_integrity_aead(cc)) { |
---|
2459 | | - ret = crypt_ctr_auth_cipher(cc, cipher_api); |
---|
2460 | | - if (ret < 0) { |
---|
2461 | | - ti->error = "Invalid AEAD cipher spec"; |
---|
2462 | | - return -ENOMEM; |
---|
2463 | | - } |
---|
| 2899 | + if (crypt_integrity_aead(cc)) |
---|
2464 | 2900 | cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); |
---|
2465 | | - } else |
---|
| 2901 | + else |
---|
2466 | 2902 | cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); |
---|
2467 | | - |
---|
2468 | | - ret = crypt_ctr_blkdev_cipher(cc); |
---|
2469 | | - if (ret < 0) { |
---|
2470 | | - ti->error = "Cannot allocate cipher string"; |
---|
2471 | | - return -ENOMEM; |
---|
2472 | | - } |
---|
2473 | 2903 | |
---|
2474 | 2904 | return 0; |
---|
2475 | 2905 | } |
---|
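ESSIV IV generation is now delegated to the crypto API's "essiv" template rather than being computed by the driver: when ivmode is "essiv", the constructor wraps the cipher spec and the digest named in the IV options into a single algorithm string, so a mapping such as aes-cbc-essiv:sha256 ends up allocating "essiv(cbc(aes),sha256)". A sketch of the string assembly (illustrative userspace code; CRYPTO_MAX_ALG_NAME mirrors the kernel's 128-byte limit):

    #include <stdio.h>

    #define CRYPTO_MAX_ALG_NAME 128              /* kernel limit for algorithm names */

    int main(void)
    {
            char buf[CRYPTO_MAX_ALG_NAME];
            const char *cipher_api = "cbc(aes)";  /* inner skcipher/AEAD spec */
            const char *ivopts = "sha256";        /* digest named after "essiv:" */
            int ret = snprintf(buf, sizeof(buf), "essiv(%s,%s)", cipher_api, ivopts);

            if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME)
                    return 1;                     /* "Cannot allocate cipher string" */
            printf("%s\n", buf);                  /* essiv(cbc(aes),sha256) */
            return 0;
    }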
.. | .. |
---|
2505 | 2935 | } |
---|
2506 | 2936 | cc->key_parts = cc->tfms_count; |
---|
2507 | 2937 | |
---|
2508 | | - cc->cipher = kstrdup(cipher, GFP_KERNEL); |
---|
2509 | | - if (!cc->cipher) |
---|
2510 | | - goto bad_mem; |
---|
2511 | | - |
---|
2512 | 2938 | chainmode = strsep(&tmp, "-"); |
---|
2513 | 2939 | *ivmode = strsep(&tmp, ":"); |
---|
2514 | 2940 | *ivopts = tmp; |
---|
.. | .. |
---|
2531 | 2957 | if (!cipher_api) |
---|
2532 | 2958 | goto bad_mem; |
---|
2533 | 2959 | |
---|
2534 | | - ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
---|
2535 | | - "%s(%s)", chainmode, cipher); |
---|
2536 | | - if (ret < 0) { |
---|
| 2960 | + if (*ivmode && !strcmp(*ivmode, "essiv")) { |
---|
| 2961 | + if (!*ivopts) { |
---|
| 2962 | + ti->error = "Digest algorithm missing for ESSIV mode"; |
---|
| 2963 | + kfree(cipher_api); |
---|
| 2964 | + return -EINVAL; |
---|
| 2965 | + } |
---|
| 2966 | + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
---|
| 2967 | + "essiv(%s(%s),%s)", chainmode, cipher, *ivopts); |
---|
| 2968 | + } else { |
---|
| 2969 | + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
---|
| 2970 | + "%s(%s)", chainmode, cipher); |
---|
| 2971 | + } |
---|
| 2972 | + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { |
---|
2537 | 2973 | kfree(cipher_api); |
---|
2538 | 2974 | goto bad_mem; |
---|
2539 | 2975 | } |
---|
.. | .. |
---|
2614 | 3050 | struct crypt_config *cc = ti->private; |
---|
2615 | 3051 | struct dm_arg_set as; |
---|
2616 | 3052 | static const struct dm_arg _args[] = { |
---|
2617 | | - {0, 6, "Invalid number of feature args"}, |
---|
| 3053 | + {0, 8, "Invalid number of feature args"}, |
---|
2618 | 3054 | }; |
---|
2619 | 3055 | unsigned int opt_params, val; |
---|
2620 | 3056 | const char *opt_string, *sval; |
---|
.. | .. |
---|
2644 | 3080 | |
---|
2645 | 3081 | else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) |
---|
2646 | 3082 | set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); |
---|
| 3083 | + else if (!strcasecmp(opt_string, "no_read_workqueue")) |
---|
| 3084 | + set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); |
---|
| 3085 | + else if (!strcasecmp(opt_string, "no_write_workqueue")) |
---|
| 3086 | + set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
---|
2647 | 3087 | else if (sscanf(opt_string, "integrity:%u:", &val) == 1) { |
---|
2648 | 3088 | if (val == 0 || val > MAX_TAG_SIZE) { |
---|
2649 | 3089 | ti->error = "Invalid integrity arguments"; |
---|
.. | .. |
---|
2684 | 3124 | return 0; |
---|
2685 | 3125 | } |
---|
2686 | 3126 | |
---|
| 3127 | +#ifdef CONFIG_BLK_DEV_ZONED |
---|
| 3128 | + |
---|
| 3129 | +static int crypt_report_zones(struct dm_target *ti, |
---|
| 3130 | + struct dm_report_zones_args *args, unsigned int nr_zones) |
---|
| 3131 | +{ |
---|
| 3132 | + struct crypt_config *cc = ti->private; |
---|
| 3133 | + sector_t sector = cc->start + dm_target_offset(ti, args->next_sector); |
---|
| 3134 | + |
---|
| 3135 | + args->start = cc->start; |
---|
| 3136 | + return blkdev_report_zones(cc->dev->bdev, sector, nr_zones, |
---|
| 3137 | + dm_report_zones_cb, args); |
---|
| 3138 | +} |
---|
| 3139 | + |
---|
| 3140 | +#endif |
---|
| 3141 | + |
---|
2687 | 3142 | /* |
---|
2688 | 3143 | * Construct an encryption mapping: |
---|
2689 | 3144 | * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start> |
---|
.. | .. |
---|
2691 | 3146 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
---|
2692 | 3147 | { |
---|
2693 | 3148 | struct crypt_config *cc; |
---|
| 3149 | + const char *devname = dm_table_device_name(ti->table); |
---|
2694 | 3150 | int key_size; |
---|
2695 | 3151 | unsigned int align_mask; |
---|
2696 | 3152 | unsigned long long tmpll; |
---|
.. | .. |
---|
2709 | 3165 | return -EINVAL; |
---|
2710 | 3166 | } |
---|
2711 | 3167 | |
---|
2712 | | - cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
---|
| 3168 | + cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL); |
---|
2713 | 3169 | if (!cc) { |
---|
2714 | 3170 | ti->error = "Cannot allocate encryption context"; |
---|
2715 | 3171 | return -ENOMEM; |
---|
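The crypt_config allocation now uses struct_size() for the trailing flexible key[] array instead of the open-coded sizeof(*cc) + key_size * sizeof(u8); the macro additionally guards against multiplication overflow. A toy illustration of the same pattern (hypothetical struct, not the driver's):

    #include <stdio.h>
    #include <stddef.h>

    struct toy_cfg {
            int flags;
            unsigned char key[];          /* flexible array member, like cc->key */
    };

    int main(void)
    {
            size_t key_size = 32;
            /* struct_size(cc, key, key_size) computes this, with overflow checking. */
            size_t bytes = sizeof(struct toy_cfg) + key_size * sizeof(unsigned char);

            printf("%zu bytes for a %zu-byte key\n", bytes, key_size);
            return 0;
    }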
.. | .. |
---|
2816 | 3272 | } |
---|
2817 | 3273 | cc->start = tmpll; |
---|
2818 | 3274 | |
---|
| 3275 | + /* |
---|
| 3276 | + * For zoned block devices, we need to preserve the issuer write |
---|
| 3277 | + * ordering. To do so, disable write workqueues and force inline |
---|
| 3278 | + * encryption completion. |
---|
| 3279 | + */ |
---|
| 3280 | + if (bdev_is_zoned(cc->dev->bdev)) { |
---|
| 3281 | + set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
---|
| 3282 | + set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags); |
---|
| 3283 | + } |
---|
| 3284 | + |
---|
2819 | 3285 | if (crypt_integrity_aead(cc) || cc->integrity_iv_size) { |
---|
2820 | 3286 | ret = crypt_integrity_ctr(cc, ti); |
---|
2821 | 3287 | if (ret) |
---|
.. | .. |
---|
2836 | 3302 | } |
---|
2837 | 3303 | |
---|
2838 | 3304 | ret = -ENOMEM; |
---|
2839 | | - cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); |
---|
| 3305 | + cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); |
---|
2840 | 3306 | if (!cc->io_queue) { |
---|
2841 | 3307 | ti->error = "Couldn't create kcryptd io queue"; |
---|
2842 | 3308 | goto bad; |
---|
2843 | 3309 | } |
---|
2844 | 3310 | |
---|
2845 | 3311 | if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) |
---|
2846 | | - cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); |
---|
| 3312 | + cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
---|
| 3313 | + 1, devname); |
---|
2847 | 3314 | else |
---|
2848 | | - cc->crypt_queue = alloc_workqueue("kcryptd", |
---|
2849 | | - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
---|
2850 | | - num_online_cpus()); |
---|
| 3315 | + cc->crypt_queue = alloc_workqueue("kcryptd/%s", |
---|
| 3316 | + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
---|
| 3317 | + num_online_cpus(), devname); |
---|
2851 | 3318 | if (!cc->crypt_queue) { |
---|
2852 | 3319 | ti->error = "Couldn't create kcryptd queue"; |
---|
2853 | 3320 | goto bad; |
---|
.. | .. |
---|
2856 | 3323 | spin_lock_init(&cc->write_thread_lock); |
---|
2857 | 3324 | cc->write_tree = RB_ROOT; |
---|
2858 | 3325 | |
---|
2859 | | - cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); |
---|
| 3326 | + cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname); |
---|
2860 | 3327 | if (IS_ERR(cc->write_thread)) { |
---|
2861 | 3328 | ret = PTR_ERR(cc->write_thread); |
---|
2862 | 3329 | cc->write_thread = NULL; |
---|
.. | .. |
---|
2866 | 3333 | wake_up_process(cc->write_thread); |
---|
2867 | 3334 | |
---|
2868 | 3335 | ti->num_flush_bios = 1; |
---|
| 3336 | + ti->limit_swap_bios = true; |
---|
2869 | 3337 | |
---|
2870 | 3338 | return 0; |
---|
2871 | 3339 | |
---|
.. | .. |
---|
2940 | 3408 | return DM_MAPIO_SUBMITTED; |
---|
2941 | 3409 | } |
---|
2942 | 3410 | |
---|
| 3411 | +static char hex2asc(unsigned char c) |
---|
| 3412 | +{ |
---|
| 3413 | + return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); |
---|
| 3414 | +} |
---|
| 3415 | + |
---|
2943 | 3416 | static void crypt_status(struct dm_target *ti, status_type_t type, |
---|
2944 | 3417 | unsigned status_flags, char *result, unsigned maxlen) |
---|
2945 | 3418 | { |
---|
.. | .. |
---|
2958 | 3431 | if (cc->key_size > 0) { |
---|
2959 | 3432 | if (cc->key_string) |
---|
2960 | 3433 | DMEMIT(":%u:%s", cc->key_size, cc->key_string); |
---|
2961 | | - else |
---|
2962 | | - for (i = 0; i < cc->key_size; i++) |
---|
2963 | | - DMEMIT("%02x", cc->key[i]); |
---|
| 3434 | + else { |
---|
| 3435 | + for (i = 0; i < cc->key_size; i++) { |
---|
| 3436 | + DMEMIT("%c%c", hex2asc(cc->key[i] >> 4), |
---|
| 3437 | + hex2asc(cc->key[i] & 0xf)); |
---|
| 3438 | + } |
---|
| 3439 | + } |
---|
2964 | 3440 | } else |
---|
2965 | 3441 | DMEMIT("-"); |
---|
2966 | 3442 | |
---|
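The status path now emits each key byte through the branchless hex2asc() helper above instead of a "%02x" format, so the nibble-to-ASCII conversion involves no data-dependent branch or lookup table. A quick userspace check of the expression (a sketch; the helper is only ever called with values 0..15):

    #include <stdio.h>

    static char hex2asc(unsigned char c)
    {
            /* 0..9 -> '0'..'9', 10..15 -> 'a'..'f', without branching on c. */
            return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
    }

    int main(void)
    {
            const char *ref = "0123456789abcdef";

            for (unsigned int c = 0; c < 16; c++)
                    if (hex2asc(c) != ref[c])
                            return 1;
            puts(ref);                    /* all sixteen digits match */
            return 0;
    }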
.. | .. |
---|
2970 | 3446 | num_feature_args += !!ti->num_discard_bios; |
---|
2971 | 3447 | num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); |
---|
2972 | 3448 | num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); |
---|
| 3449 | + num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); |
---|
| 3450 | + num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
---|
2973 | 3451 | num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); |
---|
2974 | 3452 | num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); |
---|
2975 | 3453 | if (cc->on_disk_tag_size) |
---|
.. | .. |
---|
2982 | 3460 | DMEMIT(" same_cpu_crypt"); |
---|
2983 | 3461 | if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) |
---|
2984 | 3462 | DMEMIT(" submit_from_crypt_cpus"); |
---|
| 3463 | + if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) |
---|
| 3464 | + DMEMIT(" no_read_workqueue"); |
---|
| 3465 | + if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) |
---|
| 3466 | + DMEMIT(" no_write_workqueue"); |
---|
2985 | 3467 | if (cc->on_disk_tag_size) |
---|
2986 | 3468 | DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); |
---|
2987 | 3469 | if (cc->sector_size != (1 << SECTOR_SHIFT)) |
---|
.. | .. |
---|
3056 | 3538 | memset(cc->key, 0, cc->key_size * sizeof(u8)); |
---|
3057 | 3539 | return ret; |
---|
3058 | 3540 | } |
---|
3059 | | - if (argc == 2 && !strcasecmp(argv[1], "wipe")) { |
---|
3060 | | - if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
---|
3061 | | - ret = cc->iv_gen_ops->wipe(cc); |
---|
3062 | | - if (ret) |
---|
3063 | | - return ret; |
---|
3064 | | - } |
---|
| 3541 | + if (argc == 2 && !strcasecmp(argv[1], "wipe")) |
---|
3065 | 3542 | return crypt_wipe_key(cc); |
---|
3066 | | - } |
---|
3067 | 3543 | } |
---|
3068 | 3544 | |
---|
3069 | 3545 | error: |
---|
.. | .. |
---|
3100 | 3576 | |
---|
3101 | 3577 | static struct target_type crypt_target = { |
---|
3102 | 3578 | .name = "crypt", |
---|
3103 | | - .version = {1, 18, 1}, |
---|
| 3579 | + .version = {1, 22, 0}, |
---|
3104 | 3580 | .module = THIS_MODULE, |
---|
3105 | 3581 | .ctr = crypt_ctr, |
---|
3106 | 3582 | .dtr = crypt_dtr, |
---|
| 3583 | +#ifdef CONFIG_BLK_DEV_ZONED |
---|
| 3584 | + .features = DM_TARGET_ZONED_HM, |
---|
| 3585 | + .report_zones = crypt_report_zones, |
---|
| 3586 | +#endif |
---|
3107 | 3587 | .map = crypt_map, |
---|
3108 | 3588 | .status = crypt_status, |
---|
3109 | 3589 | .postsuspend = crypt_postsuspend, |
---|