@@ ... @@
 /*
  * Copyright (C) 2003 Jana Saout <jana@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
  *
  * This file is released under the GPL.
  */
@@ ... @@
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
+#include <linux/key-type.h>
 #include <keys/user-type.h>
+#include <keys/encrypted-type.h>

 #include <linux/device-mapper.h>

@@ ... @@
        u8 *integrity_metadata;
        bool integrity_metadata_from_pool;
        struct work_struct work;
+       struct tasklet_struct tasklet;

        struct convert_context ctx;

@@ ... @@
                   struct dm_crypt_request *dmreq);
 };

-struct iv_essiv_private {
-       struct crypto_shash *hash_tfm;
-       u8 *salt;
-};
-
 struct iv_benbi_private {
        int shift;
 };
@@ ... @@
        u8 *whitening;
 };

+#define ELEPHANT_MAX_KEY_SIZE 32
+struct iv_elephant_private {
+       struct crypto_skcipher *tfm;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
-            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+            DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
+            DM_CRYPT_WRITE_INLINE };

 enum cipher_flags {
        CRYPT_MODE_INTEGRITY_AEAD,      /* Use authenticated mode for cipher */
        CRYPT_IV_LARGE_SECTORS,         /* Calculate IV from sector_size, not 512B sectors */
+       CRYPT_ENCRYPT_PREPROCESS,       /* Must preprocess data for encryption (elephant) */
 };

 /*
@@ ... @@
        struct task_struct *write_thread;
        struct rb_root write_tree;

-       char *cipher;
        char *cipher_string;
        char *cipher_auth;
        char *key_string;

        const struct crypt_iv_operations *iv_gen_ops;
        union {
-               struct iv_essiv_private essiv;
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
+               struct iv_elephant_private elephant;
        } iv_gen_private;
        u64 iv_offset;
        unsigned int iv_size;
        unsigned short int sector_size;
        unsigned char sector_shift;

-       /* ESSIV: struct crypto_cipher *essiv_tfm */
-       void *iv_private;
        union {
                struct crypto_skcipher **tfms;
                struct crypto_aead **tfms_aead;
@@ ... @@
        struct mutex bio_alloc_lock;

        u8 *authenc_key; /* space for keys in authenc() format (if used) */
-       u8 key[0];
+       u8 key[];
 };

 #define MIN_IOS 64
@@ ... @@
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
                                             struct scatterlist *sg);
+
+static bool crypt_integrity_aead(struct crypt_config *cc);

 /*
  * Use this to access cipher attributes that are independent of the key.
@@ ... @@
  * Note that this encryption scheme is vulnerable to watermarking attacks
  * and should be used for old compatible containers access only.
  *
- * plumb: unimplemented, see:
- * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
+ * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
+ *        The IV is encrypted little-endian byte-offset (with the same key
+ *        and cipher as the volume).
+ *
+ * elephant: The extended version of eboiv with additional Elephant diffuser
+ *           used with Bitlocker CBC mode.
+ *           This mode was used in older Windows systems
+ *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
  */
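For orientation: every IV mode in this file plugs into one hook table. The generator hook runs before a sector's payload goes through the skcipher/AEAD, and the optional post hook runs after it; the new elephant mode below is the first generator that also rewrites the payload itself. As a reading aid only, a sketch of the existing hook table declared earlier in this file (not touched by this patch):

        struct crypt_iv_operations {
                int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                           const char *opts);
                void (*dtr)(struct crypt_config *cc);
                int (*init)(struct crypt_config *cc);
                int (*wipe)(struct crypt_config *cc);
                int (*generator)(struct crypt_config *cc, u8 *iv,
                                 struct dm_crypt_request *dmreq);
                int (*post)(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq);
        };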

 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ ... @@
        return 0;
 }

-/* Initialise ESSIV - compute salt but no local memory allocations */
-static int crypt_iv_essiv_init(struct crypt_config *cc)
-{
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
-       struct crypto_cipher *essiv_tfm;
-       int err;
-
-       desc->tfm = essiv->hash_tfm;
-       desc->flags = 0;
-
-       err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
-       shash_desc_zero(desc);
-       if (err)
-               return err;
-
-       essiv_tfm = cc->iv_private;
-
-       err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-                                  crypto_shash_digestsize(essiv->hash_tfm));
-       if (err)
-               return err;
-
-       return 0;
-}
-
-/* Wipe salt and reset key derived from volume key */
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
-{
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
-       struct crypto_cipher *essiv_tfm;
-       int r, err = 0;
-
-       memset(essiv->salt, 0, salt_size);
-
-       essiv_tfm = cc->iv_private;
-       r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-       if (r)
-               err = r;
-
-       return err;
-}
-
-/* Allocate the cipher for ESSIV */
-static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
-                                               struct dm_target *ti,
-                                               const u8 *salt,
-                                               unsigned int saltsize)
-{
-       struct crypto_cipher *essiv_tfm;
-       int err;
-
-       /* Setup the essiv_tfm with the given salt */
-       essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(essiv_tfm)) {
-               ti->error = "Error allocating crypto tfm for ESSIV";
-               return essiv_tfm;
-       }
-
-       if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
-               ti->error = "Block size of ESSIV cipher does "
-                           "not match IV size of block cipher";
-               crypto_free_cipher(essiv_tfm);
-               return ERR_PTR(-EINVAL);
-       }
-
-       err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
-       if (err) {
-               ti->error = "Failed to set key for ESSIV cipher";
-               crypto_free_cipher(essiv_tfm);
-               return ERR_PTR(err);
-       }
-
-       return essiv_tfm;
-}
-
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
-       struct crypto_cipher *essiv_tfm;
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-
-       crypto_free_shash(essiv->hash_tfm);
-       essiv->hash_tfm = NULL;
-
-       kzfree(essiv->salt);
-       essiv->salt = NULL;
-
-       essiv_tfm = cc->iv_private;
-
-       if (essiv_tfm)
-               crypto_free_cipher(essiv_tfm);
-
-       cc->iv_private = NULL;
-}
-
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-                             const char *opts)
-{
-       struct crypto_cipher *essiv_tfm = NULL;
-       struct crypto_shash *hash_tfm = NULL;
-       u8 *salt = NULL;
-       int err;
-
-       if (!opts) {
-               ti->error = "Digest algorithm missing for ESSIV mode";
-               return -EINVAL;
-       }
-
-       /* Allocate hash algorithm */
-       hash_tfm = crypto_alloc_shash(opts, 0, 0);
-       if (IS_ERR(hash_tfm)) {
-               ti->error = "Error initializing ESSIV hash";
-               err = PTR_ERR(hash_tfm);
-               goto bad;
-       }
-
-       salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
-       if (!salt) {
-               ti->error = "Error kmallocing salt storage in ESSIV";
-               err = -ENOMEM;
-               goto bad;
-       }
-
-       cc->iv_gen_private.essiv.salt = salt;
-       cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
-
-       essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
-                                      crypto_shash_digestsize(hash_tfm));
-       if (IS_ERR(essiv_tfm)) {
-               crypt_iv_essiv_dtr(cc);
-               return PTR_ERR(essiv_tfm);
-       }
-       cc->iv_private = essiv_tfm;
-
-       return 0;
-
-bad:
-       if (hash_tfm && !IS_ERR(hash_tfm))
-               crypto_free_shash(hash_tfm);
-       kfree(salt);
-       return err;
-}
-
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
 {
-       struct crypto_cipher *essiv_tfm = cc->iv_private;
-
+       /*
+        * ESSIV encryption of the IV is now handled by the crypto API,
+        * so just pass the plain sector number here.
+        */
        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
-       crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

        return 0;
 }
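The deleted ESSIV boilerplate above now lives in the generic crypto layer: since the essiv template was added to the crypto API (crypto/essiv.c), dm-crypt can ask for the composed algorithm and get the salt hashing and IV encryption for free. A hedged sketch of what the allocation amounts to elsewhere in this file (the exact cipher string is built from the table parameters; "sha256" here is just an example digest):

        /* sketch: the essiv template computes E_salt(sector) itself */
        struct crypto_skcipher *tfm =
                crypto_alloc_skcipher("essiv(cbc(aes),sha256)", 0, 0);

which is why crypt_iv_essiv_gen() above only has to emit the plain little-endian sector number.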
@@ ... @@
        unsigned bs;
        int log;

-       if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+       if (crypt_integrity_aead(cc))
                bs = crypto_aead_blocksize(any_tfm_aead(cc));
        else
                bs = crypto_skcipher_blocksize(any_tfm(cc));
@@ ... @@
        crypto_free_shash(lmk->hash_tfm);
        lmk->hash_tfm = NULL;

-       kzfree(lmk->seed);
+       kfree_sensitive(lmk->seed);
        lmk->seed = NULL;
 }

@@ ... @@
                return -EINVAL;
        }

-       lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+       lmk->hash_tfm = crypto_alloc_shash("md5", 0,
+                                          CRYPTO_ALG_ALLOCATES_MEMORY);
        if (IS_ERR(lmk->hash_tfm)) {
                ti->error = "Error initializing LMK hash";
                return PTR_ERR(lmk->hash_tfm);
@@ ... @@
        int i, r;

        desc->tfm = lmk->hash_tfm;
-       desc->flags = 0;

        r = crypto_shash_init(desc);
        if (r)
@@ ... @@
 {
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

-       kzfree(tcw->iv_seed);
+       kfree_sensitive(tcw->iv_seed);
        tcw->iv_seed = NULL;
-       kzfree(tcw->whitening);
+       kfree_sensitive(tcw->whitening);
        tcw->whitening = NULL;

        if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
@@ ... @@
                return -EINVAL;
        }

-       tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+       tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
+                                           CRYPTO_ALG_ALLOCATES_MEMORY);
        if (IS_ERR(tcw->crc32_tfm)) {
                ti->error = "Error initializing CRC32 in TCW";
                return PTR_ERR(tcw->crc32_tfm);
@@ ... @@

        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
-       desc->flags = 0;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
@@ ... @@
        return 0;
 }

+static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+                             const char *opts)
+{
+       if (crypt_integrity_aead(cc)) {
+               ti->error = "AEAD transforms not supported for EBOIV";
+               return -EINVAL;
+       }
+
+       if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
+               ti->error = "Block size of EBOIV cipher does "
+                           "not match IV size of block cipher";
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
+                             struct dm_crypt_request *dmreq)
+{
+       u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+       struct skcipher_request *req;
+       struct scatterlist src, dst;
+       DECLARE_CRYPTO_WAIT(wait);
+       int err;
+
+       req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+       if (!req)
+               return -ENOMEM;
+
+       memset(buf, 0, cc->iv_size);
+       *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+       sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
+       sg_init_one(&dst, iv, cc->iv_size);
+       skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
+       skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+       err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       skcipher_request_free(req);
+
+       return err;
+}
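A detail worth calling out in crypt_iv_eboiv_gen(): it never allocates a separate ECB transform. CBC-encrypting a single all-zero block with the byte offset as the chaining IV yields E_K(offset XOR 0) = E_K(offset), so the volume's existing CBC tfm produces exactly the encrypted byte-offset IV that BitLocker expects. In equation form, for sector s:

        IV(s) = E_K(le64(s * sector_size))

i.e. one block-cipher invocation with the volume key, done synchronously via the crypto_wait_req() helper.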
+
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
+{
+       struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+
+       crypto_free_skcipher(elephant->tfm);
+       elephant->tfm = NULL;
+}
+
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
+                                const char *opts)
+{
+       struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+       int r;
+
+       elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
+                                             CRYPTO_ALG_ALLOCATES_MEMORY);
+       if (IS_ERR(elephant->tfm)) {
+               r = PTR_ERR(elephant->tfm);
+               elephant->tfm = NULL;
+               return r;
+       }
+
+       r = crypt_iv_eboiv_ctr(cc, ti, NULL);
+       if (r)
+               crypt_iv_elephant_dtr(cc);
+       return r;
+}
+
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+       int i;
+
+       for (i = 0; i < n; i++)
+               d[i] = le32_to_cpu((__le32)d[i]);
+#endif
+}
+
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+       int i;
+
+       for (i = 0; i < n; i++)
+               d[i] = cpu_to_le32((u32)d[i]);
+#endif
+}
+
+static void diffuser_a_decrypt(u32 *d, size_t n)
+{
+       int i, i1, i2, i3;
+
+       for (i = 0; i < 5; i++) {
+               i1 = 0;
+               i2 = n - 2;
+               i3 = n - 5;
+
+               while (i1 < (n - 1)) {
+                       d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+                       i1++; i2++; i3++;
+
+                       if (i3 >= n)
+                               i3 -= n;
+
+                       d[i1] += d[i2] ^ d[i3];
+                       i1++; i2++; i3++;
+
+                       if (i2 >= n)
+                               i2 -= n;
+
+                       d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+                       i1++; i2++; i3++;
+
+                       d[i1] += d[i2] ^ d[i3];
+                       i1++; i2++; i3++;
+               }
+       }
+}
+
+static void diffuser_a_encrypt(u32 *d, size_t n)
+{
+       int i, i1, i2, i3;
+
+       for (i = 0; i < 5; i++) {
+               i1 = n - 1;
+               i2 = n - 2 - 1;
+               i3 = n - 5 - 1;
+
+               while (i1 > 0) {
+                       d[i1] -= d[i2] ^ d[i3];
+                       i1--; i2--; i3--;
+
+                       d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+                       i1--; i2--; i3--;
+
+                       if (i2 < 0)
+                               i2 += n;
+
+                       d[i1] -= d[i2] ^ d[i3];
+                       i1--; i2--; i3--;
+
+                       if (i3 < 0)
+                               i3 += n;
+
+                       d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+                       i1--; i2--; i3--;
+               }
+       }
+}
+
+static void diffuser_b_decrypt(u32 *d, size_t n)
+{
+       int i, i1, i2, i3;
+
+       for (i = 0; i < 3; i++) {
+               i1 = 0;
+               i2 = 2;
+               i3 = 5;
+
+               while (i1 < (n - 1)) {
+                       d[i1] += d[i2] ^ d[i3];
+                       i1++; i2++; i3++;
+
+                       d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+                       i1++; i2++; i3++;
+
+                       if (i2 >= n)
+                               i2 -= n;
+
+                       d[i1] += d[i2] ^ d[i3];
+                       i1++; i2++; i3++;
+
+                       if (i3 >= n)
+                               i3 -= n;
+
+                       d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+                       i1++; i2++; i3++;
+               }
+       }
+}
+
+static void diffuser_b_encrypt(u32 *d, size_t n)
+{
+       int i, i1, i2, i3;
+
+       for (i = 0; i < 3; i++) {
+               i1 = n - 1;
+               i2 = 2 - 1;
+               i3 = 5 - 1;
+
+               while (i1 > 0) {
+                       d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+                       i1--; i2--; i3--;
+
+                       if (i3 < 0)
+                               i3 += n;
+
+                       d[i1] -= d[i2] ^ d[i3];
+                       i1--; i2--; i3--;
+
+                       if (i2 < 0)
+                               i2 += n;
+
+                       d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+                       i1--; i2--; i3--;
+
+                       d[i1] -= d[i2] ^ d[i3];
+                       i1--; i2--; i3--;
+               }
+       }
+}
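The shift pairs in the diffusers are 32-bit rotations written out longhand: (x << 9 | x >> 23) is rotl32(x, 9), and each decrypt loop replays the same rotate-and-XOR terms with += where the encrypt loop used -=, walking the indices in the opposite direction, so every pass is an exact inverse. A minimal standalone illustration of that add/subtract symmetry (hypothetical test program, not part of the patch):

        #include <assert.h>
        #include <stdint.h>

        static uint32_t rotl32(uint32_t x, int r)
        {
                return x << r | x >> (32 - r);
        }

        int main(void)
        {
                uint32_t d = 0xdeadbeef, k2 = 0x12345678, k3 = 0x9abcdef0;
                /* one encrypt-side step, then the matching decrypt-side step */
                uint32_t tmp = d - (k2 ^ rotl32(k3, 9));
                assert(tmp + (k2 ^ rotl32(k3, 9)) == d);
                return 0;
        }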
+
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
+{
+       struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+       u8 *es, *ks, *data, *data2, *data_offset;
+       struct skcipher_request *req;
+       struct scatterlist *sg, *sg2, src, dst;
+       DECLARE_CRYPTO_WAIT(wait);
+       int i, r;
+
+       req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
+       es = kzalloc(16, GFP_NOIO); /* Key for AES */
+       ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
+
+       if (!req || !es || !ks) {
+               r = -ENOMEM;
+               goto out;
+       }
+
+       *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+       /* E(Ks, e(s)) */
+       sg_init_one(&src, es, 16);
+       sg_init_one(&dst, ks, 16);
+       skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+       skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+       r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       if (r)
+               goto out;
+
+       /* E(Ks, e'(s)) */
+       es[15] = 0x80;
+       sg_init_one(&dst, &ks[16], 16);
+       r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       if (r)
+               goto out;
+
+       sg = crypt_get_sg_data(cc, dmreq->sg_out);
+       data = kmap_atomic(sg_page(sg));
+       data_offset = data + sg->offset;
+
+       /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
+       if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+               sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
+               data2 = kmap_atomic(sg_page(sg2));
+               memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
+               kunmap_atomic(data2);
+       }
+
+       if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+               diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
+       }
+
+       for (i = 0; i < (cc->sector_size / 32); i++)
+               crypto_xor(data_offset + i * 32, ks, 32);
+
+       if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+               diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+               diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
+       }
+
+       kunmap_atomic(data);
+out:
+       kfree_sensitive(ks);
+       kfree_sensitive(es);
+       skcipher_request_free(req);
+       return r;
+}
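Putting the pieces together, the write-side transform this implements is the BitLocker diffuser pipeline: the 32-byte sector key Ks is two ECB-AES encryptions (keyed from the dedicated sector-key part of the key material, see crypt_iv_elephant_init() below) of the little-endian byte offset e(s), the second with an 0x80 marker in the final byte; then, following the order of the code above,

        C = CBC-E_K(DiffuserB(DiffuserA(P XOR Ks)))   with IV = eboiv(s)

The read path undoes the same steps in reverse after CBC decryption, via the post hook. Since the write-side work happens in the generator, before the skcipher runs, CRYPT_ENCRYPT_PREPROCESS later redirects sg_in to sg_out so the cipher consumes the already-diffused copy.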
+
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
+                                struct dm_crypt_request *dmreq)
+{
+       int r;
+
+       if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+               r = crypt_iv_elephant(cc, dmreq);
+               if (r)
+                       return r;
+       }
+
+       return crypt_iv_eboiv_gen(cc, iv, dmreq);
+}
+
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
+                                 struct dm_crypt_request *dmreq)
+{
+       if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+               return crypt_iv_elephant(cc, dmreq);
+
+       return 0;
+}
+
+static int crypt_iv_elephant_init(struct crypt_config *cc)
+{
+       struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+       int key_offset = cc->key_size - cc->key_extra_size;
+
+       return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
+}
+
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
+{
+       struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+       u8 key[ELEPHANT_MAX_KEY_SIZE];
+
+       memset(key, 0, cc->key_extra_size);
+       return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
+}
+
 static const struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
 };
@@ ... @@
 };

 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
-       .ctr       = crypt_iv_essiv_ctr,
-       .dtr       = crypt_iv_essiv_dtr,
-       .init      = crypt_iv_essiv_init,
-       .wipe      = crypt_iv_essiv_wipe,
        .generator = crypt_iv_essiv_gen
 };

@@ ... @@

 static struct crypt_iv_operations crypt_iv_random_ops = {
        .generator = crypt_iv_random_gen
+};
+
+static struct crypt_iv_operations crypt_iv_eboiv_ops = {
+       .ctr = crypt_iv_eboiv_ctr,
+       .generator = crypt_iv_eboiv_gen
+};
+
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
+       .ctr = crypt_iv_elephant_ctr,
+       .dtr = crypt_iv_elephant_dtr,
+       .init = crypt_iv_elephant_init,
+       .wipe = crypt_iv_elephant_wipe,
+       .generator = crypt_iv_elephant_gen,
+       .post = crypt_iv_elephant_post
 };

 /*
@@ ... @@
        return iv_of_dmreq(cc, dmreq) + cc->iv_size;
 }

-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
 {
        u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
-       return (uint64_t*) ptr;
+       return (__le64 *) ptr;
 }

 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
@@ ... @@
        struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv, *org_iv, *tag_iv, *tag;
-       uint64_t *sector;
+       __le64 *sector;
        int r = 0;

        BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
@@ ... @@
                r = crypto_aead_decrypt(req);
        }

-       if (r == -EBADMSG)
-               DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+       if (r == -EBADMSG) {
+               char b[BDEVNAME_SIZE];
+               DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
                            (unsigned long long)le64_to_cpu(*sector));
+       }

        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
@@ ... @@
        struct scatterlist *sg_in, *sg_out;
        struct dm_crypt_request *dmreq;
        u8 *iv, *org_iv, *tag_iv;
-       uint64_t *sector;
+       __le64 *sector;
        int r = 0;

        /* Reject unexpected unaligned bio. */
@@ ... @@
                r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
                if (r < 0)
                        return r;
+               /* Data can be already preprocessed in generator */
+               if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
+                       sg_in = sg_out;
                /* Store generated IV in integrity metadata */
                if (cc->integrity_iv_size)
                        memcpy(tag_iv, org_iv, cc->integrity_iv_size);
@@ ... @@
 static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error);

-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
                                    struct convert_context *ctx)
 {
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

-       if (!ctx->r.req)
-               ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req) {
+               ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req)
+                       return -ENOMEM;
+       }

        skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

@@ ... @@
        skcipher_request_set_callback(ctx->r.req,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+       return 0;
 }

-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
                                struct convert_context *ctx)
 {
-       if (!ctx->r.req_aead)
-               ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req_aead) {
+               ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req_aead)
+                       return -ENOMEM;
+       }

        aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

@@ ... @@
        aead_request_set_callback(ctx->r.req_aead,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+       return 0;
 }

-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
        if (crypt_integrity_aead(cc))
-               crypt_alloc_req_aead(cc, ctx);
+               return crypt_alloc_req_aead(cc, ctx);
        else
-               crypt_alloc_req_skcipher(cc, ctx);
+               return crypt_alloc_req_skcipher(cc, ctx);
 }

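The allocators above now return int because they can be reached from softirq context once the no-workqueue flags are in use: GFP_NOIO may sleep, which is forbidden there, so in_interrupt() switches to GFP_ATOMIC, and GFP_ATOMIC mempool allocations are allowed to fail. The failure is translated into BLK_STS_DEV_RESOURCE so the bio is retried from process context — a sketch of the calling pattern (the real code is in crypt_convert() below):

        r = crypt_alloc_req(cc, ctx);
        if (r) {
                /* pool empty in atomic context: punt back to a workqueue */
                complete(&ctx->restart);
                return BLK_STS_DEV_RESOURCE;
        }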
 static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ ... @@
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-                        struct convert_context *ctx)
+                        struct convert_context *ctx, bool atomic, bool reset_pending)
 {
        unsigned int tag_offset = 0;
        unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
        int r;

-       atomic_set(&ctx->cc_pending, 1);
+       /*
+        * if reset_pending is set we are dealing with the bio for the first time,
+        * else we're continuing to work on the previous bio, so don't mess with
+        * the cc_pending counter
+        */
+       if (reset_pending)
+               atomic_set(&ctx->cc_pending, 1);

        while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

-               crypt_alloc_req(cc, ctx);
+               r = crypt_alloc_req(cc, ctx);
+               if (r) {
+                       complete(&ctx->restart);
+                       return BLK_STS_DEV_RESOURCE;
+               }
+
                atomic_inc(&ctx->cc_pending);

                if (crypt_integrity_aead(cc))
@@ ... @@
                 * but the driver request queue is full, let's wait.
                 */
                case -EBUSY:
-                       wait_for_completion(&ctx->restart);
+                       if (in_interrupt()) {
+                               if (try_wait_for_completion(&ctx->restart)) {
+                                       /*
+                                        * we don't have to block to wait for completion,
+                                        * so proceed
+                                        */
+                               } else {
+                                       /*
+                                        * we can't wait for completion without blocking
+                                        * exit and continue processing in a workqueue
+                                        */
+                                       ctx->r.req = NULL;
+                                       ctx->cc_sector += sector_step;
+                                       tag_offset++;
+                                       return BLK_STS_DEV_RESOURCE;
+                               }
+                       } else {
+                               wait_for_completion(&ctx->restart);
+                       }
                        reinit_completion(&ctx->restart);
-                       /* fall through */
+                       fallthrough;
                /*
                 * The request is queued and processed asynchronously,
                 * completion function kcryptd_async_done() will be called.
@@ ... @@
                        atomic_dec(&ctx->cc_pending);
                        ctx->cc_sector += sector_step;
                        tag_offset++;
-                       cond_resched();
+                       if (!atomic)
+                               cond_resched();
                        continue;
                /*
                 * There was a data integrity error.
@@ ... @@

 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 {
-       unsigned int i;
        struct bio_vec *bv;
+       struct bvec_iter_all iter_all;

-       bio_for_each_segment_all(bv, clone, i) {
+       bio_for_each_segment_all(bv, clone, iter_all) {
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, &cc->page_pool);
        }
@@ ... @@
 static void crypt_inc_pending(struct dm_crypt_io *io)
 {
        atomic_inc(&io->io_pending);
+}
+
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       bio_endio(io->base_bio);
 }

 /*
@@ ... @@
        kfree(io->integrity_metadata);

        base_bio->bi_status = error;
-       bio_endio(base_bio);
+
+       /*
+        * If we are running this function from our tasklet,
+        * we can't call bio_endio() here, because it will call
+        * clone_endio() from dm.c, which in turn will
+        * free the current struct dm_crypt_io structure with
+        * our tasklet. In this case we need to delay bio_endio()
+        * execution to after the tasklet is done and dequeued.
+        */
+       if (tasklet_trylock(&io->tasklet)) {
+               tasklet_unlock(&io->tasklet);
+               bio_endio(base_bio);
+               return;
+       }
+
+       INIT_WORK(&io->work, kcryptd_io_bio_endio);
+       queue_work(cc->io_queue, &io->work);
 }

 /*
@@ ... @@
                return 1;
        }

-       generic_make_request(clone);
+       submit_bio_noacct(clone);
        return 0;
 }

@@ ... @@
 {
        struct bio *clone = io->ctx.bio_out;

-       generic_make_request(clone);
+       submit_bio_noacct(clone);
 }

 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ ... @@

        clone->bi_iter.bi_sector = cc->start + io->sector;

-       if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
-               generic_make_request(clone);
+       if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
+           test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
+               submit_bio_noacct(clone);
                return;
        }

@@ ... @@
        spin_unlock_irqrestore(&cc->write_thread_lock, flags);
 }

+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
+                                      struct convert_context *ctx)
+
+{
+       if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
+               return false;
+
+       /*
+        * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
+        * constraints so they do not need to be issued inline by
+        * kcryptd_crypt_write_convert().
+        */
+       switch (bio_op(ctx->bio_in)) {
+       case REQ_OP_WRITE:
+       case REQ_OP_WRITE_SAME:
+       case REQ_OP_WRITE_ZEROES:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       struct convert_context *ctx = &io->ctx;
+       int crypt_finished;
+       sector_t sector = io->sector;
+       blk_status_t r;
+
+       wait_for_completion(&ctx->restart);
+       reinit_completion(&ctx->restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+       crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+       if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+               /* Wait for completion signaled by kcryptd_async_done() */
+               wait_for_completion(&ctx->restart);
+               crypt_finished = 1;
+       }
+
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
+       }
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
+       struct convert_context *ctx = &io->ctx;
        struct bio *clone;
        int crypt_finished;
        sector_t sector = io->sector;
@@ ... @@
         * Prevent io from disappearing until this function completes.
         */
        crypt_inc_pending(io);
-       crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+       crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);

        clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
        if (unlikely(!clone)) {
@@ ... @@
        sector += bio_sectors(clone);

        crypt_inc_pending(io);
-       r = crypt_convert(cc, &io->ctx);
+       r = crypt_convert(cc, ctx,
+                         test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        * (TODO: is it actually possible to be in softirq in the write path?)
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;
-       crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+       crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+       if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+               /* Wait for completion signaled by kcryptd_async_done() */
+               wait_for_completion(&ctx->restart);
+               crypt_finished = 1;
+       }

        /* Encryption was already finished, submit io now */
        if (crypt_finished) {
@@ ... @@
        crypt_dec_pending(io);
 }

+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       blk_status_t r;
+
+       wait_for_completion(&io->ctx.restart);
+       reinit_completion(&io->ctx.restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+
+       if (atomic_dec_and_test(&io->ctx.cc_pending))
+               kcryptd_crypt_read_done(io);
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ ... @@
        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);

-       r = crypt_convert(cc, &io->ctx);
+       r = crypt_convert(cc, &io->ctx,
+                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;

@@ ... @@
        error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

        if (error == -EBADMSG) {
-               DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+               char b[BDEVNAME_SIZE];
+               DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
                            (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
                io->error = BLK_STS_PROTECTION;
        } else if (error < 0)
@@ ... @@
        if (!atomic_dec_and_test(&ctx->cc_pending))
                return;

-       if (bio_data_dir(io->base_bio) == READ)
+       /*
+        * The request is fully completed: for inline writes, let
+        * kcryptd_crypt_write_convert() do the IO submission.
+        */
+       if (bio_data_dir(io->base_bio) == READ) {
                kcryptd_crypt_read_done(io);
-       else
-               kcryptd_crypt_write_io_submit(io, 1);
+               return;
+       }
+
+       if (kcryptd_crypt_write_inline(cc, ctx)) {
+               complete(&ctx->restart);
+               return;
+       }
+
+       kcryptd_crypt_write_io_submit(io, 1);
 }

 static void kcryptd_crypt(struct work_struct *work)
@@ ... @@
                kcryptd_crypt_write_convert(io);
 }

+static void kcryptd_crypt_tasklet(unsigned long work)
+{
+       kcryptd_crypt((struct work_struct *)work);
+}
+
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
+
+       if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
+           (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
+               /*
+                * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+                * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+                * it is being executed with irqs disabled.
+                */
+               if (in_irq() || irqs_disabled()) {
+                       tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+                       tasklet_schedule(&io->tasklet);
+                       return;
+               }
+
+               kcryptd_crypt(&io->work);
+               return;
+       }

        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
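Bypassing the queues is opt-in: the optional table parameters no_read_workqueue and no_write_workqueue (added by this patch, e.g. a table ending in "... 0 2 no_read_workqueue no_write_workqueue") run en/decryption directly in the completion context for lower latency on fast devices. When that context is hard IRQ or runs with interrupts disabled, the work is bounced only as far as a tasklet (softirq) rather than a full workqueue.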
|---|
| .. | .. |
|---|
| 1884 | 2291 | return -ENOMEM; |
|---|
| 1885 | 2292 | |
|---|
| 1886 | 2293 | for (i = 0; i < cc->tfms_count; i++) { |
|---|
| 1887 | | - cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); |
|---|
| 2294 | + cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, |
|---|
| 2295 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
|---|
| 1888 | 2296 | if (IS_ERR(cc->cipher_tfm.tfms[i])) { |
|---|
| 1889 | 2297 | err = PTR_ERR(cc->cipher_tfm.tfms[i]); |
|---|
| 1890 | 2298 | crypt_free_tfms(cc); |
|---|
| .. | .. |
|---|
| 1897 | 2305 | * algorithm implementation is used. Help people debug performance |
|---|
| 1898 | 2306 | * problems by logging the ->cra_driver_name. |
|---|
| 1899 | 2307 | */ |
|---|
| 1900 | | - DMINFO("%s using implementation \"%s\"", ciphermode, |
|---|
| 2308 | + DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
|---|
| 1901 | 2309 | crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name); |
|---|
| 1902 | 2310 | return 0; |
|---|
| 1903 | 2311 | } |
|---|
| .. | .. |
|---|
| 1910 | 2318 | if (!cc->cipher_tfm.tfms) |
|---|
| 1911 | 2319 | return -ENOMEM; |
|---|
| 1912 | 2320 | |
|---|
| 1913 | | - cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); |
|---|
| 2321 | + cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, |
|---|
| 2322 | + CRYPTO_ALG_ALLOCATES_MEMORY); |
|---|
| 1914 | 2323 | if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { |
|---|
| 1915 | 2324 | err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); |
|---|
| 1916 | 2325 | crypt_free_tfms(cc); |
|---|
| 1917 | 2326 | return err; |
|---|
| 1918 | 2327 | } |
|---|
| 1919 | 2328 | |
|---|
| 1920 | | - DMINFO("%s using implementation \"%s\"", ciphermode, |
|---|
| 2329 | + DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
|---|
| 1921 | 2330 | crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name); |
|---|
| 1922 | 2331 | return 0; |
|---|
| 1923 | 2332 | } |
|---|
| .. | .. |
|---|
| 2011 | 2420 | return false; |
|---|
| 2012 | 2421 | } |
|---|
| 2013 | 2422 | |
|---|
| 2423 | +static int set_key_user(struct crypt_config *cc, struct key *key) |
|---|
| 2424 | +{ |
|---|
| 2425 | + const struct user_key_payload *ukp; |
|---|
| 2426 | + |
|---|
| 2427 | + ukp = user_key_payload_locked(key); |
|---|
| 2428 | + if (!ukp) |
|---|
| 2429 | + return -EKEYREVOKED; |
|---|
| 2430 | + |
|---|
| 2431 | + if (cc->key_size != ukp->datalen) |
|---|
| 2432 | + return -EINVAL; |
|---|
| 2433 | + |
|---|
| 2434 | + memcpy(cc->key, ukp->data, cc->key_size); |
|---|
| 2435 | + |
|---|
| 2436 | + return 0; |
|---|
| 2437 | +} |
|---|
| 2438 | + |
|---|
| 2439 | +#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) |
|---|
| 2440 | +static int set_key_encrypted(struct crypt_config *cc, struct key *key) |
|---|
| 2441 | +{ |
|---|
| 2442 | + const struct encrypted_key_payload *ekp; |
|---|
| 2443 | + |
|---|
| 2444 | + ekp = key->payload.data[0]; |
|---|
| 2445 | + if (!ekp) |
|---|
| 2446 | + return -EKEYREVOKED; |
|---|
| 2447 | + |
|---|
| 2448 | + if (cc->key_size != ekp->decrypted_datalen) |
|---|
| 2449 | + return -EINVAL; |
|---|
| 2450 | + |
|---|
| 2451 | + memcpy(cc->key, ekp->decrypted_data, cc->key_size); |
|---|
| 2452 | + |
|---|
| 2453 | + return 0; |
|---|
| 2454 | +} |
|---|
| 2455 | +#endif /* CONFIG_ENCRYPTED_KEYS */ |
|---|
| 2456 | + |
|---|
| 2014 | 2457 | static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) |
|---|
| 2015 | 2458 | { |
|---|
| 2016 | 2459 | char *new_key_string, *key_desc; |
|---|
| 2017 | 2460 | int ret; |
|---|
| 2461 | + struct key_type *type; |
|---|
| 2018 | 2462 | struct key *key; |
|---|
| 2019 | | - const struct user_key_payload *ukp; |
|---|
| 2463 | + int (*set_key)(struct crypt_config *cc, struct key *key); |
|---|
| 2020 | 2464 | |
|---|
| 2021 | 2465 | /* |
|---|
| 2022 | 2466 | * Reject key_string with whitespace. dm core currently lacks code for |
|---|
| .. | .. |
|---|
| 2032 | 2476 | if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) |
|---|
| 2033 | 2477 | return -EINVAL; |
|---|
| 2034 | 2478 | |
|---|
| 2035 | | - if (strncmp(key_string, "logon:", key_desc - key_string + 1) && |
|---|
| 2036 | | - strncmp(key_string, "user:", key_desc - key_string + 1)) |
|---|
| 2479 | + if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) { |
|---|
| 2480 | + type = &key_type_logon; |
|---|
| 2481 | + set_key = set_key_user; |
|---|
| 2482 | + } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) { |
|---|
| 2483 | + type = &key_type_user; |
|---|
| 2484 | + set_key = set_key_user; |
|---|
| 2485 | +#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) |
|---|
| 2486 | + } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) { |
|---|
| 2487 | + type = &key_type_encrypted; |
|---|
| 2488 | + set_key = set_key_encrypted; |
|---|
| 2489 | +#endif |
|---|
| 2490 | + } else { |
|---|
| 2037 | 2491 | return -EINVAL; |
|---|
| 2492 | + } |
|---|
| 2038 | 2493 | |
|---|
| 2039 | 2494 | new_key_string = kstrdup(key_string, GFP_KERNEL); |
|---|
| 2040 | 2495 | if (!new_key_string) |
|---|
| 2041 | 2496 | return -ENOMEM; |
|---|
| 2042 | 2497 | |
|---|
| 2043 | | - key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user, |
|---|
| 2044 | | - key_desc + 1, NULL); |
|---|
| 2498 | + key = request_key(type, key_desc + 1, NULL); |
|---|
| 2045 | 2499 | if (IS_ERR(key)) { |
|---|
| 2046 | | - kzfree(new_key_string); |
|---|
| 2500 | + kfree_sensitive(new_key_string); |
|---|
| 2047 | 2501 | return PTR_ERR(key); |
|---|
| 2048 | 2502 | } |
|---|
| 2049 | 2503 | |
|---|
| 2050 | 2504 | down_read(&key->sem); |
|---|
| 2051 | 2505 | |
|---|
| 2052 | | - ukp = user_key_payload_locked(key); |
|---|
| 2053 | | - if (!ukp) { |
|---|
| 2506 | + ret = set_key(cc, key); |
|---|
| 2507 | + if (ret < 0) { |
|---|
| 2054 | 2508 | up_read(&key->sem); |
|---|
| 2055 | 2509 | key_put(key); |
|---|
| 2056 | | - kzfree(new_key_string); |
|---|
| 2057 | | - return -EKEYREVOKED; |
|---|
| 2510 | + kfree_sensitive(new_key_string); |
|---|
| 2511 | + return ret; |
|---|
| 2058 | 2512 | } |
|---|
| 2059 | | - |
|---|
| 2060 | | - if (cc->key_size != ukp->datalen) { |
|---|
| 2061 | | - up_read(&key->sem); |
|---|
| 2062 | | - key_put(key); |
|---|
| 2063 | | - kzfree(new_key_string); |
|---|
| 2064 | | - return -EINVAL; |
|---|
| 2065 | | - } |
|---|
| 2066 | | - |
|---|
| 2067 | | - memcpy(cc->key, ukp->data, cc->key_size); |
|---|
| 2068 | 2513 | |
|---|
| 2069 | 2514 | up_read(&key->sem); |
|---|
| 2070 | 2515 | key_put(key); |
|---|
| .. | .. |
|---|
| 2076 | 2521 | |
|---|
| 2077 | 2522 | if (!ret) { |
|---|
| 2078 | 2523 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
|---|
| 2079 | | - kzfree(cc->key_string); |
|---|
| 2524 | + kfree_sensitive(cc->key_string); |
|---|
| 2080 | 2525 | cc->key_string = new_key_string; |
|---|
| 2081 | 2526 | } else |
|---|
| 2082 | | - kzfree(new_key_string); |
|---|
| 2527 | + kfree_sensitive(new_key_string); |
|---|
| 2083 | 2528 | |
|---|
| 2084 | 2529 | return ret; |
|---|
| 2085 | 2530 | } |
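
Editorial note: with the dispatch above, a key_string of the form ":<size>:<type>:<description>" now accepts "logon", "user" and, when CONFIG_ENCRYPTED_KEYS is enabled, "encrypted" key types. A hedged userspace sketch of staging such a key with libkeyutils (the description "dmc:vol0" and the 64-byte size are illustrative):

    #include <keyutils.h>
    #include <stdio.h>

    /* Hypothetical helper: stage a volume key so a dm-crypt table can
     * reference it as ":64:logon:dmc:vol0" instead of hex key material. */
    static int load_volume_key(const unsigned char *key, size_t len)
    {
            key_serial_t id;

            /* "logon" keys can be added but never read back by user space. */
            id = add_key("logon", "dmc:vol0", key, len, KEY_SPEC_USER_KEYRING);
            if (id < 0) {
                    perror("add_key");
                    return -1;
            }
            printf("key_string for the table: :%zu:logon:dmc:vol0\n", len);
            return 0;
    }

Link with -lkeyutils; a "user" key works the same way but can also be read back from user space.
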
|---|
| .. | .. |
|---|
| 2116 | 2561 | |
|---|
| 2117 | 2562 | static int get_key_size(char **key_string) |
|---|
| 2118 | 2563 | { |
|---|
| 2119 | | - return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; |
|---|
| 2564 | + return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1); |
|---|
| 2120 | 2565 | } |
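
Editorial note: in this !CONFIG_KEYS fallback the key always arrives hex-encoded, so two characters describe one byte and the size is simply strlen() >> 1; a leading ':' would mean a keyring reference, which is unsupported here. For example:

    /* A 32-character hex string describes a 16-byte key. */
    const char hex_key[] = "00112233445566778899aabbccddeeff";
    /* strlen(hex_key) >> 1 == 16 */
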
|---|
| 2121 | 2566 | |
|---|
| 2122 | | -#endif |
|---|
| 2567 | +#endif /* CONFIG_KEYS */ |
|---|
| 2123 | 2568 | |
|---|
| 2124 | 2569 | static int crypt_set_key(struct crypt_config *cc, char *key) |
|---|
| 2125 | 2570 | { |
|---|
| .. | .. |
|---|
| 2140 | 2585 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
|---|
| 2141 | 2586 | |
|---|
| 2142 | 2587 | /* wipe references to any kernel keyring key */ |
|---|
| 2143 | | - kzfree(cc->key_string); |
|---|
| 2588 | + kfree_sensitive(cc->key_string); |
|---|
| 2144 | 2589 | cc->key_string = NULL; |
|---|
| 2145 | 2590 | |
|---|
| 2146 | 2591 | /* Decode key from its hex representation. */ |
|---|
| .. | .. |
|---|
| 2164 | 2609 | |
|---|
| 2165 | 2610 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
|---|
| 2166 | 2611 | get_random_bytes(&cc->key, cc->key_size); |
|---|
| 2167 | | - kzfree(cc->key_string); |
|---|
| 2612 | + |
|---|
| 2613 | + /* Wipe IV private keys */ |
|---|
| 2614 | + if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
|---|
| 2615 | + r = cc->iv_gen_ops->wipe(cc); |
|---|
| 2616 | + if (r) |
|---|
| 2617 | + return r; |
|---|
| 2618 | + } |
|---|
| 2619 | + |
|---|
| 2620 | + kfree_sensitive(cc->key_string); |
|---|
| 2168 | 2621 | cc->key_string = NULL; |
|---|
| 2169 | 2622 | r = crypt_setkey(cc); |
|---|
| 2170 | 2623 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); |
|---|
| .. | .. |
|---|
| 2174 | 2627 | |
|---|
| 2175 | 2628 | static void crypt_calculate_pages_per_client(void) |
|---|
| 2176 | 2629 | { |
|---|
| 2177 | | - unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100; |
|---|
| 2630 | + unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; |
|---|
| 2178 | 2631 | |
|---|
| 2179 | 2632 | if (!dm_crypt_clients_n) |
|---|
| 2180 | 2633 | return; |
|---|
| .. | .. |
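
Editorial note: totalram_pages and totalhigh_pages became accessor functions upstream (v5.0), hence the added parentheses; the sizing rule itself is unchanged. A worked example, assuming DM_CRYPT_MEMORY_PERCENT is 2 as in mainline:

    /* 8 GiB of low memory = 2097152 pages of 4 KiB, two clients: */
    unsigned long pages = 2097152UL * 2 / 100;      /* 41943 pages total */
    unsigned long per_client = pages / 2;           /* ~20971 pages each */
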
|---|
| 2248 | 2701 | if (cc->dev) |
|---|
| 2249 | 2702 | dm_put_device(ti, cc->dev); |
|---|
| 2250 | 2703 | |
|---|
| 2251 | | - kzfree(cc->cipher); |
|---|
| 2252 | | - kzfree(cc->cipher_string); |
|---|
| 2253 | | - kzfree(cc->key_string); |
|---|
| 2254 | | - kzfree(cc->cipher_auth); |
|---|
| 2255 | | - kzfree(cc->authenc_key); |
|---|
| 2704 | + kfree_sensitive(cc->cipher_string); |
|---|
| 2705 | + kfree_sensitive(cc->key_string); |
|---|
| 2706 | + kfree_sensitive(cc->cipher_auth); |
|---|
| 2707 | + kfree_sensitive(cc->authenc_key); |
|---|
| 2256 | 2708 | |
|---|
| 2257 | 2709 | mutex_destroy(&cc->bio_alloc_lock); |
|---|
| 2258 | 2710 | |
|---|
| 2259 | 2711 | /* Must zero key material before freeing */ |
|---|
| 2260 | | - kzfree(cc); |
|---|
| 2712 | + kfree_sensitive(cc); |
|---|
| 2261 | 2713 | |
|---|
| 2262 | 2714 | spin_lock(&dm_crypt_clients_lock); |
|---|
| 2263 | 2715 | WARN_ON(!dm_crypt_clients_n); |
|---|
| .. | .. |
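
Editorial note: kzfree() was renamed to kfree_sensitive() upstream to make its purpose explicit; the behaviour is the same — the buffer is wiped with memzero_explicit() before being freed, so key material does not linger in freed heap memory. A minimal sketch:

    #include <linux/slab.h>

    static void drop_secret(char *secret)
    {
            kfree_sensitive(secret);        /* wipe, then kfree() */
    }
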
|---|
| 2299 | 2751 | cc->iv_gen_ops = &crypt_iv_benbi_ops; |
|---|
| 2300 | 2752 | else if (strcmp(ivmode, "null") == 0) |
|---|
| 2301 | 2753 | cc->iv_gen_ops = &crypt_iv_null_ops; |
|---|
| 2302 | | - else if (strcmp(ivmode, "lmk") == 0) { |
|---|
| 2754 | + else if (strcmp(ivmode, "eboiv") == 0) |
|---|
| 2755 | + cc->iv_gen_ops = &crypt_iv_eboiv_ops; |
|---|
| 2756 | + else if (strcmp(ivmode, "elephant") == 0) { |
|---|
| 2757 | + cc->iv_gen_ops = &crypt_iv_elephant_ops; |
|---|
| 2758 | + cc->key_parts = 2; |
|---|
| 2759 | + cc->key_extra_size = cc->key_size / 2; |
|---|
| 2760 | + if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE) |
|---|
| 2761 | + return -EINVAL; |
|---|
| 2762 | + set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags); |
|---|
| 2763 | + } else if (strcmp(ivmode, "lmk") == 0) { |
|---|
| 2303 | 2764 | cc->iv_gen_ops = &crypt_iv_lmk_ops; |
|---|
| 2304 | 2765 | /* |
|---|
| 2305 | 2766 | * Versions 2 and 3 are recognised according |
|---|
| .. | .. |
|---|
| 2328 | 2789 | } |
|---|
| 2329 | 2790 | |
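
Editorial note: the new "elephant" IV implements BitLocker's CBC mode with the Elephant diffuser, so the supplied table key carries both the block-cipher key and the diffuser key (key_parts = 2). A sketch of the resulting layout, assuming a 64-byte key; as with the other IV modes that use key_extra_size, the extra material sits at the tail:

    /* bytes  0..31  block cipher key  (key_size - key_extra_size)
     * bytes 32..63  diffuser key      (key_extra_size = key_size / 2,
     *                                  at most ELEPHANT_MAX_KEY_SIZE) */
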
|---|
| 2330 | 2791 | /* |
|---|
| 2331 | | - * Workaround to parse cipher algorithm from crypto API spec. |
|---|
| 2332 | | - * The cc->cipher is currently used only in ESSIV. |
|---|
| 2333 | | - * This should be probably done by crypto-api calls (once available...) |
|---|
| 2334 | | - */ |
|---|
| 2335 | | -static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) |
|---|
| 2336 | | -{ |
|---|
| 2337 | | - const char *alg_name = NULL; |
|---|
| 2338 | | - char *start, *end; |
|---|
| 2339 | | - |
|---|
| 2340 | | - if (crypt_integrity_aead(cc)) { |
|---|
| 2341 | | - alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); |
|---|
| 2342 | | - if (!alg_name) |
|---|
| 2343 | | - return -EINVAL; |
|---|
| 2344 | | - if (crypt_integrity_hmac(cc)) { |
|---|
| 2345 | | - alg_name = strchr(alg_name, ','); |
|---|
| 2346 | | - if (!alg_name) |
|---|
| 2347 | | - return -EINVAL; |
|---|
| 2348 | | - } |
|---|
| 2349 | | - alg_name++; |
|---|
| 2350 | | - } else { |
|---|
| 2351 | | - alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); |
|---|
| 2352 | | - if (!alg_name) |
|---|
| 2353 | | - return -EINVAL; |
|---|
| 2354 | | - } |
|---|
| 2355 | | - |
|---|
| 2356 | | - start = strchr(alg_name, '('); |
|---|
| 2357 | | - end = strchr(alg_name, ')'); |
|---|
| 2358 | | - |
|---|
| 2359 | | - if (!start && !end) { |
|---|
| 2360 | | - cc->cipher = kstrdup(alg_name, GFP_KERNEL); |
|---|
| 2361 | | - return cc->cipher ? 0 : -ENOMEM; |
|---|
| 2362 | | - } |
|---|
| 2363 | | - |
|---|
| 2364 | | - if (!start || !end || ++start >= end) |
|---|
| 2365 | | - return -EINVAL; |
|---|
| 2366 | | - |
|---|
| 2367 | | - cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); |
|---|
| 2368 | | - if (!cc->cipher) |
|---|
| 2369 | | - return -ENOMEM; |
|---|
| 2370 | | - |
|---|
| 2371 | | - strncpy(cc->cipher, start, end - start); |
|---|
| 2372 | | - |
|---|
| 2373 | | - return 0; |
|---|
| 2374 | | -} |
|---|
| 2375 | | - |
|---|
| 2376 | | -/* |
|---|
| 2377 | 2792 | * Workaround to parse HMAC algorithm from AEAD crypto API spec. |
|---|
| 2378 | 2793 | * The HMAC is needed to calculate tag size (HMAC digest size). |
|---|
| 2379 | 2794 | * This should probably be done by crypto-api calls (once available...) |
|---|
| .. | .. |
|---|
| 2396 | 2811 | return -ENOMEM; |
|---|
| 2397 | 2812 | strncpy(mac_alg, start, end - start); |
|---|
| 2398 | 2813 | |
|---|
| 2399 | | - mac = crypto_alloc_ahash(mac_alg, 0, 0); |
|---|
| 2814 | + mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY); |
|---|
| 2400 | 2815 | kfree(mac_alg); |
|---|
| 2401 | 2816 | |
|---|
| 2402 | 2817 | if (IS_ERR(mac)) |
|---|
| .. | .. |
|---|
| 2416 | 2831 | char **ivmode, char **ivopts) |
|---|
| 2417 | 2832 | { |
|---|
| 2418 | 2833 | struct crypt_config *cc = ti->private; |
|---|
| 2419 | | - char *tmp, *cipher_api; |
|---|
| 2834 | + char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME]; |
|---|
| 2420 | 2835 | int ret = -EINVAL; |
|---|
| 2421 | 2836 | |
|---|
| 2422 | 2837 | cc->tfms_count = 1; |
|---|
| .. | .. |
|---|
| 2442 | 2857 | /* The rest is crypto API spec */ |
|---|
| 2443 | 2858 | cipher_api = tmp; |
|---|
| 2444 | 2859 | |
|---|
| 2860 | + /* Alloc AEAD, can be used only in new format. */ |
|---|
| 2861 | + if (crypt_integrity_aead(cc)) { |
|---|
| 2862 | + ret = crypt_ctr_auth_cipher(cc, cipher_api); |
|---|
| 2863 | + if (ret < 0) { |
|---|
| 2864 | + ti->error = "Invalid AEAD cipher spec"; |
|---|
| 2865 | + return -ENOMEM; |
|---|
| 2866 | + } |
|---|
| 2867 | + } |
|---|
| 2868 | + |
|---|
| 2445 | 2869 | if (*ivmode && !strcmp(*ivmode, "lmk")) |
|---|
| 2446 | 2870 | cc->tfms_count = 64; |
|---|
| 2871 | + |
|---|
| 2872 | + if (*ivmode && !strcmp(*ivmode, "essiv")) { |
|---|
| 2873 | + if (!*ivopts) { |
|---|
| 2874 | + ti->error = "Digest algorithm missing for ESSIV mode"; |
|---|
| 2875 | + return -EINVAL; |
|---|
| 2876 | + } |
|---|
| 2877 | + ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)", |
|---|
| 2878 | + cipher_api, *ivopts); |
|---|
| 2879 | + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { |
|---|
| 2880 | + ti->error = "Cannot allocate cipher string"; |
|---|
| 2881 | + return -ENOMEM; |
|---|
| 2882 | + } |
|---|
| 2883 | + cipher_api = buf; |
|---|
| 2884 | + } |
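
Editorial note: ESSIV is no longer open-coded in dm-crypt; it is provided by the generic "essiv" crypto API template (added in v5.4), so the IV mode is folded into the algorithm name before allocation. A sketch of the string this branch composes for a cbc(aes) cipher with a sha256 ESSIV digest:

    char buf[CRYPTO_MAX_ALG_NAME];

    snprintf(buf, sizeof(buf), "essiv(%s,%s)", "cbc(aes)", "sha256");
    /* buf == "essiv(cbc(aes),sha256)" */
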
|---|
| 2447 | 2885 | |
|---|
| 2448 | 2886 | cc->key_parts = cc->tfms_count; |
|---|
| 2449 | 2887 | |
|---|
| .. | .. |
|---|
| 2454 | 2892 | return ret; |
|---|
| 2455 | 2893 | } |
|---|
| 2456 | 2894 | |
|---|
| 2457 | | - /* Alloc AEAD, can be used only in new format. */ |
|---|
| 2458 | | - if (crypt_integrity_aead(cc)) { |
|---|
| 2459 | | - ret = crypt_ctr_auth_cipher(cc, cipher_api); |
|---|
| 2460 | | - if (ret < 0) { |
|---|
| 2461 | | - ti->error = "Invalid AEAD cipher spec"; |
|---|
| 2462 | | - return -ENOMEM; |
|---|
| 2463 | | - } |
|---|
| 2895 | + if (crypt_integrity_aead(cc)) |
|---|
| 2464 | 2896 | cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); |
|---|
| 2465 | | - } else |
|---|
| 2897 | + else |
|---|
| 2466 | 2898 | cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); |
|---|
| 2467 | | - |
|---|
| 2468 | | - ret = crypt_ctr_blkdev_cipher(cc); |
|---|
| 2469 | | - if (ret < 0) { |
|---|
| 2470 | | - ti->error = "Cannot allocate cipher string"; |
|---|
| 2471 | | - return -ENOMEM; |
|---|
| 2472 | | - } |
|---|
| 2473 | 2899 | |
|---|
| 2474 | 2900 | return 0; |
|---|
| 2475 | 2901 | } |
|---|
| .. | .. |
|---|
| 2505 | 2931 | } |
|---|
| 2506 | 2932 | cc->key_parts = cc->tfms_count; |
|---|
| 2507 | 2933 | |
|---|
| 2508 | | - cc->cipher = kstrdup(cipher, GFP_KERNEL); |
|---|
| 2509 | | - if (!cc->cipher) |
|---|
| 2510 | | - goto bad_mem; |
|---|
| 2511 | | - |
|---|
| 2512 | 2934 | chainmode = strsep(&tmp, "-"); |
|---|
| 2513 | 2935 | *ivmode = strsep(&tmp, ":"); |
|---|
| 2514 | 2936 | *ivopts = tmp; |
|---|
| .. | .. |
|---|
| 2531 | 2953 | if (!cipher_api) |
|---|
| 2532 | 2954 | goto bad_mem; |
|---|
| 2533 | 2955 | |
|---|
| 2534 | | - ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
|---|
| 2535 | | - "%s(%s)", chainmode, cipher); |
|---|
| 2536 | | - if (ret < 0) { |
|---|
| 2956 | + if (*ivmode && !strcmp(*ivmode, "essiv")) { |
|---|
| 2957 | + if (!*ivopts) { |
|---|
| 2958 | + ti->error = "Digest algorithm missing for ESSIV mode"; |
|---|
| 2959 | + kfree(cipher_api); |
|---|
| 2960 | + return -EINVAL; |
|---|
| 2961 | + } |
|---|
| 2962 | + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
|---|
| 2963 | + "essiv(%s(%s),%s)", chainmode, cipher, *ivopts); |
|---|
| 2964 | + } else { |
|---|
| 2965 | + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, |
|---|
| 2966 | + "%s(%s)", chainmode, cipher); |
|---|
| 2967 | + } |
|---|
| 2968 | + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { |
|---|
| 2537 | 2969 | kfree(cipher_api); |
|---|
| 2538 | 2970 | goto bad_mem; |
|---|
| 2539 | 2971 | } |
|---|
| .. | .. |
|---|
| 2614 | 3046 | struct crypt_config *cc = ti->private; |
|---|
| 2615 | 3047 | struct dm_arg_set as; |
|---|
| 2616 | 3048 | static const struct dm_arg _args[] = { |
|---|
| 2617 | | - {0, 6, "Invalid number of feature args"}, |
|---|
| 3049 | + {0, 8, "Invalid number of feature args"}, |
|---|
| 2618 | 3050 | }; |
|---|
| 2619 | 3051 | unsigned int opt_params, val; |
|---|
| 2620 | 3052 | const char *opt_string, *sval; |
|---|
| .. | .. |
|---|
| 2644 | 3076 | |
|---|
| 2645 | 3077 | else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) |
|---|
| 2646 | 3078 | set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); |
|---|
| 3079 | + else if (!strcasecmp(opt_string, "no_read_workqueue")) |
|---|
| 3080 | + set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); |
|---|
| 3081 | + else if (!strcasecmp(opt_string, "no_write_workqueue")) |
|---|
| 3082 | + set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
|---|
| 2647 | 3083 | else if (sscanf(opt_string, "integrity:%u:", &val) == 1) { |
|---|
| 2648 | 3084 | if (val == 0 || val > MAX_TAG_SIZE) { |
|---|
| 2649 | 3085 | ti->error = "Invalid integrity arguments"; |
|---|
| .. | .. |
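
Editorial note: no_read_workqueue and no_write_workqueue bypass the kcryptd queues and perform the crypto synchronously in the I/O path, which can reduce latency on fast (e.g. NVMe) devices. A hypothetical table line enabling both (device, sizes and key reference are illustrative; the "2" is the optional-argument count):

    0 2097152 crypt aes-xts-plain64 :64:logon:dmc:vol0 0 /dev/nvme0n1 0 2 no_read_workqueue no_write_workqueue
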
|---|
| 2684 | 3120 | return 0; |
|---|
| 2685 | 3121 | } |
|---|
| 2686 | 3122 | |
|---|
| 3123 | +#ifdef CONFIG_BLK_DEV_ZONED |
|---|
| 3124 | + |
|---|
| 3125 | +static int crypt_report_zones(struct dm_target *ti, |
|---|
| 3126 | + struct dm_report_zones_args *args, unsigned int nr_zones) |
|---|
| 3127 | +{ |
|---|
| 3128 | + struct crypt_config *cc = ti->private; |
|---|
| 3129 | + sector_t sector = cc->start + dm_target_offset(ti, args->next_sector); |
|---|
| 3130 | + |
|---|
| 3131 | + args->start = cc->start; |
|---|
| 3132 | + return blkdev_report_zones(cc->dev->bdev, sector, nr_zones, |
|---|
| 3133 | + dm_report_zones_cb, args); |
|---|
| 3134 | +} |
|---|
| 3135 | + |
|---|
| 3136 | +#endif |
|---|
| 3137 | + |
|---|
| 2687 | 3138 | /* |
|---|
| 2688 | 3139 | * Construct an encryption mapping: |
|---|
| 2689 | 3140 | * <cipher> [<key>|:<key_size>:<user|logon|encrypted>:<key_description>] <iv_offset> <dev_path> <start> |
|---|
| .. | .. |
|---|
| 2691 | 3142 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
|---|
| 2692 | 3143 | { |
|---|
| 2693 | 3144 | struct crypt_config *cc; |
|---|
| 3145 | + const char *devname = dm_table_device_name(ti->table); |
|---|
| 2694 | 3146 | int key_size; |
|---|
| 2695 | 3147 | unsigned int align_mask; |
|---|
| 2696 | 3148 | unsigned long long tmpll; |
|---|
| .. | .. |
|---|
| 2709 | 3161 | return -EINVAL; |
|---|
| 2710 | 3162 | } |
|---|
| 2711 | 3163 | |
|---|
| 2712 | | - cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
|---|
| 3164 | + cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL); |
|---|
| 2713 | 3165 | if (!cc) { |
|---|
| 2714 | 3166 | ti->error = "Cannot allocate encryption context"; |
|---|
| 2715 | 3167 | return -ENOMEM; |
|---|
| .. | .. |
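
Editorial note: struct_size() (from <linux/overflow.h>) replaces the open-coded sizeof(*cc) + key_size sum; it computes the size of a structure with a trailing flexible array and saturates on overflow, so an oversized key_size cannot wrap into a too-small allocation. A minimal sketch with a hypothetical struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo {
            unsigned int key_size;
            u8 key[];                       /* flexible array member */
    };

    static struct demo *demo_alloc(unsigned int key_size)
    {
            struct demo *d;

            /* sizeof(*d) + key_size bytes, overflow-checked */
            d = kzalloc(struct_size(d, key, key_size), GFP_KERNEL);
            if (d)
                    d->key_size = key_size;
            return d;
    }
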
|---|
| 2816 | 3268 | } |
|---|
| 2817 | 3269 | cc->start = tmpll; |
|---|
| 2818 | 3270 | |
|---|
| 3271 | + /* |
|---|
| 3272 | + * For zoned block devices, we need to preserve the issuer write |
|---|
| 3273 | + * ordering. To do so, disable write workqueues and force inline |
|---|
| 3274 | + * encryption completion. |
|---|
| 3275 | + */ |
|---|
| 3276 | + if (bdev_is_zoned(cc->dev->bdev)) { |
|---|
| 3277 | + set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
|---|
| 3278 | + set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags); |
|---|
| 3279 | + } |
|---|
| 3280 | + |
|---|
| 2819 | 3281 | if (crypt_integrity_aead(cc) || cc->integrity_iv_size) { |
|---|
| 2820 | 3282 | ret = crypt_integrity_ctr(cc, ti); |
|---|
| 2821 | 3283 | if (ret) |
|---|
| .. | .. |
|---|
| 2836 | 3298 | } |
|---|
| 2837 | 3299 | |
|---|
| 2838 | 3300 | ret = -ENOMEM; |
|---|
| 2839 | | - cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); |
|---|
| 3301 | + cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); |
|---|
| 2840 | 3302 | if (!cc->io_queue) { |
|---|
| 2841 | 3303 | ti->error = "Couldn't create kcryptd io queue"; |
|---|
| 2842 | 3304 | goto bad; |
|---|
| 2843 | 3305 | } |
|---|
| 2844 | 3306 | |
|---|
| 2845 | 3307 | if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) |
|---|
| 2846 | | - cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); |
|---|
| 3308 | + cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
|---|
| 3309 | + 1, devname); |
|---|
| 2847 | 3310 | else |
|---|
| 2848 | | - cc->crypt_queue = alloc_workqueue("kcryptd", |
|---|
| 2849 | | - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
|---|
| 2850 | | - num_online_cpus()); |
|---|
| 3311 | + cc->crypt_queue = alloc_workqueue("kcryptd/%s", |
|---|
| 3312 | + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
|---|
| 3313 | + num_online_cpus(), devname); |
|---|
| 2851 | 3314 | if (!cc->crypt_queue) { |
|---|
| 2852 | 3315 | ti->error = "Couldn't create kcryptd queue"; |
|---|
| 2853 | 3316 | goto bad; |
|---|
| .. | .. |
|---|
| 2856 | 3319 | spin_lock_init(&cc->write_thread_lock); |
|---|
| 2857 | 3320 | cc->write_tree = RB_ROOT; |
|---|
| 2858 | 3321 | |
|---|
| 2859 | | - cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); |
|---|
| 3322 | + cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname); |
|---|
| 2860 | 3323 | if (IS_ERR(cc->write_thread)) { |
|---|
| 2861 | 3324 | ret = PTR_ERR(cc->write_thread); |
|---|
| 2862 | 3325 | cc->write_thread = NULL; |
|---|
| .. | .. |
|---|
| 2866 | 3329 | wake_up_process(cc->write_thread); |
|---|
| 2867 | 3330 | |
|---|
| 2868 | 3331 | ti->num_flush_bios = 1; |
|---|
| 3332 | + ti->limit_swap_bios = true; |
|---|
| 2869 | 3333 | |
|---|
| 2870 | 3334 | return 0; |
|---|
| 2871 | 3335 | |
|---|
| .. | .. |
|---|
| 2940 | 3404 | return DM_MAPIO_SUBMITTED; |
|---|
| 2941 | 3405 | } |
|---|
| 2942 | 3406 | |
|---|
| 3407 | +static char hex2asc(unsigned char c) |
|---|
| 3408 | +{ |
|---|
| 3409 | + return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); |
|---|
| 3410 | +} |
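
Editorial note: hex2asc() converts a nibble to its lowercase hex digit without a data-dependent branch, so formatting the (secret) key for status output cannot leak it through timing. Worked through by hand:

    /* c = 0x3: (unsigned)(9 - 3) >> 4 == 0, so 3 + '0'        == '3'
     * c = 0xc: (unsigned)(9 - 12) == 0xfffffffd; >> 4, & 0x27
     *          adds 0x27 ('a' - '0' - 10), so 12 + '0' + 39   == 'c' */
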
|---|
| 3411 | + |
|---|
| 2943 | 3412 | static void crypt_status(struct dm_target *ti, status_type_t type, |
|---|
| 2944 | 3413 | unsigned status_flags, char *result, unsigned maxlen) |
|---|
| 2945 | 3414 | { |
|---|
| .. | .. |
|---|
| 2958 | 3427 | if (cc->key_size > 0) { |
|---|
| 2959 | 3428 | if (cc->key_string) |
|---|
| 2960 | 3429 | DMEMIT(":%u:%s", cc->key_size, cc->key_string); |
|---|
| 2961 | | - else |
|---|
| 2962 | | - for (i = 0; i < cc->key_size; i++) |
|---|
| 2963 | | - DMEMIT("%02x", cc->key[i]); |
|---|
| 3430 | + else { |
|---|
| 3431 | + for (i = 0; i < cc->key_size; i++) { |
|---|
| 3432 | + DMEMIT("%c%c", hex2asc(cc->key[i] >> 4), |
|---|
| 3433 | + hex2asc(cc->key[i] & 0xf)); |
|---|
| 3434 | + } |
|---|
| 3435 | + } |
|---|
| 2964 | 3436 | } else |
|---|
| 2965 | 3437 | DMEMIT("-"); |
|---|
| 2966 | 3438 | |
|---|
| .. | .. |
|---|
| 2970 | 3442 | num_feature_args += !!ti->num_discard_bios; |
|---|
| 2971 | 3443 | num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); |
|---|
| 2972 | 3444 | num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); |
|---|
| 3445 | + num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); |
|---|
| 3446 | + num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); |
|---|
| 2973 | 3447 | num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); |
|---|
| 2974 | 3448 | num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); |
|---|
| 2975 | 3449 | if (cc->on_disk_tag_size) |
|---|
| .. | .. |
|---|
| 2982 | 3456 | DMEMIT(" same_cpu_crypt"); |
|---|
| 2983 | 3457 | if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) |
|---|
| 2984 | 3458 | DMEMIT(" submit_from_crypt_cpus"); |
|---|
| 3459 | + if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) |
|---|
| 3460 | + DMEMIT(" no_read_workqueue"); |
|---|
| 3461 | + if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) |
|---|
| 3462 | + DMEMIT(" no_write_workqueue"); |
|---|
| 2985 | 3463 | if (cc->on_disk_tag_size) |
|---|
| 2986 | 3464 | DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); |
|---|
| 2987 | 3465 | if (cc->sector_size != (1 << SECTOR_SHIFT)) |
|---|
| .. | .. |
|---|
| 3056 | 3534 | memset(cc->key, 0, cc->key_size * sizeof(u8)); |
|---|
| 3057 | 3535 | return ret; |
|---|
| 3058 | 3536 | } |
|---|
| 3059 | | - if (argc == 2 && !strcasecmp(argv[1], "wipe")) { |
|---|
| 3060 | | - if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
|---|
| 3061 | | - ret = cc->iv_gen_ops->wipe(cc); |
|---|
| 3062 | | - if (ret) |
|---|
| 3063 | | - return ret; |
|---|
| 3064 | | - } |
|---|
| 3537 | + if (argc == 2 && !strcasecmp(argv[1], "wipe")) |
|---|
| 3065 | 3538 | return crypt_wipe_key(cc); |
|---|
| 3066 | | - } |
|---|
| 3067 | 3539 | } |
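
Editorial note: the IV-private-key wipe now lives in crypt_wipe_key() itself (see the change above), so the message handler simply delegates. From user space this handler is reached through the device-mapper message interface, e.g. (the device name is illustrative):

    dmsetup message cryptvol 0 key wipe
    dmsetup message cryptvol 0 key set <key>
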
|---|
| 3068 | 3540 | |
|---|
| 3069 | 3541 | error: |
|---|
| .. | .. |
|---|
| 3100 | 3572 | |
|---|
| 3101 | 3573 | static struct target_type crypt_target = { |
|---|
| 3102 | 3574 | .name = "crypt", |
|---|
| 3103 | | - .version = {1, 18, 1}, |
|---|
| 3575 | + .version = {1, 22, 0}, |
|---|
| 3104 | 3576 | .module = THIS_MODULE, |
|---|
| 3105 | 3577 | .ctr = crypt_ctr, |
|---|
| 3106 | 3578 | .dtr = crypt_dtr, |
|---|
| 3579 | +#ifdef CONFIG_BLK_DEV_ZONED |
|---|
| 3580 | + .features = DM_TARGET_ZONED_HM, |
|---|
| 3581 | + .report_zones = crypt_report_zones, |
|---|
| 3582 | +#endif |
|---|
| 3107 | 3583 | .map = crypt_map, |
|---|
| 3108 | 3584 | .status = crypt_status, |
|---|
| 3109 | 3585 | .postsuspend = crypt_postsuspend, |
|---|