2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/md/dm-crypt.c
....@@ -1,8 +1,8 @@
11 /*
22 * Copyright (C) 2003 Jana Saout <jana@saout.de>
33 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
5
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
4
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
5
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
66 *
77 * This file is released under the GPL.
88 */
....@@ -34,7 +34,9 @@
3434 #include <crypto/aead.h>
3535 #include <crypto/authenc.h>
3636 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
37
+#include <linux/key-type.h>
3738 #include <keys/user-type.h>
39
+#include <keys/encrypted-type.h>
3840
3941 #include <linux/device-mapper.h>
4042
....@@ -65,8 +67,11 @@
6567 struct crypt_config *cc;
6668 struct bio *base_bio;
6769 u8 *integrity_metadata;
68
- bool integrity_metadata_from_pool;
70
+ bool integrity_metadata_from_pool:1;
71
+ bool in_tasklet:1;
72
+
6973 struct work_struct work;
74
+ struct tasklet_struct tasklet;
7075
7176 struct convert_context ctx;
7277
....@@ -98,11 +103,6 @@
98103 struct dm_crypt_request *dmreq);
99104 };
100105
101
-struct iv_essiv_private {
102
- struct crypto_shash *hash_tfm;
103
- u8 *salt;
104
-};
105
-
106106 struct iv_benbi_private {
107107 int shift;
108108 };
....@@ -120,16 +120,24 @@
120120 u8 *whitening;
121121 };
122122
123
+#define ELEPHANT_MAX_KEY_SIZE 32
124
+struct iv_elephant_private {
125
+ struct crypto_skcipher *tfm;
126
+};
127
+
123128 /*
124129 * Crypt: maps a linear range of a block device
125130 * and encrypts / decrypts at the same time.
126131 */
127132 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
128
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
133
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
134
+ DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
135
+ DM_CRYPT_WRITE_INLINE };
129136
130137 enum cipher_flags {
131138 CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
132139 CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
140
+ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
133141 };
134142
135143 /*
....@@ -148,25 +156,22 @@
148156 struct task_struct *write_thread;
149157 struct rb_root write_tree;
150158
151
- char *cipher;
152159 char *cipher_string;
153160 char *cipher_auth;
154161 char *key_string;
155162
156163 const struct crypt_iv_operations *iv_gen_ops;
157164 union {
158
- struct iv_essiv_private essiv;
159165 struct iv_benbi_private benbi;
160166 struct iv_lmk_private lmk;
161167 struct iv_tcw_private tcw;
168
+ struct iv_elephant_private elephant;
162169 } iv_gen_private;
163170 u64 iv_offset;
164171 unsigned int iv_size;
165172 unsigned short int sector_size;
166173 unsigned char sector_shift;
167174
168
- /* ESSIV: struct crypto_cipher *essiv_tfm */
169
- void *iv_private;
170175 union {
171176 struct crypto_skcipher **tfms;
172177 struct crypto_aead **tfms_aead;
....@@ -214,7 +219,7 @@
214219 struct mutex bio_alloc_lock;
215220
216221 u8 *authenc_key; /* space for keys in authenc() format (if used) */
217
- u8 key[0];
222
+ u8 key[];
218223 };
219224
220225 #define MIN_IOS 64
....@@ -231,6 +236,8 @@
231236 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
232237 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
233238 struct scatterlist *sg);
239
+
240
+static bool crypt_integrity_aead(struct crypt_config *cc);
234241
235242 /*
236243 * Use this to access cipher attributes that are independent of the key.
....@@ -291,8 +298,14 @@
291298 * Note that this encryption scheme is vulnerable to watermarking attacks
292299 * and should be used for old compatible containers access only.
293300 *
294
- * plumb: unimplemented, see:
295
- * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
301
+ * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
302
+ * The IV is encrypted little-endian byte-offset (with the same key
303
+ * and cipher as the volume).
304
+ *
305
+ * elephant: The extended version of eboiv with additional Elephant diffuser
306
+ * used with Bitlocker CBC mode.
307
+ * This mode was used in older Windows systems
308
+ * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
296309 */
297310
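A minimal userspace sketch of the eboiv construction described above (hypothetical helper, not part of the patch): the IV plaintext is the little-endian byte offset of the sector placed in an otherwise zeroed cipher block, which crypt_iv_eboiv_gen() further below then encrypts with the same key and cipher as the volume through the crypto API.

#include <stdint.h>
#include <string.h>

/* Build the block that eboiv encrypts to obtain the per-sector IV. */
static void eboiv_plaintext(uint8_t *buf, size_t iv_size,
			    uint64_t sector, unsigned int sector_size)
{
	uint64_t off = sector * (uint64_t)sector_size;	/* byte offset of the sector */
	size_t i;

	memset(buf, 0, iv_size);
	for (i = 0; i < 8 && i < iv_size; i++)		/* store little-endian */
		buf[i] = (uint8_t)(off >> (8 * i));
}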
298311 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
....@@ -323,158 +336,15 @@
323336 return 0;
324337 }
325338
326
-/* Initialise ESSIV - compute salt but no local memory allocations */
327
-static int crypt_iv_essiv_init(struct crypt_config *cc)
328
-{
329
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
330
- SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
331
- struct crypto_cipher *essiv_tfm;
332
- int err;
333
-
334
- desc->tfm = essiv->hash_tfm;
335
- desc->flags = 0;
336
-
337
- err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338
- shash_desc_zero(desc);
339
- if (err)
340
- return err;
341
-
342
- essiv_tfm = cc->iv_private;
343
-
344
- err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
345
- crypto_shash_digestsize(essiv->hash_tfm));
346
- if (err)
347
- return err;
348
-
349
- return 0;
350
-}
351
-
352
-/* Wipe salt and reset key derived from volume key */
353
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
354
-{
355
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
356
- unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
357
- struct crypto_cipher *essiv_tfm;
358
- int r, err = 0;
359
-
360
- memset(essiv->salt, 0, salt_size);
361
-
362
- essiv_tfm = cc->iv_private;
363
- r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
364
- if (r)
365
- err = r;
366
-
367
- return err;
368
-}
369
-
370
-/* Allocate the cipher for ESSIV */
371
-static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
372
- struct dm_target *ti,
373
- const u8 *salt,
374
- unsigned int saltsize)
375
-{
376
- struct crypto_cipher *essiv_tfm;
377
- int err;
378
-
379
- /* Setup the essiv_tfm with the given salt */
380
- essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
381
- if (IS_ERR(essiv_tfm)) {
382
- ti->error = "Error allocating crypto tfm for ESSIV";
383
- return essiv_tfm;
384
- }
385
-
386
- if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
387
- ti->error = "Block size of ESSIV cipher does "
388
- "not match IV size of block cipher";
389
- crypto_free_cipher(essiv_tfm);
390
- return ERR_PTR(-EINVAL);
391
- }
392
-
393
- err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
394
- if (err) {
395
- ti->error = "Failed to set key for ESSIV cipher";
396
- crypto_free_cipher(essiv_tfm);
397
- return ERR_PTR(err);
398
- }
399
-
400
- return essiv_tfm;
401
-}
402
-
403
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
404
-{
405
- struct crypto_cipher *essiv_tfm;
406
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
407
-
408
- crypto_free_shash(essiv->hash_tfm);
409
- essiv->hash_tfm = NULL;
410
-
411
- kzfree(essiv->salt);
412
- essiv->salt = NULL;
413
-
414
- essiv_tfm = cc->iv_private;
415
-
416
- if (essiv_tfm)
417
- crypto_free_cipher(essiv_tfm);
418
-
419
- cc->iv_private = NULL;
420
-}
421
-
422
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
423
- const char *opts)
424
-{
425
- struct crypto_cipher *essiv_tfm = NULL;
426
- struct crypto_shash *hash_tfm = NULL;
427
- u8 *salt = NULL;
428
- int err;
429
-
430
- if (!opts) {
431
- ti->error = "Digest algorithm missing for ESSIV mode";
432
- return -EINVAL;
433
- }
434
-
435
- /* Allocate hash algorithm */
436
- hash_tfm = crypto_alloc_shash(opts, 0, 0);
437
- if (IS_ERR(hash_tfm)) {
438
- ti->error = "Error initializing ESSIV hash";
439
- err = PTR_ERR(hash_tfm);
440
- goto bad;
441
- }
442
-
443
- salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
444
- if (!salt) {
445
- ti->error = "Error kmallocing salt storage in ESSIV";
446
- err = -ENOMEM;
447
- goto bad;
448
- }
449
-
450
- cc->iv_gen_private.essiv.salt = salt;
451
- cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
452
-
453
- essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
454
- crypto_shash_digestsize(hash_tfm));
455
- if (IS_ERR(essiv_tfm)) {
456
- crypt_iv_essiv_dtr(cc);
457
- return PTR_ERR(essiv_tfm);
458
- }
459
- cc->iv_private = essiv_tfm;
460
-
461
- return 0;
462
-
463
-bad:
464
- if (hash_tfm && !IS_ERR(hash_tfm))
465
- crypto_free_shash(hash_tfm);
466
- kfree(salt);
467
- return err;
468
-}
469
-
470339 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
471340 struct dm_crypt_request *dmreq)
472341 {
473
- struct crypto_cipher *essiv_tfm = cc->iv_private;
474
-
342
+ /*
343
+ * ESSIV encryption of the IV is now handled by the crypto API,
344
+ * so just pass the plain sector number here.
345
+ */
475346 memset(iv, 0, cc->iv_size);
476347 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
477
- crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
478348
479349 return 0;
480350 }
....@@ -485,7 +355,7 @@
485355 unsigned bs;
486356 int log;
487357
488
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
358
+ if (crypt_integrity_aead(cc))
489359 bs = crypto_aead_blocksize(any_tfm_aead(cc));
490360 else
491361 bs = crypto_skcipher_blocksize(any_tfm(cc));
....@@ -542,7 +412,7 @@
542412 crypto_free_shash(lmk->hash_tfm);
543413 lmk->hash_tfm = NULL;
544414
545
- kzfree(lmk->seed);
415
+ kfree_sensitive(lmk->seed);
546416 lmk->seed = NULL;
547417 }
548418
....@@ -556,7 +426,8 @@
556426 return -EINVAL;
557427 }
558428
559
- lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
429
+ lmk->hash_tfm = crypto_alloc_shash("md5", 0,
430
+ CRYPTO_ALG_ALLOCATES_MEMORY);
560431 if (IS_ERR(lmk->hash_tfm)) {
561432 ti->error = "Error initializing LMK hash";
562433 return PTR_ERR(lmk->hash_tfm);
....@@ -612,7 +483,6 @@
612483 int i, r;
613484
614485 desc->tfm = lmk->hash_tfm;
615
- desc->flags = 0;
616486
617487 r = crypto_shash_init(desc);
618488 if (r)
....@@ -694,9 +564,9 @@
694564 {
695565 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
696566
697
- kzfree(tcw->iv_seed);
567
+ kfree_sensitive(tcw->iv_seed);
698568 tcw->iv_seed = NULL;
699
- kzfree(tcw->whitening);
569
+ kfree_sensitive(tcw->whitening);
700570 tcw->whitening = NULL;
701571
702572 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
....@@ -719,7 +589,8 @@
719589 return -EINVAL;
720590 }
721591
722
- tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
592
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
593
+ CRYPTO_ALG_ALLOCATES_MEMORY);
723594 if (IS_ERR(tcw->crc32_tfm)) {
724595 ti->error = "Error initializing CRC32 in TCW";
725596 return PTR_ERR(tcw->crc32_tfm);
....@@ -774,7 +645,6 @@
774645
775646 /* calculate crc32 for every 32bit part and xor it */
776647 desc->tfm = tcw->crc32_tfm;
777
- desc->flags = 0;
778648 for (i = 0; i < 4; i++) {
779649 r = crypto_shash_init(desc);
780650 if (r)
....@@ -850,6 +720,334 @@
850720 return 0;
851721 }
852722
723
+static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
724
+ const char *opts)
725
+{
726
+ if (crypt_integrity_aead(cc)) {
727
+ ti->error = "AEAD transforms not supported for EBOIV";
728
+ return -EINVAL;
729
+ }
730
+
731
+ if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
732
+ ti->error = "Block size of EBOIV cipher does "
733
+ "not match IV size of block cipher";
734
+ return -EINVAL;
735
+ }
736
+
737
+ return 0;
738
+}
739
+
740
+static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
741
+ struct dm_crypt_request *dmreq)
742
+{
743
+ u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
744
+ struct skcipher_request *req;
745
+ struct scatterlist src, dst;
746
+ DECLARE_CRYPTO_WAIT(wait);
747
+ int err;
748
+
749
+ req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
750
+ if (!req)
751
+ return -ENOMEM;
752
+
753
+ memset(buf, 0, cc->iv_size);
754
+ *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
755
+
756
+ sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
757
+ sg_init_one(&dst, iv, cc->iv_size);
758
+ skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
759
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
760
+ err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
761
+ skcipher_request_free(req);
762
+
763
+ return err;
764
+}
765
+
766
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
767
+{
768
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
769
+
770
+ crypto_free_skcipher(elephant->tfm);
771
+ elephant->tfm = NULL;
772
+}
773
+
774
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
775
+ const char *opts)
776
+{
777
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
778
+ int r;
779
+
780
+ elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
781
+ CRYPTO_ALG_ALLOCATES_MEMORY);
782
+ if (IS_ERR(elephant->tfm)) {
783
+ r = PTR_ERR(elephant->tfm);
784
+ elephant->tfm = NULL;
785
+ return r;
786
+ }
787
+
788
+ r = crypt_iv_eboiv_ctr(cc, ti, NULL);
789
+ if (r)
790
+ crypt_iv_elephant_dtr(cc);
791
+ return r;
792
+}
793
+
794
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
795
+{
796
+#ifndef __LITTLE_ENDIAN
797
+ int i;
798
+
799
+ for (i = 0; i < n; i++)
800
+ d[i] = le32_to_cpu((__le32)d[i]);
801
+#endif
802
+}
803
+
804
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
805
+{
806
+#ifndef __LITTLE_ENDIAN
807
+ int i;
808
+
809
+ for (i = 0; i < n; i++)
810
+ d[i] = cpu_to_le32((u32)d[i]);
811
+#endif
812
+}
813
+
814
+static void diffuser_a_decrypt(u32 *d, size_t n)
815
+{
816
+ int i, i1, i2, i3;
817
+
818
+ for (i = 0; i < 5; i++) {
819
+ i1 = 0;
820
+ i2 = n - 2;
821
+ i3 = n - 5;
822
+
823
+ while (i1 < (n - 1)) {
824
+ d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
825
+ i1++; i2++; i3++;
826
+
827
+ if (i3 >= n)
828
+ i3 -= n;
829
+
830
+ d[i1] += d[i2] ^ d[i3];
831
+ i1++; i2++; i3++;
832
+
833
+ if (i2 >= n)
834
+ i2 -= n;
835
+
836
+ d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
837
+ i1++; i2++; i3++;
838
+
839
+ d[i1] += d[i2] ^ d[i3];
840
+ i1++; i2++; i3++;
841
+ }
842
+ }
843
+}
844
+
845
+static void diffuser_a_encrypt(u32 *d, size_t n)
846
+{
847
+ int i, i1, i2, i3;
848
+
849
+ for (i = 0; i < 5; i++) {
850
+ i1 = n - 1;
851
+ i2 = n - 2 - 1;
852
+ i3 = n - 5 - 1;
853
+
854
+ while (i1 > 0) {
855
+ d[i1] -= d[i2] ^ d[i3];
856
+ i1--; i2--; i3--;
857
+
858
+ d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
859
+ i1--; i2--; i3--;
860
+
861
+ if (i2 < 0)
862
+ i2 += n;
863
+
864
+ d[i1] -= d[i2] ^ d[i3];
865
+ i1--; i2--; i3--;
866
+
867
+ if (i3 < 0)
868
+ i3 += n;
869
+
870
+ d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
871
+ i1--; i2--; i3--;
872
+ }
873
+ }
874
+}
875
+
876
+static void diffuser_b_decrypt(u32 *d, size_t n)
877
+{
878
+ int i, i1, i2, i3;
879
+
880
+ for (i = 0; i < 3; i++) {
881
+ i1 = 0;
882
+ i2 = 2;
883
+ i3 = 5;
884
+
885
+ while (i1 < (n - 1)) {
886
+ d[i1] += d[i2] ^ d[i3];
887
+ i1++; i2++; i3++;
888
+
889
+ d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
890
+ i1++; i2++; i3++;
891
+
892
+ if (i2 >= n)
893
+ i2 -= n;
894
+
895
+ d[i1] += d[i2] ^ d[i3];
896
+ i1++; i2++; i3++;
897
+
898
+ if (i3 >= n)
899
+ i3 -= n;
900
+
901
+ d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
902
+ i1++; i2++; i3++;
903
+ }
904
+ }
905
+}
906
+
907
+static void diffuser_b_encrypt(u32 *d, size_t n)
908
+{
909
+ int i, i1, i2, i3;
910
+
911
+ for (i = 0; i < 3; i++) {
912
+ i1 = n - 1;
913
+ i2 = 2 - 1;
914
+ i3 = 5 - 1;
915
+
916
+ while (i1 > 0) {
917
+ d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
918
+ i1--; i2--; i3--;
919
+
920
+ if (i3 < 0)
921
+ i3 += n;
922
+
923
+ d[i1] -= d[i2] ^ d[i3];
924
+ i1--; i2--; i3--;
925
+
926
+ if (i2 < 0)
927
+ i2 += n;
928
+
929
+ d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
930
+ i1--; i2--; i3--;
931
+
932
+ d[i1] -= d[i2] ^ d[i3];
933
+ i1--; i2--; i3--;
934
+ }
935
+ }
936
+}
937
+
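The shift/or pairs in the four diffuser functions above are plain 32-bit left rotations (diffuser A rotates by 9 and 13 bits, diffuser B by 10 and 25), and on big-endian hosts diffuser_disk_to_cpu()/diffuser_cpu_to_disk() first convert the little-endian on-disk words. A tiny standalone illustration (hypothetical helper, not part of the patch):

#include <stdint.h>

/* (x << 9 | x >> 23) and friends in the diffusers are rotate-left operations;
 * assumes 0 < r < 32.
 */
static uint32_t rotl32(uint32_t x, unsigned int r)
{
	return (x << r) | (x >> (32 - r));
}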
938
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
939
+{
940
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
941
+ u8 *es, *ks, *data, *data2, *data_offset;
942
+ struct skcipher_request *req;
943
+ struct scatterlist *sg, *sg2, src, dst;
944
+ DECLARE_CRYPTO_WAIT(wait);
945
+ int i, r;
946
+
947
+ req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
948
+ es = kzalloc(16, GFP_NOIO); /* Key for AES */
949
+ ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
950
+
951
+ if (!req || !es || !ks) {
952
+ r = -ENOMEM;
953
+ goto out;
954
+ }
955
+
956
+ *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
957
+
958
+ /* E(Ks, e(s)) */
959
+ sg_init_one(&src, es, 16);
960
+ sg_init_one(&dst, ks, 16);
961
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
962
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
963
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
964
+ if (r)
965
+ goto out;
966
+
967
+ /* E(Ks, e'(s)) */
968
+ es[15] = 0x80;
969
+ sg_init_one(&dst, &ks[16], 16);
970
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
971
+ if (r)
972
+ goto out;
973
+
974
+ sg = crypt_get_sg_data(cc, dmreq->sg_out);
975
+ data = kmap_atomic(sg_page(sg));
976
+ data_offset = data + sg->offset;
977
+
978
+ /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
979
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
980
+ sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
981
+ data2 = kmap_atomic(sg_page(sg2));
982
+ memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
983
+ kunmap_atomic(data2);
984
+ }
985
+
986
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
987
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
988
+ diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
989
+ diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
990
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
991
+ }
992
+
993
+ for (i = 0; i < (cc->sector_size / 32); i++)
994
+ crypto_xor(data_offset + i * 32, ks, 32);
995
+
996
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
997
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
998
+ diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
999
+ diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
1000
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
1001
+ }
1002
+
1003
+ kunmap_atomic(data);
1004
+out:
1005
+ kfree_sensitive(ks);
1006
+ kfree_sensitive(es);
1007
+ skcipher_request_free(req);
1008
+ return r;
1009
+}
1010
+
1011
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1012
+ struct dm_crypt_request *dmreq)
1013
+{
1014
+ int r;
1015
+
1016
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1017
+ r = crypt_iv_elephant(cc, dmreq);
1018
+ if (r)
1019
+ return r;
1020
+ }
1021
+
1022
+ return crypt_iv_eboiv_gen(cc, iv, dmreq);
1023
+}
1024
+
1025
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1026
+ struct dm_crypt_request *dmreq)
1027
+{
1028
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1029
+ return crypt_iv_elephant(cc, dmreq);
1030
+
1031
+ return 0;
1032
+}
1033
+
1034
+static int crypt_iv_elephant_init(struct crypt_config *cc)
1035
+{
1036
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1037
+ int key_offset = cc->key_size - cc->key_extra_size;
1038
+
1039
+ return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1040
+}
1041
+
1042
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1043
+{
1044
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1045
+ u8 key[ELEPHANT_MAX_KEY_SIZE];
1046
+
1047
+ memset(key, 0, cc->key_extra_size);
1048
+ return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1049
+}
1050
+
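Reading the code above (a summary, not an authoritative spec): the volume key is split in two halves, the first keys the data cipher (CBC for BitLocker volumes) and the second keys the ecb(aes) tfm allocated in crypt_iv_elephant_ctr(); per sector, two AES encryptions of the byte-offset block produce the 32-byte sector key ks, which crypt_iv_elephant() XORs over the sector data before running diffuser A and then diffuser B on writes, while reads undo diffuser B, then A, then the XOR after CBC decryption. A self-contained sketch of just the sector-key XOR step (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <stddef.h>

/* Apply a 32-byte sector key across the sector, 32 bytes at a time,
 * mirroring the crypto_xor() loop in crypt_iv_elephant().
 */
static void xor_sector_key(uint8_t *data, const uint8_t ks[32], size_t sector_size)
{
	size_t i, j;

	for (i = 0; i < sector_size / 32; i++)
		for (j = 0; j < 32; j++)
			data[i * 32 + j] ^= ks[j];
}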
8531051 static const struct crypt_iv_operations crypt_iv_plain_ops = {
8541052 .generator = crypt_iv_plain_gen
8551053 };
....@@ -863,10 +1061,6 @@
8631061 };
8641062
8651063 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
866
- .ctr = crypt_iv_essiv_ctr,
867
- .dtr = crypt_iv_essiv_dtr,
868
- .init = crypt_iv_essiv_init,
869
- .wipe = crypt_iv_essiv_wipe,
8701064 .generator = crypt_iv_essiv_gen
8711065 };
8721066
....@@ -900,6 +1094,20 @@
9001094
9011095 static struct crypt_iv_operations crypt_iv_random_ops = {
9021096 .generator = crypt_iv_random_gen
1097
+};
1098
+
1099
+static struct crypt_iv_operations crypt_iv_eboiv_ops = {
1100
+ .ctr = crypt_iv_eboiv_ctr,
1101
+ .generator = crypt_iv_eboiv_gen
1102
+};
1103
+
1104
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
1105
+ .ctr = crypt_iv_elephant_ctr,
1106
+ .dtr = crypt_iv_elephant_dtr,
1107
+ .init = crypt_iv_elephant_init,
1108
+ .wipe = crypt_iv_elephant_wipe,
1109
+ .generator = crypt_iv_elephant_gen,
1110
+ .post = crypt_iv_elephant_post
9031111 };
9041112
9051113 /*
....@@ -1041,11 +1249,11 @@
10411249 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
10421250 }
10431251
1044
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
1252
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
10451253 struct dm_crypt_request *dmreq)
10461254 {
10471255 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1048
- return (uint64_t*) ptr;
1256
+ return (__le64 *) ptr;
10491257 }
10501258
10511259 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
....@@ -1081,7 +1289,7 @@
10811289 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
10821290 struct dm_crypt_request *dmreq;
10831291 u8 *iv, *org_iv, *tag_iv, *tag;
1084
- uint64_t *sector;
1292
+ __le64 *sector;
10851293 int r = 0;
10861294
10871295 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
....@@ -1153,9 +1361,11 @@
11531361 r = crypto_aead_decrypt(req);
11541362 }
11551363
1156
- if (r == -EBADMSG)
1157
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1364
+ if (r == -EBADMSG) {
1365
+ char b[BDEVNAME_SIZE];
1366
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
11581367 (unsigned long long)le64_to_cpu(*sector));
1368
+ }
11591369
11601370 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
11611371 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
....@@ -1176,7 +1386,7 @@
11761386 struct scatterlist *sg_in, *sg_out;
11771387 struct dm_crypt_request *dmreq;
11781388 u8 *iv, *org_iv, *tag_iv;
1179
- uint64_t *sector;
1389
+ __le64 *sector;
11801390 int r = 0;
11811391
11821392 /* Reject unexpected unaligned bio. */
....@@ -1216,6 +1426,9 @@
12161426 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
12171427 if (r < 0)
12181428 return r;
1429
+ /* Data can be already preprocessed in generator */
1430
+ if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1431
+ sg_in = sg_out;
12191432 /* Store generated IV in integrity metadata */
12201433 if (cc->integrity_iv_size)
12211434 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
....@@ -1243,13 +1456,16 @@
12431456 static void kcryptd_async_done(struct crypto_async_request *async_req,
12441457 int error);
12451458
1246
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1459
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
12471460 struct convert_context *ctx)
12481461 {
12491462 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
12501463
1251
- if (!ctx->r.req)
1252
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
1464
+ if (!ctx->r.req) {
1465
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1466
+ if (!ctx->r.req)
1467
+ return -ENOMEM;
1468
+ }
12531469
12541470 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
12551471
....@@ -1260,13 +1476,18 @@
12601476 skcipher_request_set_callback(ctx->r.req,
12611477 CRYPTO_TFM_REQ_MAY_BACKLOG,
12621478 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1479
+
1480
+ return 0;
12631481 }
12641482
1265
-static void crypt_alloc_req_aead(struct crypt_config *cc,
1483
+static int crypt_alloc_req_aead(struct crypt_config *cc,
12661484 struct convert_context *ctx)
12671485 {
1268
- if (!ctx->r.req_aead)
1269
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
1486
+ if (!ctx->r.req_aead) {
1487
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1488
+ if (!ctx->r.req_aead)
1489
+ return -ENOMEM;
1490
+ }
12701491
12711492 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
12721493
....@@ -1277,15 +1498,17 @@
12771498 aead_request_set_callback(ctx->r.req_aead,
12781499 CRYPTO_TFM_REQ_MAY_BACKLOG,
12791500 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1501
+
1502
+ return 0;
12801503 }
12811504
1282
-static void crypt_alloc_req(struct crypt_config *cc,
1505
+static int crypt_alloc_req(struct crypt_config *cc,
12831506 struct convert_context *ctx)
12841507 {
12851508 if (crypt_integrity_aead(cc))
1286
- crypt_alloc_req_aead(cc, ctx);
1509
+ return crypt_alloc_req_aead(cc, ctx);
12871510 else
1288
- crypt_alloc_req_skcipher(cc, ctx);
1511
+ return crypt_alloc_req_skcipher(cc, ctx);
12891512 }
12901513
12911514 static void crypt_free_req_skcipher(struct crypt_config *cc,
....@@ -1318,17 +1541,28 @@
13181541 * Encrypt / decrypt data from one bio to another one (can be the same one)
13191542 */
13201543 static blk_status_t crypt_convert(struct crypt_config *cc,
1321
- struct convert_context *ctx)
1544
+ struct convert_context *ctx, bool atomic, bool reset_pending)
13221545 {
13231546 unsigned int tag_offset = 0;
13241547 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
13251548 int r;
13261549
1327
- atomic_set(&ctx->cc_pending, 1);
1550
+ /*
1551
+ * if reset_pending is set we are dealing with the bio for the first time,
1552
+ * else we're continuing to work on the previous bio, so don't mess with
1553
+ * the cc_pending counter
1554
+ */
1555
+ if (reset_pending)
1556
+ atomic_set(&ctx->cc_pending, 1);
13281557
13291558 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
13301559
1331
- crypt_alloc_req(cc, ctx);
1560
+ r = crypt_alloc_req(cc, ctx);
1561
+ if (r) {
1562
+ complete(&ctx->restart);
1563
+ return BLK_STS_DEV_RESOURCE;
1564
+ }
1565
+
13321566 atomic_inc(&ctx->cc_pending);
13331567
13341568 if (crypt_integrity_aead(cc))
....@@ -1342,9 +1576,27 @@
13421576 * but the driver request queue is full, let's wait.
13431577 */
13441578 case -EBUSY:
1345
- wait_for_completion(&ctx->restart);
1579
+ if (in_interrupt()) {
1580
+ if (try_wait_for_completion(&ctx->restart)) {
1581
+ /*
1582
+ * we don't have to block to wait for completion,
1583
+ * so proceed
1584
+ */
1585
+ } else {
1586
+ /*
1587
+ * we can't wait for completion without blocking
1588
+ * exit and continue processing in a workqueue
1589
+ */
1590
+ ctx->r.req = NULL;
1591
+ ctx->cc_sector += sector_step;
1592
+ tag_offset++;
1593
+ return BLK_STS_DEV_RESOURCE;
1594
+ }
1595
+ } else {
1596
+ wait_for_completion(&ctx->restart);
1597
+ }
13461598 reinit_completion(&ctx->restart);
1347
- /* fall through */
1599
+ fallthrough;
13481600 /*
13491601 * The request is queued and processed asynchronously,
13501602 * completion function kcryptd_async_done() will be called.
....@@ -1361,7 +1613,8 @@
13611613 atomic_dec(&ctx->cc_pending);
13621614 ctx->cc_sector += sector_step;
13631615 tag_offset++;
1364
- cond_resched();
1616
+ if (!atomic)
1617
+ cond_resched();
13651618 continue;
13661619 /*
13671620 * There was a data integrity error.
....@@ -1452,10 +1705,10 @@
14521705
14531706 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
14541707 {
1455
- unsigned int i;
14561708 struct bio_vec *bv;
1709
+ struct bvec_iter_all iter_all;
14571710
1458
- bio_for_each_segment_all(bv, clone, i) {
1711
+ bio_for_each_segment_all(bv, clone, iter_all) {
14591712 BUG_ON(!bv->bv_page);
14601713 mempool_free(bv->bv_page, &cc->page_pool);
14611714 }
....@@ -1471,12 +1724,19 @@
14711724 io->ctx.r.req = NULL;
14721725 io->integrity_metadata = NULL;
14731726 io->integrity_metadata_from_pool = false;
1727
+ io->in_tasklet = false;
14741728 atomic_set(&io->io_pending, 0);
14751729 }
14761730
14771731 static void crypt_inc_pending(struct dm_crypt_io *io)
14781732 {
14791733 atomic_inc(&io->io_pending);
1734
+}
1735
+
1736
+static void kcryptd_io_bio_endio(struct work_struct *work)
1737
+{
1738
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1739
+ bio_endio(io->base_bio);
14801740 }
14811741
14821742 /*
....@@ -1501,6 +1761,21 @@
15011761 kfree(io->integrity_metadata);
15021762
15031763 base_bio->bi_status = error;
1764
+
1765
+ /*
1766
+ * If we are running this function from our tasklet,
1767
+ * we can't call bio_endio() here, because it will call
1768
+ * clone_endio() from dm.c, which in turn will
1769
+ * free the current struct dm_crypt_io structure with
1770
+ * our tasklet. In this case we need to delay bio_endio()
1771
+ * execution to after the tasklet is done and dequeued.
1772
+ */
1773
+ if (io->in_tasklet) {
1774
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
1775
+ queue_work(cc->io_queue, &io->work);
1776
+ return;
1777
+ }
1778
+
15041779 bio_endio(base_bio);
15051780 }
15061781
....@@ -1584,7 +1859,7 @@
15841859 return 1;
15851860 }
15861861
1587
- generic_make_request(clone);
1862
+ submit_bio_noacct(clone);
15881863 return 0;
15891864 }
15901865
....@@ -1610,7 +1885,7 @@
16101885 {
16111886 struct bio *clone = io->ctx.bio_out;
16121887
1613
- generic_make_request(clone);
1888
+ submit_bio_noacct(clone);
16141889 }
16151890
16161891 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
....@@ -1661,6 +1936,7 @@
16611936 io = crypt_io_from_node(rb_first(&write_tree));
16621937 rb_erase(&io->rb_node, &write_tree);
16631938 kcryptd_io_write(io);
1939
+ cond_resched();
16641940 } while (!RB_EMPTY_ROOT(&write_tree));
16651941 blk_finish_plug(&plug);
16661942 }
....@@ -1687,8 +1963,9 @@
16871963
16881964 clone->bi_iter.bi_sector = cc->start + io->sector;
16891965
1690
- if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1691
- generic_make_request(clone);
1966
+ if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1967
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1968
+ submit_bio_noacct(clone);
16921969 return;
16931970 }
16941971
....@@ -1710,9 +1987,63 @@
17101987 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
17111988 }
17121989
1990
+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1991
+ struct convert_context *ctx)
1992
+
1993
+{
1994
+ if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1995
+ return false;
1996
+
1997
+ /*
1998
+ * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
1999
+ * constraints so they do not need to be issued inline by
2000
+ * kcryptd_crypt_write_convert().
2001
+ */
2002
+ switch (bio_op(ctx->bio_in)) {
2003
+ case REQ_OP_WRITE:
2004
+ case REQ_OP_WRITE_SAME:
2005
+ case REQ_OP_WRITE_ZEROES:
2006
+ return true;
2007
+ default:
2008
+ return false;
2009
+ }
2010
+}
2011
+
2012
+static void kcryptd_crypt_write_continue(struct work_struct *work)
2013
+{
2014
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2015
+ struct crypt_config *cc = io->cc;
2016
+ struct convert_context *ctx = &io->ctx;
2017
+ int crypt_finished;
2018
+ sector_t sector = io->sector;
2019
+ blk_status_t r;
2020
+
2021
+ wait_for_completion(&ctx->restart);
2022
+ reinit_completion(&ctx->restart);
2023
+
2024
+ r = crypt_convert(cc, &io->ctx, true, false);
2025
+ if (r)
2026
+ io->error = r;
2027
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2028
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2029
+ /* Wait for completion signaled by kcryptd_async_done() */
2030
+ wait_for_completion(&ctx->restart);
2031
+ crypt_finished = 1;
2032
+ }
2033
+
2034
+ /* Encryption was already finished, submit io now */
2035
+ if (crypt_finished) {
2036
+ kcryptd_crypt_write_io_submit(io, 0);
2037
+ io->sector = sector;
2038
+ }
2039
+
2040
+ crypt_dec_pending(io);
2041
+}
2042
+
17132043 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
17142044 {
17152045 struct crypt_config *cc = io->cc;
2046
+ struct convert_context *ctx = &io->ctx;
17162047 struct bio *clone;
17172048 int crypt_finished;
17182049 sector_t sector = io->sector;
....@@ -1722,7 +2053,7 @@
17222053 * Prevent io from disappearing until this function completes.
17232054 */
17242055 crypt_inc_pending(io);
1725
- crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
2056
+ crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
17262057
17272058 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
17282059 if (unlikely(!clone)) {
....@@ -1736,10 +2067,26 @@
17362067 sector += bio_sectors(clone);
17372068
17382069 crypt_inc_pending(io);
1739
- r = crypt_convert(cc, &io->ctx);
2070
+ r = crypt_convert(cc, ctx,
2071
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2072
+ /*
2073
+ * Crypto API backlogged the request, because its queue was full
2074
+ * and we're in softirq context, so continue from a workqueue
2075
+ * (TODO: is it actually possible to be in softirq in the write path?)
2076
+ */
2077
+ if (r == BLK_STS_DEV_RESOURCE) {
2078
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2079
+ queue_work(cc->crypt_queue, &io->work);
2080
+ return;
2081
+ }
17402082 if (r)
17412083 io->error = r;
1742
- crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
2084
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2085
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2086
+ /* Wait for completion signaled by kcryptd_async_done() */
2087
+ wait_for_completion(&ctx->restart);
2088
+ crypt_finished = 1;
2089
+ }
17432090
17442091 /* Encryption was already finished, submit io now */
17452092 if (crypt_finished) {
....@@ -1756,6 +2103,25 @@
17562103 crypt_dec_pending(io);
17572104 }
17582105
2106
+static void kcryptd_crypt_read_continue(struct work_struct *work)
2107
+{
2108
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2109
+ struct crypt_config *cc = io->cc;
2110
+ blk_status_t r;
2111
+
2112
+ wait_for_completion(&io->ctx.restart);
2113
+ reinit_completion(&io->ctx.restart);
2114
+
2115
+ r = crypt_convert(cc, &io->ctx, true, false);
2116
+ if (r)
2117
+ io->error = r;
2118
+
2119
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
2120
+ kcryptd_crypt_read_done(io);
2121
+
2122
+ crypt_dec_pending(io);
2123
+}
2124
+
17592125 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
17602126 {
17612127 struct crypt_config *cc = io->cc;
....@@ -1766,7 +2132,17 @@
17662132 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
17672133 io->sector);
17682134
1769
- r = crypt_convert(cc, &io->ctx);
2135
+ r = crypt_convert(cc, &io->ctx,
2136
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2137
+ /*
2138
+ * Crypto API backlogged the request, because its queue was full
2139
+ * and we're in softirq context, so continue from a workqueue
2140
+ */
2141
+ if (r == BLK_STS_DEV_RESOURCE) {
2142
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2143
+ queue_work(cc->crypt_queue, &io->work);
2144
+ return;
2145
+ }
17702146 if (r)
17712147 io->error = r;
17722148
....@@ -1798,7 +2174,8 @@
17982174 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
17992175
18002176 if (error == -EBADMSG) {
1801
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
2177
+ char b[BDEVNAME_SIZE];
2178
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
18022179 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
18032180 io->error = BLK_STS_PROTECTION;
18042181 } else if (error < 0)
....@@ -1809,10 +2186,21 @@
18092186 if (!atomic_dec_and_test(&ctx->cc_pending))
18102187 return;
18112188
1812
- if (bio_data_dir(io->base_bio) == READ)
2189
+ /*
2190
+ * The request is fully completed: for inline writes, let
2191
+ * kcryptd_crypt_write_convert() do the IO submission.
2192
+ */
2193
+ if (bio_data_dir(io->base_bio) == READ) {
18132194 kcryptd_crypt_read_done(io);
1814
- else
1815
- kcryptd_crypt_write_io_submit(io, 1);
2195
+ return;
2196
+ }
2197
+
2198
+ if (kcryptd_crypt_write_inline(cc, ctx)) {
2199
+ complete(&ctx->restart);
2200
+ return;
2201
+ }
2202
+
2203
+ kcryptd_crypt_write_io_submit(io, 1);
18162204 }
18172205
18182206 static void kcryptd_crypt(struct work_struct *work)
....@@ -1825,9 +2213,32 @@
18252213 kcryptd_crypt_write_convert(io);
18262214 }
18272215
2216
+static void kcryptd_crypt_tasklet(unsigned long work)
2217
+{
2218
+ kcryptd_crypt((struct work_struct *)work);
2219
+}
2220
+
18282221 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
18292222 {
18302223 struct crypt_config *cc = io->cc;
2224
+
2225
+ if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2226
+ (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2227
+ /*
2228
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
2229
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
2230
+ * it is being executed with irqs disabled.
2231
+ */
2232
+ if (in_irq() || irqs_disabled()) {
2233
+ io->in_tasklet = true;
2234
+ tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2235
+ tasklet_schedule(&io->tasklet);
2236
+ return;
2237
+ }
2238
+
2239
+ kcryptd_crypt(&io->work);
2240
+ return;
2241
+ }
18312242
18322243 INIT_WORK(&io->work, kcryptd_crypt);
18332244 queue_work(cc->crypt_queue, &io->work);
....@@ -1884,7 +2295,8 @@
18842295 return -ENOMEM;
18852296
18862297 for (i = 0; i < cc->tfms_count; i++) {
1887
- cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
2298
+ cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2299
+ CRYPTO_ALG_ALLOCATES_MEMORY);
18882300 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
18892301 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
18902302 crypt_free_tfms(cc);
....@@ -1897,7 +2309,7 @@
18972309 * algorithm implementation is used. Help people debug performance
18982310 * problems by logging the ->cra_driver_name.
18992311 */
1900
- DMINFO("%s using implementation \"%s\"", ciphermode,
2312
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
19012313 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
19022314 return 0;
19032315 }
....@@ -1910,14 +2322,15 @@
19102322 if (!cc->cipher_tfm.tfms)
19112323 return -ENOMEM;
19122324
1913
- cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
2325
+ cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2326
+ CRYPTO_ALG_ALLOCATES_MEMORY);
19142327 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
19152328 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
19162329 crypt_free_tfms(cc);
19172330 return err;
19182331 }
19192332
1920
- DMINFO("%s using implementation \"%s\"", ciphermode,
2333
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
19212334 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
19222335 return 0;
19232336 }
....@@ -2011,12 +2424,47 @@
20112424 return false;
20122425 }
20132426
2427
+static int set_key_user(struct crypt_config *cc, struct key *key)
2428
+{
2429
+ const struct user_key_payload *ukp;
2430
+
2431
+ ukp = user_key_payload_locked(key);
2432
+ if (!ukp)
2433
+ return -EKEYREVOKED;
2434
+
2435
+ if (cc->key_size != ukp->datalen)
2436
+ return -EINVAL;
2437
+
2438
+ memcpy(cc->key, ukp->data, cc->key_size);
2439
+
2440
+ return 0;
2441
+}
2442
+
2443
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2444
+static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2445
+{
2446
+ const struct encrypted_key_payload *ekp;
2447
+
2448
+ ekp = key->payload.data[0];
2449
+ if (!ekp)
2450
+ return -EKEYREVOKED;
2451
+
2452
+ if (cc->key_size != ekp->decrypted_datalen)
2453
+ return -EINVAL;
2454
+
2455
+ memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2456
+
2457
+ return 0;
2458
+}
2459
+#endif /* CONFIG_ENCRYPTED_KEYS */
2460
+
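crypt_set_keyring_key() below now accepts three kernel keyring key types for the key field of the mapping table; a hypothetical illustration of the accepted forms (the description names are made up, the leading number is the key size in bytes):

  :64:logon:my_volume_key       keyring logon key, payload copied via set_key_user()
  :64:user:my_volume_key        keyring user key, payload copied via set_key_user()
  :64:encrypted:my_volume_key   encrypted key, decrypted payload copied via set_key_encrypted()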
20142461 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
20152462 {
20162463 char *new_key_string, *key_desc;
20172464 int ret;
2465
+ struct key_type *type;
20182466 struct key *key;
2019
- const struct user_key_payload *ukp;
2467
+ int (*set_key)(struct crypt_config *cc, struct key *key);
20202468
20212469 /*
20222470 * Reject key_string with whitespace. dm core currently lacks code for
....@@ -2032,39 +2480,40 @@
20322480 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
20332481 return -EINVAL;
20342482
2035
- if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
2036
- strncmp(key_string, "user:", key_desc - key_string + 1))
2483
+ if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2484
+ type = &key_type_logon;
2485
+ set_key = set_key_user;
2486
+ } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2487
+ type = &key_type_user;
2488
+ set_key = set_key_user;
2489
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2490
+ } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2491
+ type = &key_type_encrypted;
2492
+ set_key = set_key_encrypted;
2493
+#endif
2494
+ } else {
20372495 return -EINVAL;
2496
+ }
20382497
20392498 new_key_string = kstrdup(key_string, GFP_KERNEL);
20402499 if (!new_key_string)
20412500 return -ENOMEM;
20422501
2043
- key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
2044
- key_desc + 1, NULL);
2502
+ key = request_key(type, key_desc + 1, NULL);
20452503 if (IS_ERR(key)) {
2046
- kzfree(new_key_string);
2504
+ kfree_sensitive(new_key_string);
20472505 return PTR_ERR(key);
20482506 }
20492507
20502508 down_read(&key->sem);
20512509
2052
- ukp = user_key_payload_locked(key);
2053
- if (!ukp) {
2510
+ ret = set_key(cc, key);
2511
+ if (ret < 0) {
20542512 up_read(&key->sem);
20552513 key_put(key);
2056
- kzfree(new_key_string);
2057
- return -EKEYREVOKED;
2514
+ kfree_sensitive(new_key_string);
2515
+ return ret;
20582516 }
2059
-
2060
- if (cc->key_size != ukp->datalen) {
2061
- up_read(&key->sem);
2062
- key_put(key);
2063
- kzfree(new_key_string);
2064
- return -EINVAL;
2065
- }
2066
-
2067
- memcpy(cc->key, ukp->data, cc->key_size);
20682517
20692518 up_read(&key->sem);
20702519 key_put(key);
....@@ -2076,10 +2525,10 @@
20762525
20772526 if (!ret) {
20782527 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2079
- kzfree(cc->key_string);
2528
+ kfree_sensitive(cc->key_string);
20802529 cc->key_string = new_key_string;
20812530 } else
2082
- kzfree(new_key_string);
2531
+ kfree_sensitive(new_key_string);
20832532
20842533 return ret;
20852534 }
....@@ -2116,10 +2565,10 @@
21162565
21172566 static int get_key_size(char **key_string)
21182567 {
2119
- return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2568
+ return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
21202569 }
21212570
2122
-#endif
2571
+#endif /* CONFIG_KEYS */
21232572
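A quick worked example of the plain hex-key path (hypothetical values): a key given as hex is half as many bytes as it has characters, so get_key_size() of a 128-character string reports a 64-byte (512-bit) key, which crypt_set_key() then decodes from its hex representation.

#include <assert.h>
#include <string.h>

int main(void)
{
	/* hypothetical 16-byte key given as 32 hex characters */
	const char *hex_key = "000102030405060708090a0b0c0d0e0f";

	assert(strlen(hex_key) >> 1 == 16);
	return 0;
}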
21242573 static int crypt_set_key(struct crypt_config *cc, char *key)
21252574 {
....@@ -2140,7 +2589,7 @@
21402589 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
21412590
21422591 /* wipe references to any kernel keyring key */
2143
- kzfree(cc->key_string);
2592
+ kfree_sensitive(cc->key_string);
21442593 cc->key_string = NULL;
21452594
21462595 /* Decode key from its hex representation. */
....@@ -2164,7 +2613,15 @@
21642613
21652614 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
21662615 get_random_bytes(&cc->key, cc->key_size);
2167
- kzfree(cc->key_string);
2616
+
2617
+ /* Wipe IV private keys */
2618
+ if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2619
+ r = cc->iv_gen_ops->wipe(cc);
2620
+ if (r)
2621
+ return r;
2622
+ }
2623
+
2624
+ kfree_sensitive(cc->key_string);
21682625 cc->key_string = NULL;
21692626 r = crypt_setkey(cc);
21702627 memset(&cc->key, 0, cc->key_size * sizeof(u8));
....@@ -2174,7 +2631,7 @@
21742631
21752632 static void crypt_calculate_pages_per_client(void)
21762633 {
2177
- unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
2634
+ unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
21782635
21792636 if (!dm_crypt_clients_n)
21802637 return;
....@@ -2248,16 +2705,15 @@
22482705 if (cc->dev)
22492706 dm_put_device(ti, cc->dev);
22502707
2251
- kzfree(cc->cipher);
2252
- kzfree(cc->cipher_string);
2253
- kzfree(cc->key_string);
2254
- kzfree(cc->cipher_auth);
2255
- kzfree(cc->authenc_key);
2708
+ kfree_sensitive(cc->cipher_string);
2709
+ kfree_sensitive(cc->key_string);
2710
+ kfree_sensitive(cc->cipher_auth);
2711
+ kfree_sensitive(cc->authenc_key);
22562712
22572713 mutex_destroy(&cc->bio_alloc_lock);
22582714
22592715 /* Must zero key material before freeing */
2260
- kzfree(cc);
2716
+ kfree_sensitive(cc);
22612717
22622718 spin_lock(&dm_crypt_clients_lock);
22632719 WARN_ON(!dm_crypt_clients_n);
....@@ -2299,7 +2755,16 @@
22992755 cc->iv_gen_ops = &crypt_iv_benbi_ops;
23002756 else if (strcmp(ivmode, "null") == 0)
23012757 cc->iv_gen_ops = &crypt_iv_null_ops;
2302
- else if (strcmp(ivmode, "lmk") == 0) {
2758
+ else if (strcmp(ivmode, "eboiv") == 0)
2759
+ cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2760
+ else if (strcmp(ivmode, "elephant") == 0) {
2761
+ cc->iv_gen_ops = &crypt_iv_elephant_ops;
2762
+ cc->key_parts = 2;
2763
+ cc->key_extra_size = cc->key_size / 2;
2764
+ if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2765
+ return -EINVAL;
2766
+ set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2767
+ } else if (strcmp(ivmode, "lmk") == 0) {
23032768 cc->iv_gen_ops = &crypt_iv_lmk_ops;
23042769 /*
23052770 * Version 2 and 3 is recognised according
....@@ -2328,52 +2793,6 @@
23282793 }
23292794
23302795 /*
2331
- * Workaround to parse cipher algorithm from crypto API spec.
2332
- * The cc->cipher is currently used only in ESSIV.
2333
- * This should be probably done by crypto-api calls (once available...)
2334
- */
2335
-static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
2336
-{
2337
- const char *alg_name = NULL;
2338
- char *start, *end;
2339
-
2340
- if (crypt_integrity_aead(cc)) {
2341
- alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
2342
- if (!alg_name)
2343
- return -EINVAL;
2344
- if (crypt_integrity_hmac(cc)) {
2345
- alg_name = strchr(alg_name, ',');
2346
- if (!alg_name)
2347
- return -EINVAL;
2348
- }
2349
- alg_name++;
2350
- } else {
2351
- alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
2352
- if (!alg_name)
2353
- return -EINVAL;
2354
- }
2355
-
2356
- start = strchr(alg_name, '(');
2357
- end = strchr(alg_name, ')');
2358
-
2359
- if (!start && !end) {
2360
- cc->cipher = kstrdup(alg_name, GFP_KERNEL);
2361
- return cc->cipher ? 0 : -ENOMEM;
2362
- }
2363
-
2364
- if (!start || !end || ++start >= end)
2365
- return -EINVAL;
2366
-
2367
- cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
2368
- if (!cc->cipher)
2369
- return -ENOMEM;
2370
-
2371
- strncpy(cc->cipher, start, end - start);
2372
-
2373
- return 0;
2374
-}
2375
-
2376
-/*
23772796 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
23782797 * The HMAC is needed to calculate tag size (HMAC digest size).
23792798 * This should be probably done by crypto-api calls (once available...)
....@@ -2396,7 +2815,7 @@
23962815 return -ENOMEM;
23972816 strncpy(mac_alg, start, end - start);
23982817
2399
- mac = crypto_alloc_ahash(mac_alg, 0, 0);
2818
+ mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
24002819 kfree(mac_alg);
24012820
24022821 if (IS_ERR(mac))
....@@ -2416,7 +2835,7 @@
24162835 char **ivmode, char **ivopts)
24172836 {
24182837 struct crypt_config *cc = ti->private;
2419
- char *tmp, *cipher_api;
2838
+ char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
24202839 int ret = -EINVAL;
24212840
24222841 cc->tfms_count = 1;
....@@ -2442,8 +2861,31 @@
24422861 /* The rest is crypto API spec */
24432862 cipher_api = tmp;
24442863
2864
+ /* Alloc AEAD, can be used only in new format. */
2865
+ if (crypt_integrity_aead(cc)) {
2866
+ ret = crypt_ctr_auth_cipher(cc, cipher_api);
2867
+ if (ret < 0) {
2868
+ ti->error = "Invalid AEAD cipher spec";
2869
+ return -ENOMEM;
2870
+ }
2871
+ }
2872
+
24452873 if (*ivmode && !strcmp(*ivmode, "lmk"))
24462874 cc->tfms_count = 64;
2875
+
2876
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
2877
+ if (!*ivopts) {
2878
+ ti->error = "Digest algorithm missing for ESSIV mode";
2879
+ return -EINVAL;
2880
+ }
2881
+ ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2882
+ cipher_api, *ivopts);
2883
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2884
+ ti->error = "Cannot allocate cipher string";
2885
+ return -ENOMEM;
2886
+ }
2887
+ cipher_api = buf;
2888
+ }
24472889
24482890 cc->key_parts = cc->tfms_count;
24492891
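As an illustration of the string built above (hedged example, cipher names chosen arbitrarily): with the capi: table syntax the IV wrapper is folded into the crypto API spec, and the old syntax ends up in the same place via the snprintf further down, so ESSIV is now handled entirely by the crypto API template rather than by dm-crypt itself.

  capi:cbc(aes)-essiv:sha256   ->  skcipher allocated as "essiv(cbc(aes),sha256)"
  aes-cbc-essiv:sha256         ->  skcipher allocated as "essiv(cbc(aes),sha256)"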
....@@ -2454,22 +2896,10 @@
24542896 return ret;
24552897 }
24562898
2457
- /* Alloc AEAD, can be used only in new format. */
2458
- if (crypt_integrity_aead(cc)) {
2459
- ret = crypt_ctr_auth_cipher(cc, cipher_api);
2460
- if (ret < 0) {
2461
- ti->error = "Invalid AEAD cipher spec";
2462
- return -ENOMEM;
2463
- }
2899
+ if (crypt_integrity_aead(cc))
24642900 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2465
- } else
2901
+ else
24662902 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2467
-
2468
- ret = crypt_ctr_blkdev_cipher(cc);
2469
- if (ret < 0) {
2470
- ti->error = "Cannot allocate cipher string";
2471
- return -ENOMEM;
2472
- }
24732903
24742904 return 0;
24752905 }
....@@ -2505,10 +2935,6 @@
25052935 }
25062936 cc->key_parts = cc->tfms_count;
25072937
2508
- cc->cipher = kstrdup(cipher, GFP_KERNEL);
2509
- if (!cc->cipher)
2510
- goto bad_mem;
2511
-
25122938 chainmode = strsep(&tmp, "-");
25132939 *ivmode = strsep(&tmp, ":");
25142940 *ivopts = tmp;
....@@ -2531,9 +2957,19 @@
25312957 if (!cipher_api)
25322958 goto bad_mem;
25332959
2534
- ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2535
- "%s(%s)", chainmode, cipher);
2536
- if (ret < 0) {
2960
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
2961
+ if (!*ivopts) {
2962
+ ti->error = "Digest algorithm missing for ESSIV mode";
2963
+ kfree(cipher_api);
2964
+ return -EINVAL;
2965
+ }
2966
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2967
+ "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2968
+ } else {
2969
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2970
+ "%s(%s)", chainmode, cipher);
2971
+ }
2972
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
25372973 kfree(cipher_api);
25382974 goto bad_mem;
25392975 }
....@@ -2614,7 +3050,7 @@
26143050 struct crypt_config *cc = ti->private;
26153051 struct dm_arg_set as;
26163052 static const struct dm_arg _args[] = {
2617
- {0, 6, "Invalid number of feature args"},
3053
+ {0, 8, "Invalid number of feature args"},
26183054 };
26193055 unsigned int opt_params, val;
26203056 const char *opt_string, *sval;
....@@ -2644,6 +3080,10 @@
26443080
26453081 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
26463082 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3083
+ else if (!strcasecmp(opt_string, "no_read_workqueue"))
3084
+ set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3085
+ else if (!strcasecmp(opt_string, "no_write_workqueue"))
3086
+ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
26473087 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
26483088 if (val == 0 || val > MAX_TAG_SIZE) {
26493089 ti->error = "Invalid integrity arguments";
....@@ -2684,6 +3124,21 @@
26843124 return 0;
26853125 }
26863126
3127
+#ifdef CONFIG_BLK_DEV_ZONED
3128
+
3129
+static int crypt_report_zones(struct dm_target *ti,
3130
+ struct dm_report_zones_args *args, unsigned int nr_zones)
3131
+{
3132
+ struct crypt_config *cc = ti->private;
3133
+ sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
3134
+
3135
+ args->start = cc->start;
3136
+ return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
3137
+ dm_report_zones_cb, args);
3138
+}
3139
+
3140
+#endif
3141
+
26873142 /*
26883143 * Construct an encryption mapping:
26893144 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
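A hypothetical invocation tying the pieces together (device path, sizes and key description are made up; no_read_workqueue/no_write_workqueue are the optional flags added by this patch and make the crypto run in the submitting context instead of the kcryptd workqueues):

  # 1 GiB mapping, AES-XTS keyed from a keyring logon key:
  dmsetup create secdev --table "0 2097152 crypt aes-xts-plain64 :64:logon:secdevkey 0 /dev/sdb1 0 2 no_read_workqueue no_write_workqueue"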
....@@ -2691,6 +3146,7 @@
26913146 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
26923147 {
26933148 struct crypt_config *cc;
3149
+ const char *devname = dm_table_device_name(ti->table);
26943150 int key_size;
26953151 unsigned int align_mask;
26963152 unsigned long long tmpll;
....@@ -2709,7 +3165,7 @@
27093165 return -EINVAL;
27103166 }
27113167
2712
- cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
3168
+ cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
27133169 if (!cc) {
27143170 ti->error = "Cannot allocate encryption context";
27153171 return -ENOMEM;
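struct_size() is the overflow-checked helper for sizing a struct with a trailing flexible array (cc->key was changed from key[0] to key[] earlier in this patch); a small sketch of what the expression evaluates to (hypothetical standalone equivalent, ignoring the saturate-on-overflow behaviour of the real macro):

#include <stddef.h>

struct example {
	int fields;
	unsigned char key[];	/* flexible array member, like cc->key */
};

/* struct_size(cc, key, key_size) is essentially: */
static size_t example_size(size_t key_size)
{
	return sizeof(struct example) + key_size * sizeof(unsigned char);
}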
....@@ -2816,6 +3272,16 @@
28163272 }
28173273 cc->start = tmpll;
28183274
3275
+ /*
3276
+ * For zoned block devices, we need to preserve the issuer write
3277
+ * ordering. To do so, disable write workqueues and force inline
3278
+ * encryption completion.
3279
+ */
3280
+ if (bdev_is_zoned(cc->dev->bdev)) {
3281
+ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3282
+ set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3283
+ }
3284
+
28193285 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
28203286 ret = crypt_integrity_ctr(cc, ti);
28213287 if (ret)
....@@ -2836,18 +3302,19 @@
28363302 }
28373303
28383304 ret = -ENOMEM;
2839
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
3305
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
28403306 if (!cc->io_queue) {
28413307 ti->error = "Couldn't create kcryptd io queue";
28423308 goto bad;
28433309 }
28443310
28453311 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
2846
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
3312
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3313
+ 1, devname);
28473314 else
2848
- cc->crypt_queue = alloc_workqueue("kcryptd",
2849
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
2850
- num_online_cpus());
3315
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3316
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3317
+ num_online_cpus(), devname);
28513318 if (!cc->crypt_queue) {
28523319 ti->error = "Couldn't create kcryptd queue";
28533320 goto bad;
....@@ -2856,7 +3323,7 @@
28563323 spin_lock_init(&cc->write_thread_lock);
28573324 cc->write_tree = RB_ROOT;
28583325
2859
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
3326
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
28603327 if (IS_ERR(cc->write_thread)) {
28613328 ret = PTR_ERR(cc->write_thread);
28623329 cc->write_thread = NULL;
....@@ -2866,6 +3333,7 @@
28663333 wake_up_process(cc->write_thread);
28673334
28683335 ti->num_flush_bios = 1;
3336
+ ti->limit_swap_bios = true;
28693337
28703338 return 0;
28713339
....@@ -2940,6 +3408,11 @@
29403408 return DM_MAPIO_SUBMITTED;
29413409 }
29423410
3411
+static char hex2asc(unsigned char c)
3412
+{
3413
+ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
3414
+}
3415
+
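hex2asc() maps a nibble to its lowercase hex digit without a table or branch; a short worked check of the trick (an illustration, not part of the patch): for c in 0..9 the term (unsigned)(9 - c) >> 4 is 0, so the result is c + '0', while for c in 10..15 the subtraction wraps to a huge unsigned value, the shift leaves the low bits set, and masking with 0x27 adds 39, giving c + '0' + 39 == c - 10 + 'a'.

#include <assert.h>

static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	assert(hex2asc(0x0) == '0');
	assert(hex2asc(0x9) == '9');
	assert(hex2asc(0xa) == 'a');
	assert(hex2asc(0xf) == 'f');
	return 0;
}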
29433416 static void crypt_status(struct dm_target *ti, status_type_t type,
29443417 unsigned status_flags, char *result, unsigned maxlen)
29453418 {
....@@ -2958,9 +3431,12 @@
29583431 if (cc->key_size > 0) {
29593432 if (cc->key_string)
29603433 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
2961
- else
2962
- for (i = 0; i < cc->key_size; i++)
2963
- DMEMIT("%02x", cc->key[i]);
3434
+ else {
3435
+ for (i = 0; i < cc->key_size; i++) {
3436
+ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3437
+ hex2asc(cc->key[i] & 0xf));
3438
+ }
3439
+ }
29643440 } else
29653441 DMEMIT("-");
29663442
....@@ -2970,6 +3446,8 @@
29703446 num_feature_args += !!ti->num_discard_bios;
29713447 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
29723448 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3449
+ num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3450
+ num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
29733451 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
29743452 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
29753453 if (cc->on_disk_tag_size)
....@@ -2982,6 +3460,10 @@
29823460 DMEMIT(" same_cpu_crypt");
29833461 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
29843462 DMEMIT(" submit_from_crypt_cpus");
3463
+ if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3464
+ DMEMIT(" no_read_workqueue");
3465
+ if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3466
+ DMEMIT(" no_write_workqueue");
29853467 if (cc->on_disk_tag_size)
29863468 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
29873469 if (cc->sector_size != (1 << SECTOR_SHIFT))
....@@ -3056,14 +3538,8 @@
30563538 memset(cc->key, 0, cc->key_size * sizeof(u8));
30573539 return ret;
30583540 }
3059
- if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
3060
- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
3061
- ret = cc->iv_gen_ops->wipe(cc);
3062
- if (ret)
3063
- return ret;
3064
- }
3541
+ if (argc == 2 && !strcasecmp(argv[1], "wipe"))
30653542 return crypt_wipe_key(cc);
3066
- }
30673543 }
30683544
30693545 error:
....@@ -3100,10 +3576,14 @@
31003576
31013577 static struct target_type crypt_target = {
31023578 .name = "crypt",
3103
- .version = {1, 18, 1},
3579
+ .version = {1, 22, 0},
31043580 .module = THIS_MODULE,
31053581 .ctr = crypt_ctr,
31063582 .dtr = crypt_dtr,
3583
+#ifdef CONFIG_BLK_DEV_ZONED
3584
+ .features = DM_TARGET_ZONED_HM,
3585
+ .report_zones = crypt_report_zones,
3586
+#endif
31073587 .map = crypt_map,
31083588 .status = crypt_status,
31093589 .postsuspend = crypt_postsuspend,