forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/md/dm-crypt.c
....@@ -1,8 +1,8 @@
11 /*
22 * Copyright (C) 2003 Jana Saout <jana@saout.de>
33 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
5
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
4
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
5
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
66 *
77 * This file is released under the GPL.
88 */
....@@ -34,7 +34,9 @@
3434 #include <crypto/aead.h>
3535 #include <crypto/authenc.h>
3636 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
37
+#include <linux/key-type.h>
3738 #include <keys/user-type.h>
39
+#include <keys/encrypted-type.h>
3840
3941 #include <linux/device-mapper.h>
4042
....@@ -67,6 +69,7 @@
6769 u8 *integrity_metadata;
6870 bool integrity_metadata_from_pool;
6971 struct work_struct work;
72
+ struct tasklet_struct tasklet;
7073
7174 struct convert_context ctx;
7275
....@@ -98,11 +101,6 @@
98101 struct dm_crypt_request *dmreq);
99102 };
100103
101
-struct iv_essiv_private {
102
- struct crypto_shash *hash_tfm;
103
- u8 *salt;
104
-};
105
-
106104 struct iv_benbi_private {
107105 int shift;
108106 };
....@@ -120,16 +118,24 @@
120118 u8 *whitening;
121119 };
122120
121
+#define ELEPHANT_MAX_KEY_SIZE 32
122
+struct iv_elephant_private {
123
+ struct crypto_skcipher *tfm;
124
+};
125
+
123126 /*
124127 * Crypt: maps a linear range of a block device
125128 * and encrypts / decrypts at the same time.
126129 */
127130 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
128
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
131
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
132
+ DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
133
+ DM_CRYPT_WRITE_INLINE };
129134
130135 enum cipher_flags {
131136 CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
132137 CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
138
+ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
133139 };
134140
135141 /*
....@@ -148,25 +154,22 @@
148154 struct task_struct *write_thread;
149155 struct rb_root write_tree;
150156
151
- char *cipher;
152157 char *cipher_string;
153158 char *cipher_auth;
154159 char *key_string;
155160
156161 const struct crypt_iv_operations *iv_gen_ops;
157162 union {
158
- struct iv_essiv_private essiv;
159163 struct iv_benbi_private benbi;
160164 struct iv_lmk_private lmk;
161165 struct iv_tcw_private tcw;
166
+ struct iv_elephant_private elephant;
162167 } iv_gen_private;
163168 u64 iv_offset;
164169 unsigned int iv_size;
165170 unsigned short int sector_size;
166171 unsigned char sector_shift;
167172
168
- /* ESSIV: struct crypto_cipher *essiv_tfm */
169
- void *iv_private;
170173 union {
171174 struct crypto_skcipher **tfms;
172175 struct crypto_aead **tfms_aead;
....@@ -214,7 +217,7 @@
214217 struct mutex bio_alloc_lock;
215218
216219 u8 *authenc_key; /* space for keys in authenc() format (if used) */
217
- u8 key[0];
220
+ u8 key[];
218221 };
219222
220223 #define MIN_IOS 64
....@@ -231,6 +234,8 @@
231234 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
232235 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
233236 struct scatterlist *sg);
237
+
238
+static bool crypt_integrity_aead(struct crypt_config *cc);
234239
235240 /*
236241 * Use this to access cipher attributes that are independent of the key.
....@@ -291,8 +296,14 @@
291296 * Note that this encryption scheme is vulnerable to watermarking attacks
292297 * and should be used for old compatible containers access only.
293298 *
294
- * plumb: unimplemented, see:
295
- * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
299
+ * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
300
+ * The IV is encrypted little-endian byte-offset (with the same key
301
+ * and cipher as the volume).
302
+ *
303
+ * elephant: The extended version of eboiv with additional Elephant diffuser
304
+ * used with Bitlocker CBC mode.
305
+ * This mode was used in older Windows systems
306
+ * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
296307 */
297308
298309 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
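Note on the eboiv scheme documented above: the IV for a sector is simply the volume-key encryption of that sector's little-endian byte offset, zero-padded to one cipher block. A minimal userspace sketch of the construction (block_encrypt() is an assumed, illustrative helper standing in for one block-cipher call with the volume key):

    #include <stdint.h>
    #include <string.h>

    /* assumed helper: encrypt one cipher block in place with the volume key */
    void block_encrypt(uint8_t *block, size_t len);

    static void eboiv_iv(uint8_t *iv, size_t iv_size,
                         uint64_t sector, unsigned int sector_size)
    {
            uint64_t offset = sector * sector_size;
            int i;

            /* plaintext block: little-endian byte offset, zero padded */
            memset(iv, 0, iv_size);
            for (i = 0; i < 8; i++)
                    iv[i] = (uint8_t)(offset >> (8 * i));

            block_encrypt(iv, iv_size);     /* IV = E_K(byte offset) */
    }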
....@@ -323,158 +334,15 @@
323334 return 0;
324335 }
325336
326
-/* Initialise ESSIV - compute salt but no local memory allocations */
327
-static int crypt_iv_essiv_init(struct crypt_config *cc)
328
-{
329
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
330
- SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
331
- struct crypto_cipher *essiv_tfm;
332
- int err;
333
-
334
- desc->tfm = essiv->hash_tfm;
335
- desc->flags = 0;
336
-
337
- err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338
- shash_desc_zero(desc);
339
- if (err)
340
- return err;
341
-
342
- essiv_tfm = cc->iv_private;
343
-
344
- err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
345
- crypto_shash_digestsize(essiv->hash_tfm));
346
- if (err)
347
- return err;
348
-
349
- return 0;
350
-}
351
-
352
-/* Wipe salt and reset key derived from volume key */
353
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
354
-{
355
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
356
- unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
357
- struct crypto_cipher *essiv_tfm;
358
- int r, err = 0;
359
-
360
- memset(essiv->salt, 0, salt_size);
361
-
362
- essiv_tfm = cc->iv_private;
363
- r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
364
- if (r)
365
- err = r;
366
-
367
- return err;
368
-}
369
-
370
-/* Allocate the cipher for ESSIV */
371
-static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
372
- struct dm_target *ti,
373
- const u8 *salt,
374
- unsigned int saltsize)
375
-{
376
- struct crypto_cipher *essiv_tfm;
377
- int err;
378
-
379
- /* Setup the essiv_tfm with the given salt */
380
- essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
381
- if (IS_ERR(essiv_tfm)) {
382
- ti->error = "Error allocating crypto tfm for ESSIV";
383
- return essiv_tfm;
384
- }
385
-
386
- if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
387
- ti->error = "Block size of ESSIV cipher does "
388
- "not match IV size of block cipher";
389
- crypto_free_cipher(essiv_tfm);
390
- return ERR_PTR(-EINVAL);
391
- }
392
-
393
- err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
394
- if (err) {
395
- ti->error = "Failed to set key for ESSIV cipher";
396
- crypto_free_cipher(essiv_tfm);
397
- return ERR_PTR(err);
398
- }
399
-
400
- return essiv_tfm;
401
-}
402
-
403
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
404
-{
405
- struct crypto_cipher *essiv_tfm;
406
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
407
-
408
- crypto_free_shash(essiv->hash_tfm);
409
- essiv->hash_tfm = NULL;
410
-
411
- kzfree(essiv->salt);
412
- essiv->salt = NULL;
413
-
414
- essiv_tfm = cc->iv_private;
415
-
416
- if (essiv_tfm)
417
- crypto_free_cipher(essiv_tfm);
418
-
419
- cc->iv_private = NULL;
420
-}
421
-
422
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
423
- const char *opts)
424
-{
425
- struct crypto_cipher *essiv_tfm = NULL;
426
- struct crypto_shash *hash_tfm = NULL;
427
- u8 *salt = NULL;
428
- int err;
429
-
430
- if (!opts) {
431
- ti->error = "Digest algorithm missing for ESSIV mode";
432
- return -EINVAL;
433
- }
434
-
435
- /* Allocate hash algorithm */
436
- hash_tfm = crypto_alloc_shash(opts, 0, 0);
437
- if (IS_ERR(hash_tfm)) {
438
- ti->error = "Error initializing ESSIV hash";
439
- err = PTR_ERR(hash_tfm);
440
- goto bad;
441
- }
442
-
443
- salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
444
- if (!salt) {
445
- ti->error = "Error kmallocing salt storage in ESSIV";
446
- err = -ENOMEM;
447
- goto bad;
448
- }
449
-
450
- cc->iv_gen_private.essiv.salt = salt;
451
- cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
452
-
453
- essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
454
- crypto_shash_digestsize(hash_tfm));
455
- if (IS_ERR(essiv_tfm)) {
456
- crypt_iv_essiv_dtr(cc);
457
- return PTR_ERR(essiv_tfm);
458
- }
459
- cc->iv_private = essiv_tfm;
460
-
461
- return 0;
462
-
463
-bad:
464
- if (hash_tfm && !IS_ERR(hash_tfm))
465
- crypto_free_shash(hash_tfm);
466
- kfree(salt);
467
- return err;
468
-}
469
-
470337 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
471338 struct dm_crypt_request *dmreq)
472339 {
473
- struct crypto_cipher *essiv_tfm = cc->iv_private;
474
-
340
+ /*
341
+ * ESSIV encryption of the IV is now handled by the crypto API,
342
+ * so just pass the plain sector number here.
343
+ */
475344 memset(iv, 0, cc->iv_size);
476345 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
477
- crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
478346
479347 return 0;
480348 }
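With the per-target ESSIV code removed above, the generator only writes the plain little-endian sector number; the IV encryption itself moved into the crypto API's "essiv" template, which crypt_ctr_cipher_*() selects further down by building a name such as essiv(cbc(aes),sha256). Conceptually (a sketch, not the template's exact internals):

    /*
     *      salt = Hash(volume_key)                 e.g. sha256
     *      IV   = E_salt(le64(sector_number))
     *
     * so cc->iv_private and struct iv_essiv_private are no longer needed.
     */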
....@@ -485,7 +353,7 @@
485353 unsigned bs;
486354 int log;
487355
488
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
356
+ if (crypt_integrity_aead(cc))
489357 bs = crypto_aead_blocksize(any_tfm_aead(cc));
490358 else
491359 bs = crypto_skcipher_blocksize(any_tfm(cc));
....@@ -542,7 +410,7 @@
542410 crypto_free_shash(lmk->hash_tfm);
543411 lmk->hash_tfm = NULL;
544412
545
- kzfree(lmk->seed);
413
+ kfree_sensitive(lmk->seed);
546414 lmk->seed = NULL;
547415 }
548416
....@@ -556,7 +424,8 @@
556424 return -EINVAL;
557425 }
558426
559
- lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
427
+ lmk->hash_tfm = crypto_alloc_shash("md5", 0,
428
+ CRYPTO_ALG_ALLOCATES_MEMORY);
560429 if (IS_ERR(lmk->hash_tfm)) {
561430 ti->error = "Error initializing LMK hash";
562431 return PTR_ERR(lmk->hash_tfm);
....@@ -612,7 +481,6 @@
612481 int i, r;
613482
614483 desc->tfm = lmk->hash_tfm;
615
- desc->flags = 0;
616484
617485 r = crypto_shash_init(desc);
618486 if (r)
....@@ -694,9 +562,9 @@
694562 {
695563 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
696564
697
- kzfree(tcw->iv_seed);
565
+ kfree_sensitive(tcw->iv_seed);
698566 tcw->iv_seed = NULL;
699
- kzfree(tcw->whitening);
567
+ kfree_sensitive(tcw->whitening);
700568 tcw->whitening = NULL;
701569
702570 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
....@@ -719,7 +587,8 @@
719587 return -EINVAL;
720588 }
721589
722
- tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
590
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
591
+ CRYPTO_ALG_ALLOCATES_MEMORY);
723592 if (IS_ERR(tcw->crc32_tfm)) {
724593 ti->error = "Error initializing CRC32 in TCW";
725594 return PTR_ERR(tcw->crc32_tfm);
....@@ -774,7 +643,6 @@
774643
775644 /* calculate crc32 for every 32bit part and xor it */
776645 desc->tfm = tcw->crc32_tfm;
777
- desc->flags = 0;
778646 for (i = 0; i < 4; i++) {
779647 r = crypto_shash_init(desc);
780648 if (r)
....@@ -850,6 +718,334 @@
850718 return 0;
851719 }
852720
721
+static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
722
+ const char *opts)
723
+{
724
+ if (crypt_integrity_aead(cc)) {
725
+ ti->error = "AEAD transforms not supported for EBOIV";
726
+ return -EINVAL;
727
+ }
728
+
729
+ if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
730
+ ti->error = "Block size of EBOIV cipher does "
731
+ "not match IV size of block cipher";
732
+ return -EINVAL;
733
+ }
734
+
735
+ return 0;
736
+}
737
+
738
+static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
739
+ struct dm_crypt_request *dmreq)
740
+{
741
+ u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
742
+ struct skcipher_request *req;
743
+ struct scatterlist src, dst;
744
+ DECLARE_CRYPTO_WAIT(wait);
745
+ int err;
746
+
747
+ req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
748
+ if (!req)
749
+ return -ENOMEM;
750
+
751
+ memset(buf, 0, cc->iv_size);
752
+ *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
753
+
754
+ sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
755
+ sg_init_one(&dst, iv, cc->iv_size);
756
+ skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
757
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
758
+ err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
759
+ skcipher_request_free(req);
760
+
761
+ return err;
762
+}
763
+
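A subtlety in crypt_iv_eboiv_gen() above: no separate ECB transform is allocated. The byte offset is passed as the chaining IV while one block of ZERO_PAGE(0) is encrypted with the volume's own skcipher, which for the CBC mode used by BitLocker-compatible mappings yields exactly the value wanted:

    /*
     *      C_1 = E_K(P_1 xor IV)
     *          = E_K(0   xor byte_offset)      P_1 is one zero block
     *          = E_K(byte_offset)              == the EBOIV for this sector
     */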
764
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
765
+{
766
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
767
+
768
+ crypto_free_skcipher(elephant->tfm);
769
+ elephant->tfm = NULL;
770
+}
771
+
772
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
773
+ const char *opts)
774
+{
775
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
776
+ int r;
777
+
778
+ elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
779
+ CRYPTO_ALG_ALLOCATES_MEMORY);
780
+ if (IS_ERR(elephant->tfm)) {
781
+ r = PTR_ERR(elephant->tfm);
782
+ elephant->tfm = NULL;
783
+ return r;
784
+ }
785
+
786
+ r = crypt_iv_eboiv_ctr(cc, ti, NULL);
787
+ if (r)
788
+ crypt_iv_elephant_dtr(cc);
789
+ return r;
790
+}
791
+
792
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
793
+{
794
+#ifndef __LITTLE_ENDIAN
795
+ int i;
796
+
797
+ for (i = 0; i < n; i++)
798
+ d[i] = le32_to_cpu((__le32)d[i]);
799
+#endif
800
+}
801
+
802
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
803
+{
804
+#ifndef __LITTLE_ENDIAN
805
+ int i;
806
+
807
+ for (i = 0; i < n; i++)
808
+ d[i] = cpu_to_le32((u32)d[i]);
809
+#endif
810
+}
811
+
812
+static void diffuser_a_decrypt(u32 *d, size_t n)
813
+{
814
+ int i, i1, i2, i3;
815
+
816
+ for (i = 0; i < 5; i++) {
817
+ i1 = 0;
818
+ i2 = n - 2;
819
+ i3 = n - 5;
820
+
821
+ while (i1 < (n - 1)) {
822
+ d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
823
+ i1++; i2++; i3++;
824
+
825
+ if (i3 >= n)
826
+ i3 -= n;
827
+
828
+ d[i1] += d[i2] ^ d[i3];
829
+ i1++; i2++; i3++;
830
+
831
+ if (i2 >= n)
832
+ i2 -= n;
833
+
834
+ d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
835
+ i1++; i2++; i3++;
836
+
837
+ d[i1] += d[i2] ^ d[i3];
838
+ i1++; i2++; i3++;
839
+ }
840
+ }
841
+}
842
+
843
+static void diffuser_a_encrypt(u32 *d, size_t n)
844
+{
845
+ int i, i1, i2, i3;
846
+
847
+ for (i = 0; i < 5; i++) {
848
+ i1 = n - 1;
849
+ i2 = n - 2 - 1;
850
+ i3 = n - 5 - 1;
851
+
852
+ while (i1 > 0) {
853
+ d[i1] -= d[i2] ^ d[i3];
854
+ i1--; i2--; i3--;
855
+
856
+ d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
857
+ i1--; i2--; i3--;
858
+
859
+ if (i2 < 0)
860
+ i2 += n;
861
+
862
+ d[i1] -= d[i2] ^ d[i3];
863
+ i1--; i2--; i3--;
864
+
865
+ if (i3 < 0)
866
+ i3 += n;
867
+
868
+ d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
869
+ i1--; i2--; i3--;
870
+ }
871
+ }
872
+}
873
+
874
+static void diffuser_b_decrypt(u32 *d, size_t n)
875
+{
876
+ int i, i1, i2, i3;
877
+
878
+ for (i = 0; i < 3; i++) {
879
+ i1 = 0;
880
+ i2 = 2;
881
+ i3 = 5;
882
+
883
+ while (i1 < (n - 1)) {
884
+ d[i1] += d[i2] ^ d[i3];
885
+ i1++; i2++; i3++;
886
+
887
+ d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
888
+ i1++; i2++; i3++;
889
+
890
+ if (i2 >= n)
891
+ i2 -= n;
892
+
893
+ d[i1] += d[i2] ^ d[i3];
894
+ i1++; i2++; i3++;
895
+
896
+ if (i3 >= n)
897
+ i3 -= n;
898
+
899
+ d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
900
+ i1++; i2++; i3++;
901
+ }
902
+ }
903
+}
904
+
905
+static void diffuser_b_encrypt(u32 *d, size_t n)
906
+{
907
+ int i, i1, i2, i3;
908
+
909
+ for (i = 0; i < 3; i++) {
910
+ i1 = n - 1;
911
+ i2 = 2 - 1;
912
+ i3 = 5 - 1;
913
+
914
+ while (i1 > 0) {
915
+ d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
916
+ i1--; i2--; i3--;
917
+
918
+ if (i3 < 0)
919
+ i3 += n;
920
+
921
+ d[i1] -= d[i2] ^ d[i3];
922
+ i1--; i2--; i3--;
923
+
924
+ if (i2 < 0)
925
+ i2 += n;
926
+
927
+ d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
928
+ i1--; i2--; i3--;
929
+
930
+ d[i1] -= d[i2] ^ d[i3];
931
+ i1--; i2--; i3--;
932
+ }
933
+ }
934
+}
935
+
936
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
937
+{
938
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
939
+ u8 *es, *ks, *data, *data2, *data_offset;
940
+ struct skcipher_request *req;
941
+ struct scatterlist *sg, *sg2, src, dst;
942
+ DECLARE_CRYPTO_WAIT(wait);
943
+ int i, r;
944
+
945
+ req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
946
+ es = kzalloc(16, GFP_NOIO); /* Key for AES */
947
+ ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
948
+
949
+ if (!req || !es || !ks) {
950
+ r = -ENOMEM;
951
+ goto out;
952
+ }
953
+
954
+ *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
955
+
956
+ /* E(Ks, e(s)) */
957
+ sg_init_one(&src, es, 16);
958
+ sg_init_one(&dst, ks, 16);
959
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
960
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
961
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
962
+ if (r)
963
+ goto out;
964
+
965
+ /* E(Ks, e'(s)) */
966
+ es[15] = 0x80;
967
+ sg_init_one(&dst, &ks[16], 16);
968
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
969
+ if (r)
970
+ goto out;
971
+
972
+ sg = crypt_get_sg_data(cc, dmreq->sg_out);
973
+ data = kmap_atomic(sg_page(sg));
974
+ data_offset = data + sg->offset;
975
+
976
+ /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
977
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
978
+ sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
979
+ data2 = kmap_atomic(sg_page(sg2));
980
+ memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
981
+ kunmap_atomic(data2);
982
+ }
983
+
984
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
985
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
986
+ diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
987
+ diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
988
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
989
+ }
990
+
991
+ for (i = 0; i < (cc->sector_size / 32); i++)
992
+ crypto_xor(data_offset + i * 32, ks, 32);
993
+
994
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
995
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
996
+ diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
997
+ diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
998
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
999
+ }
1000
+
1001
+ kunmap_atomic(data);
1002
+out:
1003
+ kfree_sensitive(ks);
1004
+ kfree_sensitive(es);
1005
+ skcipher_request_free(req);
1006
+ return r;
1007
+}
1008
+
1009
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1010
+ struct dm_crypt_request *dmreq)
1011
+{
1012
+ int r;
1013
+
1014
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1015
+ r = crypt_iv_elephant(cc, dmreq);
1016
+ if (r)
1017
+ return r;
1018
+ }
1019
+
1020
+ return crypt_iv_eboiv_gen(cc, iv, dmreq);
1021
+}
1022
+
1023
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1024
+ struct dm_crypt_request *dmreq)
1025
+{
1026
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1027
+ return crypt_iv_elephant(cc, dmreq);
1028
+
1029
+ return 0;
1030
+}
1031
+
1032
+static int crypt_iv_elephant_init(struct crypt_config *cc)
1033
+{
1034
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1035
+ int key_offset = cc->key_size - cc->key_extra_size;
1036
+
1037
+ return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1038
+}
1039
+
1040
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1041
+{
1042
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1043
+ u8 key[ELEPHANT_MAX_KEY_SIZE];
1044
+
1045
+ memset(key, 0, cc->key_extra_size);
1046
+ return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1047
+}
1048
+
8531049 static const struct crypt_iv_operations crypt_iv_plain_ops = {
8541050 .generator = crypt_iv_plain_gen
8551051 };
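The shift pairs in the diffusers above (9/23 and 13/19 in diffuser A, 10/22 and 25/7 in diffuser B) are 32-bit rotations written out long-hand, and each *_encrypt pass walks the sector backwards so that it exactly undoes the corresponding *_decrypt pass. A small standalone check of the rotation identities (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* rotate-left on 32-bit words */
    static uint32_t rotl32(uint32_t x, unsigned int r)
    {
            return (x << r) | (x >> (32 - r));
    }

    int main(void)
    {
            uint32_t x = 0x12345678u;

            assert(((x << 9)  | (x >> 23)) == rotl32(x, 9));        /* diffuser A */
            assert(((x << 13) | (x >> 19)) == rotl32(x, 13));       /* diffuser A */
            assert(((x << 10) | (x >> 22)) == rotl32(x, 10));       /* diffuser B */
            assert(((x << 25) | (x >> 7))  == rotl32(x, 25));       /* diffuser B */
            return 0;
    }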
....@@ -863,10 +1059,6 @@
8631059 };
8641060
8651061 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
866
- .ctr = crypt_iv_essiv_ctr,
867
- .dtr = crypt_iv_essiv_dtr,
868
- .init = crypt_iv_essiv_init,
869
- .wipe = crypt_iv_essiv_wipe,
8701062 .generator = crypt_iv_essiv_gen
8711063 };
8721064
....@@ -900,6 +1092,20 @@
9001092
9011093 static struct crypt_iv_operations crypt_iv_random_ops = {
9021094 .generator = crypt_iv_random_gen
1095
+};
1096
+
1097
+static struct crypt_iv_operations crypt_iv_eboiv_ops = {
1098
+ .ctr = crypt_iv_eboiv_ctr,
1099
+ .generator = crypt_iv_eboiv_gen
1100
+};
1101
+
1102
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
1103
+ .ctr = crypt_iv_elephant_ctr,
1104
+ .dtr = crypt_iv_elephant_dtr,
1105
+ .init = crypt_iv_elephant_init,
1106
+ .wipe = crypt_iv_elephant_wipe,
1107
+ .generator = crypt_iv_elephant_gen,
1108
+ .post = crypt_iv_elephant_post
9031109 };
9041110
9051111 /*
....@@ -1041,11 +1247,11 @@
10411247 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
10421248 }
10431249
1044
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
1250
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
10451251 struct dm_crypt_request *dmreq)
10461252 {
10471253 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1048
- return (uint64_t*) ptr;
1254
+ return (__le64 *) ptr;
10491255 }
10501256
10511257 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
....@@ -1081,7 +1287,7 @@
10811287 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
10821288 struct dm_crypt_request *dmreq;
10831289 u8 *iv, *org_iv, *tag_iv, *tag;
1084
- uint64_t *sector;
1290
+ __le64 *sector;
10851291 int r = 0;
10861292
10871293 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
....@@ -1153,9 +1359,11 @@
11531359 r = crypto_aead_decrypt(req);
11541360 }
11551361
1156
- if (r == -EBADMSG)
1157
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1362
+ if (r == -EBADMSG) {
1363
+ char b[BDEVNAME_SIZE];
1364
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
11581365 (unsigned long long)le64_to_cpu(*sector));
1366
+ }
11591367
11601368 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
11611369 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
....@@ -1176,7 +1384,7 @@
11761384 struct scatterlist *sg_in, *sg_out;
11771385 struct dm_crypt_request *dmreq;
11781386 u8 *iv, *org_iv, *tag_iv;
1179
- uint64_t *sector;
1387
+ __le64 *sector;
11801388 int r = 0;
11811389
11821390 /* Reject unexpected unaligned bio. */
....@@ -1216,6 +1424,9 @@
12161424 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
12171425 if (r < 0)
12181426 return r;
1427
+ /* Data can be already preprocessed in generator */
1428
+ if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1429
+ sg_in = sg_out;
12191430 /* Store generated IV in integrity metadata */
12201431 if (cc->integrity_iv_size)
12211432 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
....@@ -1243,13 +1454,16 @@
12431454 static void kcryptd_async_done(struct crypto_async_request *async_req,
12441455 int error);
12451456
1246
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1457
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
12471458 struct convert_context *ctx)
12481459 {
12491460 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
12501461
1251
- if (!ctx->r.req)
1252
- ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
1462
+ if (!ctx->r.req) {
1463
+ ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1464
+ if (!ctx->r.req)
1465
+ return -ENOMEM;
1466
+ }
12531467
12541468 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
12551469
....@@ -1260,13 +1474,18 @@
12601474 skcipher_request_set_callback(ctx->r.req,
12611475 CRYPTO_TFM_REQ_MAY_BACKLOG,
12621476 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1477
+
1478
+ return 0;
12631479 }
12641480
1265
-static void crypt_alloc_req_aead(struct crypt_config *cc,
1481
+static int crypt_alloc_req_aead(struct crypt_config *cc,
12661482 struct convert_context *ctx)
12671483 {
1268
- if (!ctx->r.req_aead)
1269
- ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
1484
+ if (!ctx->r.req_aead) {
1485
+ ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1486
+ if (!ctx->r.req_aead)
1487
+ return -ENOMEM;
1488
+ }
12701489
12711490 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
12721491
....@@ -1277,15 +1496,17 @@
12771496 aead_request_set_callback(ctx->r.req_aead,
12781497 CRYPTO_TFM_REQ_MAY_BACKLOG,
12791498 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1499
+
1500
+ return 0;
12801501 }
12811502
1282
-static void crypt_alloc_req(struct crypt_config *cc,
1503
+static int crypt_alloc_req(struct crypt_config *cc,
12831504 struct convert_context *ctx)
12841505 {
12851506 if (crypt_integrity_aead(cc))
1286
- crypt_alloc_req_aead(cc, ctx);
1507
+ return crypt_alloc_req_aead(cc, ctx);
12871508 else
1288
- crypt_alloc_req_skcipher(cc, ctx);
1509
+ return crypt_alloc_req_skcipher(cc, ctx);
12891510 }
12901511
12911512 static void crypt_free_req_skcipher(struct crypt_config *cc,
....@@ -1318,17 +1539,28 @@
13181539 * Encrypt / decrypt data from one bio to another one (can be the same one)
13191540 */
13201541 static blk_status_t crypt_convert(struct crypt_config *cc,
1321
- struct convert_context *ctx)
1542
+ struct convert_context *ctx, bool atomic, bool reset_pending)
13221543 {
13231544 unsigned int tag_offset = 0;
13241545 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
13251546 int r;
13261547
1327
- atomic_set(&ctx->cc_pending, 1);
1548
+ /*
1549
+ * if reset_pending is set we are dealing with the bio for the first time,
1550
+ * else we're continuing to work on the previous bio, so don't mess with
1551
+ * the cc_pending counter
1552
+ */
1553
+ if (reset_pending)
1554
+ atomic_set(&ctx->cc_pending, 1);
13281555
13291556 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
13301557
1331
- crypt_alloc_req(cc, ctx);
1558
+ r = crypt_alloc_req(cc, ctx);
1559
+ if (r) {
1560
+ complete(&ctx->restart);
1561
+ return BLK_STS_DEV_RESOURCE;
1562
+ }
1563
+
13321564 atomic_inc(&ctx->cc_pending);
13331565
13341566 if (crypt_integrity_aead(cc))
....@@ -1342,9 +1574,27 @@
13421574 * but the driver request queue is full, let's wait.
13431575 */
13441576 case -EBUSY:
1345
- wait_for_completion(&ctx->restart);
1577
+ if (in_interrupt()) {
1578
+ if (try_wait_for_completion(&ctx->restart)) {
1579
+ /*
1580
+ * we don't have to block to wait for completion,
1581
+ * so proceed
1582
+ */
1583
+ } else {
1584
+ /*
1585
+ * we can't wait for completion without blocking
1586
+ * exit and continue processing in a workqueue
1587
+ */
1588
+ ctx->r.req = NULL;
1589
+ ctx->cc_sector += sector_step;
1590
+ tag_offset++;
1591
+ return BLK_STS_DEV_RESOURCE;
1592
+ }
1593
+ } else {
1594
+ wait_for_completion(&ctx->restart);
1595
+ }
13461596 reinit_completion(&ctx->restart);
1347
- /* fall through */
1597
+ fallthrough;
13481598 /*
13491599 * The request is queued and processed asynchronously,
13501600 * completion function kcryptd_async_done() will be called.
....@@ -1361,7 +1611,8 @@
13611611 atomic_dec(&ctx->cc_pending);
13621612 ctx->cc_sector += sector_step;
13631613 tag_offset++;
1364
- cond_resched();
1614
+ if (!atomic)
1615
+ cond_resched();
13651616 continue;
13661617 /*
13671618 * There was a data integrity error.
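Taken together, the crypt_convert() changes above define a retry contract for callers that may run in softirq context: request allocation uses GFP_ATOMIC when in_interrupt() (so it can fail instead of sleeping), and both that failure and an -EBUSY backlog that cannot be waited on are reported as BLK_STS_DEV_RESOURCE. The callers added further down (kcryptd_crypt_read_continue() / kcryptd_crypt_write_continue()) then finish the same convert_context from the workqueue. The calling pattern, mirroring the read path below:

    r = crypt_convert(cc, ctx, /* atomic */ true, /* reset_pending */ true);
    if (r == BLK_STS_DEV_RESOURCE) {
            /*
             * Could not make progress in softirq context: hand the rest of
             * this ctx to the workqueue, which resumes it with
             * reset_pending == false so cc_pending is not reinitialized.
             */
            INIT_WORK(&io->work, kcryptd_crypt_read_continue);
            queue_work(cc->crypt_queue, &io->work);
            return;
    }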
....@@ -1452,10 +1703,10 @@
14521703
14531704 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
14541705 {
1455
- unsigned int i;
14561706 struct bio_vec *bv;
1707
+ struct bvec_iter_all iter_all;
14571708
1458
- bio_for_each_segment_all(bv, clone, i) {
1709
+ bio_for_each_segment_all(bv, clone, iter_all) {
14591710 BUG_ON(!bv->bv_page);
14601711 mempool_free(bv->bv_page, &cc->page_pool);
14611712 }
....@@ -1477,6 +1728,12 @@
14771728 static void crypt_inc_pending(struct dm_crypt_io *io)
14781729 {
14791730 atomic_inc(&io->io_pending);
1731
+}
1732
+
1733
+static void kcryptd_io_bio_endio(struct work_struct *work)
1734
+{
1735
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1736
+ bio_endio(io->base_bio);
14801737 }
14811738
14821739 /*
....@@ -1501,7 +1758,23 @@
15011758 kfree(io->integrity_metadata);
15021759
15031760 base_bio->bi_status = error;
1504
- bio_endio(base_bio);
1761
+
1762
+ /*
1763
+ * If we are running this function from our tasklet,
1764
+ * we can't call bio_endio() here, because it will call
1765
+ * clone_endio() from dm.c, which in turn will
1766
+ * free the current struct dm_crypt_io structure with
1767
+ * our tasklet. In this case we need to delay bio_endio()
1768
+ * execution to after the tasklet is done and dequeued.
1769
+ */
1770
+ if (tasklet_trylock(&io->tasklet)) {
1771
+ tasklet_unlock(&io->tasklet);
1772
+ bio_endio(base_bio);
1773
+ return;
1774
+ }
1775
+
1776
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
1777
+ queue_work(cc->io_queue, &io->work);
15051778 }
15061779
15071780 /*
....@@ -1584,7 +1857,7 @@
15841857 return 1;
15851858 }
15861859
1587
- generic_make_request(clone);
1860
+ submit_bio_noacct(clone);
15881861 return 0;
15891862 }
15901863
....@@ -1610,7 +1883,7 @@
16101883 {
16111884 struct bio *clone = io->ctx.bio_out;
16121885
1613
- generic_make_request(clone);
1886
+ submit_bio_noacct(clone);
16141887 }
16151888
16161889 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
....@@ -1687,8 +1960,9 @@
16871960
16881961 clone->bi_iter.bi_sector = cc->start + io->sector;
16891962
1690
- if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1691
- generic_make_request(clone);
1963
+ if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1964
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1965
+ submit_bio_noacct(clone);
16921966 return;
16931967 }
16941968
....@@ -1710,9 +1984,63 @@
17101984 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
17111985 }
17121986
1987
+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1988
+ struct convert_context *ctx)
1989
+
1990
+{
1991
+ if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1992
+ return false;
1993
+
1994
+ /*
1995
+ * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
1996
+ * constraints so they do not need to be issued inline by
1997
+ * kcryptd_crypt_write_convert().
1998
+ */
1999
+ switch (bio_op(ctx->bio_in)) {
2000
+ case REQ_OP_WRITE:
2001
+ case REQ_OP_WRITE_SAME:
2002
+ case REQ_OP_WRITE_ZEROES:
2003
+ return true;
2004
+ default:
2005
+ return false;
2006
+ }
2007
+}
2008
+
2009
+static void kcryptd_crypt_write_continue(struct work_struct *work)
2010
+{
2011
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2012
+ struct crypt_config *cc = io->cc;
2013
+ struct convert_context *ctx = &io->ctx;
2014
+ int crypt_finished;
2015
+ sector_t sector = io->sector;
2016
+ blk_status_t r;
2017
+
2018
+ wait_for_completion(&ctx->restart);
2019
+ reinit_completion(&ctx->restart);
2020
+
2021
+ r = crypt_convert(cc, &io->ctx, true, false);
2022
+ if (r)
2023
+ io->error = r;
2024
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2025
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2026
+ /* Wait for completion signaled by kcryptd_async_done() */
2027
+ wait_for_completion(&ctx->restart);
2028
+ crypt_finished = 1;
2029
+ }
2030
+
2031
+ /* Encryption was already finished, submit io now */
2032
+ if (crypt_finished) {
2033
+ kcryptd_crypt_write_io_submit(io, 0);
2034
+ io->sector = sector;
2035
+ }
2036
+
2037
+ crypt_dec_pending(io);
2038
+}
2039
+
17132040 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
17142041 {
17152042 struct crypt_config *cc = io->cc;
2043
+ struct convert_context *ctx = &io->ctx;
17162044 struct bio *clone;
17172045 int crypt_finished;
17182046 sector_t sector = io->sector;
....@@ -1722,7 +2050,7 @@
17222050 * Prevent io from disappearing until this function completes.
17232051 */
17242052 crypt_inc_pending(io);
1725
- crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
2053
+ crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
17262054
17272055 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
17282056 if (unlikely(!clone)) {
....@@ -1736,10 +2064,26 @@
17362064 sector += bio_sectors(clone);
17372065
17382066 crypt_inc_pending(io);
1739
- r = crypt_convert(cc, &io->ctx);
2067
+ r = crypt_convert(cc, ctx,
2068
+ test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2069
+ /*
2070
+ * Crypto API backlogged the request, because its queue was full
2071
+ * and we're in softirq context, so continue from a workqueue
2072
+ * (TODO: is it actually possible to be in softirq in the write path?)
2073
+ */
2074
+ if (r == BLK_STS_DEV_RESOURCE) {
2075
+ INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2076
+ queue_work(cc->crypt_queue, &io->work);
2077
+ return;
2078
+ }
17402079 if (r)
17412080 io->error = r;
1742
- crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
2081
+ crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2082
+ if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2083
+ /* Wait for completion signaled by kcryptd_async_done() */
2084
+ wait_for_completion(&ctx->restart);
2085
+ crypt_finished = 1;
2086
+ }
17432087
17442088 /* Encryption was already finished, submit io now */
17452089 if (crypt_finished) {
....@@ -1756,6 +2100,25 @@
17562100 crypt_dec_pending(io);
17572101 }
17582102
2103
+static void kcryptd_crypt_read_continue(struct work_struct *work)
2104
+{
2105
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2106
+ struct crypt_config *cc = io->cc;
2107
+ blk_status_t r;
2108
+
2109
+ wait_for_completion(&io->ctx.restart);
2110
+ reinit_completion(&io->ctx.restart);
2111
+
2112
+ r = crypt_convert(cc, &io->ctx, true, false);
2113
+ if (r)
2114
+ io->error = r;
2115
+
2116
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
2117
+ kcryptd_crypt_read_done(io);
2118
+
2119
+ crypt_dec_pending(io);
2120
+}
2121
+
17592122 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
17602123 {
17612124 struct crypt_config *cc = io->cc;
....@@ -1766,7 +2129,17 @@
17662129 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
17672130 io->sector);
17682131
1769
- r = crypt_convert(cc, &io->ctx);
2132
+ r = crypt_convert(cc, &io->ctx,
2133
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2134
+ /*
2135
+ * Crypto API backlogged the request, because its queue was full
2136
+ * and we're in softirq context, so continue from a workqueue
2137
+ */
2138
+ if (r == BLK_STS_DEV_RESOURCE) {
2139
+ INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2140
+ queue_work(cc->crypt_queue, &io->work);
2141
+ return;
2142
+ }
17702143 if (r)
17712144 io->error = r;
17722145
....@@ -1798,7 +2171,8 @@
17982171 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
17992172
18002173 if (error == -EBADMSG) {
1801
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
2174
+ char b[BDEVNAME_SIZE];
2175
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
18022176 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
18032177 io->error = BLK_STS_PROTECTION;
18042178 } else if (error < 0)
....@@ -1809,10 +2183,21 @@
18092183 if (!atomic_dec_and_test(&ctx->cc_pending))
18102184 return;
18112185
1812
- if (bio_data_dir(io->base_bio) == READ)
2186
+ /*
2187
+ * The request is fully completed: for inline writes, let
2188
+ * kcryptd_crypt_write_convert() do the IO submission.
2189
+ */
2190
+ if (bio_data_dir(io->base_bio) == READ) {
18132191 kcryptd_crypt_read_done(io);
1814
- else
1815
- kcryptd_crypt_write_io_submit(io, 1);
2192
+ return;
2193
+ }
2194
+
2195
+ if (kcryptd_crypt_write_inline(cc, ctx)) {
2196
+ complete(&ctx->restart);
2197
+ return;
2198
+ }
2199
+
2200
+ kcryptd_crypt_write_io_submit(io, 1);
18162201 }
18172202
18182203 static void kcryptd_crypt(struct work_struct *work)
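For the new DM_CRYPT_WRITE_INLINE case (forced for zoned devices in crypt_ctr() below), kcryptd_async_done() above cooperates with kcryptd_crypt_write_convert(): instead of submitting the clone from the async callback, the last completion signals ctx->restart and the original submitter issues the bio itself, preserving write order. Sketch of the flow:

    /*
     *   kcryptd_crypt_write_convert()
     *        crypt_convert()                      requests still in flight
     *        kcryptd_crypt_write_inline() == true
     *        wait_for_completion(&ctx->restart)   <- submitter blocks here
     *
     *   kcryptd_async_done()                      last request, softirq
     *        complete(&ctx->restart)              <- wakes the submitter
     *
     *   kcryptd_crypt_write_convert() resumes and calls
     *   kcryptd_crypt_write_io_submit(), so the clone is issued in order.
     */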
....@@ -1825,9 +2210,31 @@
18252210 kcryptd_crypt_write_convert(io);
18262211 }
18272212
2213
+static void kcryptd_crypt_tasklet(unsigned long work)
2214
+{
2215
+ kcryptd_crypt((struct work_struct *)work);
2216
+}
2217
+
18282218 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
18292219 {
18302220 struct crypt_config *cc = io->cc;
2221
+
2222
+ if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2223
+ (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2224
+ /*
2225
+ * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
2226
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
2227
+ * it is being executed with irqs disabled.
2228
+ */
2229
+ if (in_irq() || irqs_disabled()) {
2230
+ tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2231
+ tasklet_schedule(&io->tasklet);
2232
+ return;
2233
+ }
2234
+
2235
+ kcryptd_crypt(&io->work);
2236
+ return;
2237
+ }
18312238
18322239 INIT_WORK(&io->work, kcryptd_crypt);
18332240 queue_work(cc->crypt_queue, &io->work);
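In summary, the dispatch policy after this hunk is:

    /*
     *   no_read_workqueue / no_write_workqueue not set (default):
     *        queue kcryptd_crypt() on cc->crypt_queue, as before.
     *
     *   bypass requested for this bio's direction:
     *        in_irq() || irqs_disabled()  ->  run kcryptd_crypt() from a
     *                                         per-io tasklet (softirq)
     *        otherwise                    ->  call kcryptd_crypt() directly,
     *                                         inline in the caller's context
     */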
....@@ -1884,7 +2291,8 @@
18842291 return -ENOMEM;
18852292
18862293 for (i = 0; i < cc->tfms_count; i++) {
1887
- cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
2294
+ cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2295
+ CRYPTO_ALG_ALLOCATES_MEMORY);
18882296 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
18892297 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
18902298 crypt_free_tfms(cc);
....@@ -1897,7 +2305,7 @@
18972305 * algorithm implementation is used. Help people debug performance
18982306 * problems by logging the ->cra_driver_name.
18992307 */
1900
- DMINFO("%s using implementation \"%s\"", ciphermode,
2308
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
19012309 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
19022310 return 0;
19032311 }
....@@ -1910,14 +2318,15 @@
19102318 if (!cc->cipher_tfm.tfms)
19112319 return -ENOMEM;
19122320
1913
- cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
2321
+ cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2322
+ CRYPTO_ALG_ALLOCATES_MEMORY);
19142323 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
19152324 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
19162325 crypt_free_tfms(cc);
19172326 return err;
19182327 }
19192328
1920
- DMINFO("%s using implementation \"%s\"", ciphermode,
2329
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
19212330 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
19222331 return 0;
19232332 }
....@@ -2011,12 +2420,47 @@
20112420 return false;
20122421 }
20132422
2423
+static int set_key_user(struct crypt_config *cc, struct key *key)
2424
+{
2425
+ const struct user_key_payload *ukp;
2426
+
2427
+ ukp = user_key_payload_locked(key);
2428
+ if (!ukp)
2429
+ return -EKEYREVOKED;
2430
+
2431
+ if (cc->key_size != ukp->datalen)
2432
+ return -EINVAL;
2433
+
2434
+ memcpy(cc->key, ukp->data, cc->key_size);
2435
+
2436
+ return 0;
2437
+}
2438
+
2439
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2440
+static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2441
+{
2442
+ const struct encrypted_key_payload *ekp;
2443
+
2444
+ ekp = key->payload.data[0];
2445
+ if (!ekp)
2446
+ return -EKEYREVOKED;
2447
+
2448
+ if (cc->key_size != ekp->decrypted_datalen)
2449
+ return -EINVAL;
2450
+
2451
+ memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2452
+
2453
+ return 0;
2454
+}
2455
+#endif /* CONFIG_ENCRYPTED_KEYS */
2456
+
20142457 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
20152458 {
20162459 char *new_key_string, *key_desc;
20172460 int ret;
2461
+ struct key_type *type;
20182462 struct key *key;
2019
- const struct user_key_payload *ukp;
2463
+ int (*set_key)(struct crypt_config *cc, struct key *key);
20202464
20212465 /*
20222466 * Reject key_string with whitespace. dm core currently lacks code for
....@@ -2032,39 +2476,40 @@
20322476 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
20332477 return -EINVAL;
20342478
2035
- if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
2036
- strncmp(key_string, "user:", key_desc - key_string + 1))
2479
+ if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2480
+ type = &key_type_logon;
2481
+ set_key = set_key_user;
2482
+ } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2483
+ type = &key_type_user;
2484
+ set_key = set_key_user;
2485
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2486
+ } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2487
+ type = &key_type_encrypted;
2488
+ set_key = set_key_encrypted;
2489
+#endif
2490
+ } else {
20372491 return -EINVAL;
2492
+ }
20382493
20392494 new_key_string = kstrdup(key_string, GFP_KERNEL);
20402495 if (!new_key_string)
20412496 return -ENOMEM;
20422497
2043
- key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
2044
- key_desc + 1, NULL);
2498
+ key = request_key(type, key_desc + 1, NULL);
20452499 if (IS_ERR(key)) {
2046
- kzfree(new_key_string);
2500
+ kfree_sensitive(new_key_string);
20472501 return PTR_ERR(key);
20482502 }
20492503
20502504 down_read(&key->sem);
20512505
2052
- ukp = user_key_payload_locked(key);
2053
- if (!ukp) {
2506
+ ret = set_key(cc, key);
2507
+ if (ret < 0) {
20542508 up_read(&key->sem);
20552509 key_put(key);
2056
- kzfree(new_key_string);
2057
- return -EKEYREVOKED;
2510
+ kfree_sensitive(new_key_string);
2511
+ return ret;
20582512 }
2059
-
2060
- if (cc->key_size != ukp->datalen) {
2061
- up_read(&key->sem);
2062
- key_put(key);
2063
- kzfree(new_key_string);
2064
- return -EINVAL;
2065
- }
2066
-
2067
- memcpy(cc->key, ukp->data, cc->key_size);
20682513
20692514 up_read(&key->sem);
20702515 key_put(key);
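With the key-type dispatch above, the key field of the mapping table accepts three keyring-backed forms in addition to a raw hex key; in each case the (decrypted) payload length of the referenced key must equal <key_size>:

    /*
     *   <hex key>                                  key given inline, in hex
     *   :<key_size>:logon:<key_description>        kernel "logon" key
     *   :<key_size>:user:<key_description>         kernel "user" key
     *   :<key_size>:encrypted:<key_description>    kernel "encrypted" key,
     *                                              CONFIG_ENCRYPTED_KEYS[_MODULE] only
     */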
....@@ -2076,10 +2521,10 @@
20762521
20772522 if (!ret) {
20782523 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2079
- kzfree(cc->key_string);
2524
+ kfree_sensitive(cc->key_string);
20802525 cc->key_string = new_key_string;
20812526 } else
2082
- kzfree(new_key_string);
2527
+ kfree_sensitive(new_key_string);
20832528
20842529 return ret;
20852530 }
....@@ -2116,10 +2561,10 @@
21162561
21172562 static int get_key_size(char **key_string)
21182563 {
2119
- return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2564
+ return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
21202565 }
21212566
2122
-#endif
2567
+#endif /* CONFIG_KEYS */
21232568
21242569 static int crypt_set_key(struct crypt_config *cc, char *key)
21252570 {
....@@ -2140,7 +2585,7 @@
21402585 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
21412586
21422587 /* wipe references to any kernel keyring key */
2143
- kzfree(cc->key_string);
2588
+ kfree_sensitive(cc->key_string);
21442589 cc->key_string = NULL;
21452590
21462591 /* Decode key from its hex representation. */
....@@ -2164,7 +2609,15 @@
21642609
21652610 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
21662611 get_random_bytes(&cc->key, cc->key_size);
2167
- kzfree(cc->key_string);
2612
+
2613
+ /* Wipe IV private keys */
2614
+ if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2615
+ r = cc->iv_gen_ops->wipe(cc);
2616
+ if (r)
2617
+ return r;
2618
+ }
2619
+
2620
+ kfree_sensitive(cc->key_string);
21682621 cc->key_string = NULL;
21692622 r = crypt_setkey(cc);
21702623 memset(&cc->key, 0, cc->key_size * sizeof(u8));
....@@ -2174,7 +2627,7 @@
21742627
21752628 static void crypt_calculate_pages_per_client(void)
21762629 {
2177
- unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
2630
+ unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
21782631
21792632 if (!dm_crypt_clients_n)
21802633 return;
....@@ -2248,16 +2701,15 @@
22482701 if (cc->dev)
22492702 dm_put_device(ti, cc->dev);
22502703
2251
- kzfree(cc->cipher);
2252
- kzfree(cc->cipher_string);
2253
- kzfree(cc->key_string);
2254
- kzfree(cc->cipher_auth);
2255
- kzfree(cc->authenc_key);
2704
+ kfree_sensitive(cc->cipher_string);
2705
+ kfree_sensitive(cc->key_string);
2706
+ kfree_sensitive(cc->cipher_auth);
2707
+ kfree_sensitive(cc->authenc_key);
22562708
22572709 mutex_destroy(&cc->bio_alloc_lock);
22582710
22592711 /* Must zero key material before freeing */
2260
- kzfree(cc);
2712
+ kfree_sensitive(cc);
22612713
22622714 spin_lock(&dm_crypt_clients_lock);
22632715 WARN_ON(!dm_crypt_clients_n);
....@@ -2299,7 +2751,16 @@
22992751 cc->iv_gen_ops = &crypt_iv_benbi_ops;
23002752 else if (strcmp(ivmode, "null") == 0)
23012753 cc->iv_gen_ops = &crypt_iv_null_ops;
2302
- else if (strcmp(ivmode, "lmk") == 0) {
2754
+ else if (strcmp(ivmode, "eboiv") == 0)
2755
+ cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2756
+ else if (strcmp(ivmode, "elephant") == 0) {
2757
+ cc->iv_gen_ops = &crypt_iv_elephant_ops;
2758
+ cc->key_parts = 2;
2759
+ cc->key_extra_size = cc->key_size / 2;
2760
+ if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2761
+ return -EINVAL;
2762
+ set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2763
+ } else if (strcmp(ivmode, "lmk") == 0) {
23032764 cc->iv_gen_ops = &crypt_iv_lmk_ops;
23042765 /*
23052766 * Version 2 and 3 is recognised according
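For the new "elephant" IV mode, the branch above splits the table key in two (key_parts = 2, key_extra_size = key_size / 2): crypt_setkey() feeds the lower half to the data cipher, and crypt_iv_elephant_init() keys the ecb(aes) transform with the upper half. In effect the key supplied in the table is laid out as:

    /*
     *   key[0 .. key_size/2 - 1]           data cipher key (e.g. cbc(aes))
     *   key[key_size/2 .. key_size - 1]    Elephant sector-key material for
     *                                      the ecb(aes) tfm, at most
     *                                      ELEPHANT_MAX_KEY_SIZE (32) bytes
     */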
....@@ -2328,52 +2789,6 @@
23282789 }
23292790
23302791 /*
2331
- * Workaround to parse cipher algorithm from crypto API spec.
2332
- * The cc->cipher is currently used only in ESSIV.
2333
- * This should be probably done by crypto-api calls (once available...)
2334
- */
2335
-static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
2336
-{
2337
- const char *alg_name = NULL;
2338
- char *start, *end;
2339
-
2340
- if (crypt_integrity_aead(cc)) {
2341
- alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
2342
- if (!alg_name)
2343
- return -EINVAL;
2344
- if (crypt_integrity_hmac(cc)) {
2345
- alg_name = strchr(alg_name, ',');
2346
- if (!alg_name)
2347
- return -EINVAL;
2348
- }
2349
- alg_name++;
2350
- } else {
2351
- alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
2352
- if (!alg_name)
2353
- return -EINVAL;
2354
- }
2355
-
2356
- start = strchr(alg_name, '(');
2357
- end = strchr(alg_name, ')');
2358
-
2359
- if (!start && !end) {
2360
- cc->cipher = kstrdup(alg_name, GFP_KERNEL);
2361
- return cc->cipher ? 0 : -ENOMEM;
2362
- }
2363
-
2364
- if (!start || !end || ++start >= end)
2365
- return -EINVAL;
2366
-
2367
- cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
2368
- if (!cc->cipher)
2369
- return -ENOMEM;
2370
-
2371
- strncpy(cc->cipher, start, end - start);
2372
-
2373
- return 0;
2374
-}
2375
-
2376
-/*
23772792 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
23782793 * The HMAC is needed to calculate tag size (HMAC digest size).
23792794 * This should be probably done by crypto-api calls (once available...)
....@@ -2396,7 +2811,7 @@
23962811 return -ENOMEM;
23972812 strncpy(mac_alg, start, end - start);
23982813
2399
- mac = crypto_alloc_ahash(mac_alg, 0, 0);
2814
+ mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
24002815 kfree(mac_alg);
24012816
24022817 if (IS_ERR(mac))
....@@ -2416,7 +2831,7 @@
24162831 char **ivmode, char **ivopts)
24172832 {
24182833 struct crypt_config *cc = ti->private;
2419
- char *tmp, *cipher_api;
2834
+ char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
24202835 int ret = -EINVAL;
24212836
24222837 cc->tfms_count = 1;
....@@ -2442,8 +2857,31 @@
24422857 /* The rest is crypto API spec */
24432858 cipher_api = tmp;
24442859
2860
+ /* Alloc AEAD, can be used only in new format. */
2861
+ if (crypt_integrity_aead(cc)) {
2862
+ ret = crypt_ctr_auth_cipher(cc, cipher_api);
2863
+ if (ret < 0) {
2864
+ ti->error = "Invalid AEAD cipher spec";
2865
+ return -ENOMEM;
2866
+ }
2867
+ }
2868
+
24452869 if (*ivmode && !strcmp(*ivmode, "lmk"))
24462870 cc->tfms_count = 64;
2871
+
2872
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
2873
+ if (!*ivopts) {
2874
+ ti->error = "Digest algorithm missing for ESSIV mode";
2875
+ return -EINVAL;
2876
+ }
2877
+ ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2878
+ cipher_api, *ivopts);
2879
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2880
+ ti->error = "Cannot allocate cipher string";
2881
+ return -ENOMEM;
2882
+ }
2883
+ cipher_api = buf;
2884
+ }
24472885
24482886 cc->key_parts = cc->tfms_count;
24492887
....@@ -2454,22 +2892,10 @@
24542892 return ret;
24552893 }
24562894
2457
- /* Alloc AEAD, can be used only in new format. */
2458
- if (crypt_integrity_aead(cc)) {
2459
- ret = crypt_ctr_auth_cipher(cc, cipher_api);
2460
- if (ret < 0) {
2461
- ti->error = "Invalid AEAD cipher spec";
2462
- return -ENOMEM;
2463
- }
2895
+ if (crypt_integrity_aead(cc))
24642896 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2465
- } else
2897
+ else
24662898 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2467
-
2468
- ret = crypt_ctr_blkdev_cipher(cc);
2469
- if (ret < 0) {
2470
- ti->error = "Cannot allocate cipher string";
2471
- return -ENOMEM;
2472
- }
24732899
24742900 return 0;
24752901 }
....@@ -2505,10 +2931,6 @@
25052931 }
25062932 cc->key_parts = cc->tfms_count;
25072933
2508
- cc->cipher = kstrdup(cipher, GFP_KERNEL);
2509
- if (!cc->cipher)
2510
- goto bad_mem;
2511
-
25122934 chainmode = strsep(&tmp, "-");
25132935 *ivmode = strsep(&tmp, ":");
25142936 *ivopts = tmp;
....@@ -2531,9 +2953,19 @@
25312953 if (!cipher_api)
25322954 goto bad_mem;
25332955
2534
- ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2535
- "%s(%s)", chainmode, cipher);
2536
- if (ret < 0) {
2956
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
2957
+ if (!*ivopts) {
2958
+ ti->error = "Digest algorithm missing for ESSIV mode";
2959
+ kfree(cipher_api);
2960
+ return -EINVAL;
2961
+ }
2962
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2963
+ "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2964
+ } else {
2965
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2966
+ "%s(%s)", chainmode, cipher);
2967
+ }
2968
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
25372969 kfree(cipher_api);
25382970 goto bad_mem;
25392971 }
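Both constructor paths now hand ESSIV to the crypto API by building a template name instead of instantiating a private hash/cipher pair. Illustrative mappings from table cipher spec to the allocated transform name:

    /*
     *   old format   aes-cbc-essiv:sha256
     *                -> "essiv(cbc(aes),sha256)"
     *
     *   new format   capi:cbc(aes)-essiv:sha256
     *                -> cipher_api "cbc(aes)", ivopts "sha256"
     *                -> "essiv(cbc(aes),sha256)"
     *
     *   non-ESSIV modes are unchanged, e.g. aes-xts-plain64 -> "xts(aes)"
     */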
....@@ -2614,7 +3046,7 @@
26143046 struct crypt_config *cc = ti->private;
26153047 struct dm_arg_set as;
26163048 static const struct dm_arg _args[] = {
2617
- {0, 6, "Invalid number of feature args"},
3049
+ {0, 8, "Invalid number of feature args"},
26183050 };
26193051 unsigned int opt_params, val;
26203052 const char *opt_string, *sval;
....@@ -2644,6 +3076,10 @@
26443076
26453077 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
26463078 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3079
+ else if (!strcasecmp(opt_string, "no_read_workqueue"))
3080
+ set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3081
+ else if (!strcasecmp(opt_string, "no_write_workqueue"))
3082
+ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
26473083 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
26483084 if (val == 0 || val > MAX_TAG_SIZE) {
26493085 ti->error = "Invalid integrity arguments";
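The two new optional parameters parsed above are requested like the existing ones, after the feature-argument count at the end of the table line. A hypothetical example (device path, length and key reference are placeholders):

    0 16777216 crypt aes-xts-plain64 :64:logon:cryptkey0 0 /dev/sdb2 0 2 no_read_workqueue no_write_workqueue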
....@@ -2684,6 +3120,21 @@
26843120 return 0;
26853121 }
26863122
3123
+#ifdef CONFIG_BLK_DEV_ZONED
3124
+
3125
+static int crypt_report_zones(struct dm_target *ti,
3126
+ struct dm_report_zones_args *args, unsigned int nr_zones)
3127
+{
3128
+ struct crypt_config *cc = ti->private;
3129
+ sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
3130
+
3131
+ args->start = cc->start;
3132
+ return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
3133
+ dm_report_zones_cb, args);
3134
+}
3135
+
3136
+#endif
3137
+
26873138 /*
26883139 * Construct an encryption mapping:
26893140 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
....@@ -2691,6 +3142,7 @@
26913142 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
26923143 {
26933144 struct crypt_config *cc;
3145
+ const char *devname = dm_table_device_name(ti->table);
26943146 int key_size;
26953147 unsigned int align_mask;
26963148 unsigned long long tmpll;
....@@ -2709,7 +3161,7 @@
27093161 return -EINVAL;
27103162 }
27113163
2712
- cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
3164
+ cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
27133165 if (!cc) {
27143166 ti->error = "Cannot allocate encryption context";
27153167 return -ENOMEM;
....@@ -2816,6 +3268,16 @@
28163268 }
28173269 cc->start = tmpll;
28183270
3271
+ /*
3272
+ * For zoned block devices, we need to preserve the issuer write
3273
+ * ordering. To do so, disable write workqueues and force inline
3274
+ * encryption completion.
3275
+ */
3276
+ if (bdev_is_zoned(cc->dev->bdev)) {
3277
+ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3278
+ set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3279
+ }
3280
+
28193281 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
28203282 ret = crypt_integrity_ctr(cc, ti);
28213283 if (ret)
....@@ -2836,18 +3298,19 @@
28363298 }
28373299
28383300 ret = -ENOMEM;
2839
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
3301
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
28403302 if (!cc->io_queue) {
28413303 ti->error = "Couldn't create kcryptd io queue";
28423304 goto bad;
28433305 }
28443306
28453307 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
2846
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
3308
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3309
+ 1, devname);
28473310 else
2848
- cc->crypt_queue = alloc_workqueue("kcryptd",
2849
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
2850
- num_online_cpus());
3311
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3312
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3313
+ num_online_cpus(), devname);
28513314 if (!cc->crypt_queue) {
28523315 ti->error = "Couldn't create kcryptd queue";
28533316 goto bad;
....@@ -2856,7 +3319,7 @@
28563319 spin_lock_init(&cc->write_thread_lock);
28573320 cc->write_tree = RB_ROOT;
28583321
2859
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
3322
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
28603323 if (IS_ERR(cc->write_thread)) {
28613324 ret = PTR_ERR(cc->write_thread);
28623325 cc->write_thread = NULL;
....@@ -2866,6 +3329,7 @@
28663329 wake_up_process(cc->write_thread);
28673330
28683331 ti->num_flush_bios = 1;
3332
+ ti->limit_swap_bios = true;
28693333
28703334 return 0;
28713335
....@@ -2940,6 +3404,11 @@
29403404 return DM_MAPIO_SUBMITTED;
29413405 }
29423406
3407
+static char hex2asc(unsigned char c)
3408
+{
3409
+ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
3410
+}
3411
+
29433412 static void crypt_status(struct dm_target *ti, status_type_t type,
29443413 unsigned status_flags, char *result, unsigned maxlen)
29453414 {
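hex2asc() above converts one nibble to its lower-case hex digit without a data-dependent branch or table lookup, so the key formatting in crypt_status() below does not branch on key bits. The arithmetic checks out in isolation (standalone test, same expression reproduced):

    #include <assert.h>

    static char hex2asc(unsigned char c)
    {
            return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
    }

    int main(void)
    {
            unsigned char c;

            /* 0..9: (9 - c) stays non-negative, the masked term is 0 */
            for (c = 0; c <= 9; c++)
                    assert(hex2asc(c) == '0' + c);

            /* 10..15: (unsigned)(9 - c) underflows, so the masked term is
             * 0x27 (39), and c + '0' + 39 == (c - 10) + 'a' */
            for (c = 10; c <= 15; c++)
                    assert(hex2asc(c) == 'a' + (c - 10));
            return 0;
    }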
....@@ -2958,9 +3427,12 @@
29583427 if (cc->key_size > 0) {
29593428 if (cc->key_string)
29603429 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
2961
- else
2962
- for (i = 0; i < cc->key_size; i++)
2963
- DMEMIT("%02x", cc->key[i]);
3430
+ else {
3431
+ for (i = 0; i < cc->key_size; i++) {
3432
+ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3433
+ hex2asc(cc->key[i] & 0xf));
3434
+ }
3435
+ }
29643436 } else
29653437 DMEMIT("-");
29663438
....@@ -2970,6 +3442,8 @@
29703442 num_feature_args += !!ti->num_discard_bios;
29713443 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
29723444 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3445
+ num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3446
+ num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
29733447 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
29743448 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
29753449 if (cc->on_disk_tag_size)
....@@ -2982,6 +3456,10 @@
29823456 DMEMIT(" same_cpu_crypt");
29833457 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
29843458 DMEMIT(" submit_from_crypt_cpus");
3459
+ if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3460
+ DMEMIT(" no_read_workqueue");
3461
+ if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3462
+ DMEMIT(" no_write_workqueue");
29853463 if (cc->on_disk_tag_size)
29863464 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
29873465 if (cc->sector_size != (1 << SECTOR_SHIFT))
....@@ -3056,14 +3534,8 @@
30563534 memset(cc->key, 0, cc->key_size * sizeof(u8));
30573535 return ret;
30583536 }
3059
- if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
3060
- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
3061
- ret = cc->iv_gen_ops->wipe(cc);
3062
- if (ret)
3063
- return ret;
3064
- }
3537
+ if (argc == 2 && !strcasecmp(argv[1], "wipe"))
30653538 return crypt_wipe_key(cc);
3066
- }
30673539 }
30683540
30693541 error:
....@@ -3100,10 +3572,14 @@
31003572
31013573 static struct target_type crypt_target = {
31023574 .name = "crypt",
3103
- .version = {1, 18, 1},
3575
+ .version = {1, 22, 0},
31043576 .module = THIS_MODULE,
31053577 .ctr = crypt_ctr,
31063578 .dtr = crypt_dtr,
3579
+#ifdef CONFIG_BLK_DEV_ZONED
3580
+ .features = DM_TARGET_ZONED_HM,
3581
+ .report_zones = crypt_report_zones,
3582
+#endif
31073583 .map = crypt_map,
31083584 .status = crypt_status,
31093585 .postsuspend = crypt_postsuspend,