2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/crypto/caam/caamalg.c
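
The hunks below move the driver from the ablkcipher interface to the skcipher API, add ChaCha20-Poly1305 and an XTS fallback path, switch the DEBUG-only dumps to print_hex_dump_debug()/dev_dbg(), and submit backlog-capable requests through crypto-engine while everything else goes straight to the job ring. A rough, illustrative sketch of that submission pattern (the function name skcipher_enqueue_req() is invented here for illustration; the helpers and fields it uses, caam_jr_enqueue(), skcipher_crypt_done() and rctx->edesc, are the ones introduced in the hunks below):

        static int skcipher_enqueue_req(struct device *jrdev,
                                        struct skcipher_request *req)
        {
                struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
                struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
                u32 *desc = rctx->edesc->hw_desc;
                int ret;

                /*
                 * Backlog-capable requests go through crypto-engine; the rest
                 * are queued directly on the job ring.
                 */
                if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                        ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
                                                                         req);
                else
                        ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);

                if (ret != -EINPROGRESS && ret != -EBUSY) {
                        /* submission failed: undo DMA mappings, free the edesc */
                        skcipher_unmap(jrdev, rctx->edesc, req);
                        kfree(rctx->edesc);
                }

                return ret;
        }
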
@@ -1,8 +1,9 @@
1
+// SPDX-License-Identifier: GPL-2.0+
12 /*
23 * caam - Freescale FSL CAAM support for crypto API
34 *
45 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5
- * Copyright 2016 NXP
6
+ * Copyright 2016-2019 NXP
67 *
78 * Based on talitos crypto API driver.
89 *
@@ -55,6 +56,9 @@
5556 #include "sg_sw_sec4.h"
5657 #include "key_gen.h"
5758 #include "caamalg_desc.h"
59
+#include <crypto/engine.h>
60
+#include <crypto/xts.h>
61
+#include <asm/unaligned.h>
5862
5963 /*
6064 * crypto alg
@@ -71,23 +75,17 @@
7175 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7276 CAAM_CMD_SZ * 5)
7377
74
-#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
78
+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
79
+
80
+#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
7581 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
76
-
77
-#ifdef DEBUG
78
-/* for print_hex_dumps with line references */
79
-#define debug(format, arg...) printk(format, arg)
80
-#else
81
-#define debug(format, arg...)
82
-#endif
83
-
84
-static struct list_head alg_list;
8582
8683 struct caam_alg_entry {
8784 int class1_alg_type;
8885 int class2_alg_type;
8986 bool rfc3686;
9087 bool geniv;
88
+ bool nodkp;
9189 };
9290
9391 struct caam_aead_alg {
@@ -96,23 +94,39 @@
9694 bool registered;
9795 };
9896
97
+struct caam_skcipher_alg {
98
+ struct skcipher_alg skcipher;
99
+ struct caam_alg_entry caam;
100
+ bool registered;
101
+};
102
+
99103 /*
100104 * per-session context
101105 */
102106 struct caam_ctx {
107
+ struct crypto_engine_ctx enginectx;
103108 u32 sh_desc_enc[DESC_MAX_USED_LEN];
104109 u32 sh_desc_dec[DESC_MAX_USED_LEN];
105
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
106110 u8 key[CAAM_MAX_KEY_SIZE];
107111 dma_addr_t sh_desc_enc_dma;
108112 dma_addr_t sh_desc_dec_dma;
109
- dma_addr_t sh_desc_givenc_dma;
110113 dma_addr_t key_dma;
111114 enum dma_data_direction dir;
112115 struct device *jrdev;
113116 struct alginfo adata;
114117 struct alginfo cdata;
115118 unsigned int authsize;
119
+ bool xts_key_fallback;
120
+ struct crypto_skcipher *fallback;
121
+};
122
+
123
+struct caam_skcipher_req_ctx {
124
+ struct skcipher_edesc *edesc;
125
+ struct skcipher_request fallback_req;
126
+};
127
+
128
+struct caam_aead_req_ctx {
129
+ struct aead_edesc *edesc;
116130 };
117131
118132 static int aead_null_set_sh_desc(struct crypto_aead *aead)
@@ -206,6 +220,18 @@
206220 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
207221 }
208222
223
+ /*
224
+ * In case |user key| > |derived key|, using DKP<imm,imm>
225
+ * would result in invalid opcodes (last bytes of user key) in
226
+ * the resulting descriptor. Use DKP<ptr,imm> instead => both
227
+ * virtual and dma key addresses are needed.
228
+ */
229
+ ctx->adata.key_virt = ctx->key;
230
+ ctx->adata.key_dma = ctx->key_dma;
231
+
232
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
233
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
234
+
209235 data_len[0] = ctx->adata.keylen_pad;
210236 data_len[1] = ctx->cdata.keylen;
211237
@@ -221,16 +247,6 @@
221247 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
222248 ARRAY_SIZE(data_len)) < 0)
223249 return -EINVAL;
224
-
225
- if (inl_mask & 1)
226
- ctx->adata.key_virt = ctx->key;
227
- else
228
- ctx->adata.key_dma = ctx->key_dma;
229
-
230
- if (inl_mask & 2)
231
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
232
- else
233
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
234250
235251 ctx->adata.key_inline = !!(inl_mask & 1);
236252 ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -253,16 +269,6 @@
253269 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
254270 ARRAY_SIZE(data_len)) < 0)
255271 return -EINVAL;
256
-
257
- if (inl_mask & 1)
258
- ctx->adata.key_virt = ctx->key;
259
- else
260
- ctx->adata.key_dma = ctx->key_dma;
261
-
262
- if (inl_mask & 2)
263
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
264
- else
265
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
266272
267273 ctx->adata.key_inline = !!(inl_mask & 1);
268274 ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -287,16 +293,6 @@
287293 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
288294 ARRAY_SIZE(data_len)) < 0)
289295 return -EINVAL;
290
-
291
- if (inl_mask & 1)
292
- ctx->adata.key_virt = ctx->key;
293
- else
294
- ctx->adata.key_dma = ctx->key_dma;
295
-
296
- if (inl_mask & 2)
297
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
298
- else
299
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
300296
301297 ctx->adata.key_inline = !!(inl_mask & 1);
302298 ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -377,6 +373,11 @@
377373 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
378374 {
379375 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
376
+ int err;
377
+
378
+ err = crypto_gcm_check_authsize(authsize);
379
+ if (err)
380
+ return err;
380381
381382 ctx->authsize = authsize;
382383 gcm_set_sh_desc(authenc);
@@ -440,6 +441,11 @@
440441 unsigned int authsize)
441442 {
442443 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
444
+ int err;
445
+
446
+ err = crypto_rfc4106_check_authsize(authsize);
447
+ if (err)
448
+ return err;
443449
444450 ctx->authsize = authsize;
445451 rfc4106_set_sh_desc(authenc);
@@ -504,10 +510,66 @@
504510 {
505511 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
506512
513
+ if (authsize != 16)
514
+ return -EINVAL;
515
+
507516 ctx->authsize = authsize;
508517 rfc4543_set_sh_desc(authenc);
509518
510519 return 0;
520
+}
521
+
522
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
523
+{
524
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
525
+ struct device *jrdev = ctx->jrdev;
526
+ unsigned int ivsize = crypto_aead_ivsize(aead);
527
+ u32 *desc;
528
+
529
+ if (!ctx->cdata.keylen || !ctx->authsize)
530
+ return 0;
531
+
532
+ desc = ctx->sh_desc_enc;
533
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
534
+ ctx->authsize, true, false);
535
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
536
+ desc_bytes(desc), ctx->dir);
537
+
538
+ desc = ctx->sh_desc_dec;
539
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
540
+ ctx->authsize, false, false);
541
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
542
+ desc_bytes(desc), ctx->dir);
543
+
544
+ return 0;
545
+}
546
+
547
+static int chachapoly_setauthsize(struct crypto_aead *aead,
548
+ unsigned int authsize)
549
+{
550
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
551
+
552
+ if (authsize != POLY1305_DIGEST_SIZE)
553
+ return -EINVAL;
554
+
555
+ ctx->authsize = authsize;
556
+ return chachapoly_set_sh_desc(aead);
557
+}
558
+
559
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
560
+ unsigned int keylen)
561
+{
562
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
563
+ unsigned int ivsize = crypto_aead_ivsize(aead);
564
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
565
+
566
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
567
+ return -EINVAL;
568
+
569
+ ctx->cdata.key_virt = key;
570
+ ctx->cdata.keylen = keylen - saltlen;
571
+
572
+ return chachapoly_set_sh_desc(aead);
511573 }
512574
513575 static int aead_setkey(struct crypto_aead *aead,
@@ -522,13 +584,11 @@
522584 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
523585 goto badkey;
524586
525
-#ifdef DEBUG
526
- printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
587
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
527588 keys.authkeylen + keys.enckeylen, keys.enckeylen,
528589 keys.authkeylen);
529
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
530
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
531
-#endif
590
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
591
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
532592
533593 /*
534594 * If DKP is supported, use it in the shared descriptor to generate
@@ -562,20 +622,35 @@
562622 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
563623 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
564624 keys.enckeylen, ctx->dir);
565
-#ifdef DEBUG
566
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
567
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
568
- ctx->adata.keylen_pad + keys.enckeylen, 1);
569
-#endif
625
+
626
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
627
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
628
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
570629
571630 skip_split_key:
572631 ctx->cdata.keylen = keys.enckeylen;
573632 memzero_explicit(&keys, sizeof(keys));
574633 return aead_set_sh_desc(aead);
575634 badkey:
576
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
577635 memzero_explicit(&keys, sizeof(keys));
578636 return -EINVAL;
637
+}
638
+
639
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
640
+ unsigned int keylen)
641
+{
642
+ struct crypto_authenc_keys keys;
643
+ int err;
644
+
645
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
646
+ if (unlikely(err))
647
+ return err;
648
+
649
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
650
+ aead_setkey(aead, key, keylen);
651
+
652
+ memzero_explicit(&keys, sizeof(keys));
653
+ return err;
579654 }
580655
581656 static int gcm_setkey(struct crypto_aead *aead,
@@ -583,11 +658,14 @@
583658 {
584659 struct caam_ctx *ctx = crypto_aead_ctx(aead);
585660 struct device *jrdev = ctx->jrdev;
661
+ int err;
586662
587
-#ifdef DEBUG
588
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
589
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
590
-#endif
663
+ err = aes_check_keylen(keylen);
664
+ if (err)
665
+ return err;
666
+
667
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
668
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
591669
592670 memcpy(ctx->key, key, keylen);
593671 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -601,14 +679,14 @@
601679 {
602680 struct caam_ctx *ctx = crypto_aead_ctx(aead);
603681 struct device *jrdev = ctx->jrdev;
682
+ int err;
604683
605
- if (keylen < 4)
606
- return -EINVAL;
684
+ err = aes_check_keylen(keylen - 4);
685
+ if (err)
686
+ return err;
607687
608
-#ifdef DEBUG
609
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
610
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
611
-#endif
688
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
689
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
612690
613691 memcpy(ctx->key, key, keylen);
614692
@@ -627,14 +705,14 @@
627705 {
628706 struct caam_ctx *ctx = crypto_aead_ctx(aead);
629707 struct device *jrdev = ctx->jrdev;
708
+ int err;
630709
631
- if (keylen < 4)
632
- return -EINVAL;
710
+ err = aes_check_keylen(keylen - 4);
711
+ if (err)
712
+ return err;
633713
634
-#ifdef DEBUG
635
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
636
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
637
-#endif
714
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
715
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
638716
639717 memcpy(ctx->key, key, keylen);
640718
@@ -648,98 +726,146 @@
648726 return rfc4543_set_sh_desc(aead);
649727 }
650728
651
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
652
- const u8 *key, unsigned int keylen)
729
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
730
+ unsigned int keylen, const u32 ctx1_iv_off)
653731 {
654
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
655
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
656
- const char *alg_name = crypto_tfm_alg_name(tfm);
732
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
733
+ struct caam_skcipher_alg *alg =
734
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
735
+ skcipher);
657736 struct device *jrdev = ctx->jrdev;
658
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
737
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
659738 u32 *desc;
660
- u32 ctx1_iv_off = 0;
661
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
662
- OP_ALG_AAI_CTR_MOD128);
663
- const bool is_rfc3686 = (ctr_mode &&
664
- (strstr(alg_name, "rfc3686") != NULL));
739
+ const bool is_rfc3686 = alg->caam.rfc3686;
665740
666
-#ifdef DEBUG
667
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
668
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
669
-#endif
670
- /*
671
- * AES-CTR needs to load IV in CONTEXT1 reg
672
- * at an offset of 128bits (16bytes)
673
- * CONTEXT1[255:128] = IV
674
- */
675
- if (ctr_mode)
676
- ctx1_iv_off = 16;
741
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
742
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
743
+
744
+ ctx->cdata.keylen = keylen;
745
+ ctx->cdata.key_virt = key;
746
+ ctx->cdata.key_inline = true;
747
+
748
+ /* skcipher_encrypt shared descriptor */
749
+ desc = ctx->sh_desc_enc;
750
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
751
+ ctx1_iv_off);
752
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
753
+ desc_bytes(desc), ctx->dir);
754
+
755
+ /* skcipher_decrypt shared descriptor */
756
+ desc = ctx->sh_desc_dec;
757
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
758
+ ctx1_iv_off);
759
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
760
+ desc_bytes(desc), ctx->dir);
761
+
762
+ return 0;
763
+}
764
+
765
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
766
+ const u8 *key, unsigned int keylen)
767
+{
768
+ int err;
769
+
770
+ err = aes_check_keylen(keylen);
771
+ if (err)
772
+ return err;
773
+
774
+ return skcipher_setkey(skcipher, key, keylen, 0);
775
+}
776
+
777
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
778
+ const u8 *key, unsigned int keylen)
779
+{
780
+ u32 ctx1_iv_off;
781
+ int err;
677782
678783 /*
679784 * RFC3686 specific:
680785 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
681786 * | *key = {KEY, NONCE}
682787 */
683
- if (is_rfc3686) {
684
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
685
- keylen -= CTR_RFC3686_NONCE_SIZE;
686
- }
788
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
789
+ keylen -= CTR_RFC3686_NONCE_SIZE;
687790
688
- ctx->cdata.keylen = keylen;
689
- ctx->cdata.key_virt = key;
690
- ctx->cdata.key_inline = true;
791
+ err = aes_check_keylen(keylen);
792
+ if (err)
793
+ return err;
691794
692
- /* ablkcipher_encrypt shared descriptor */
693
- desc = ctx->sh_desc_enc;
694
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
695
- ctx1_iv_off);
696
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
697
- desc_bytes(desc), ctx->dir);
698
-
699
- /* ablkcipher_decrypt shared descriptor */
700
- desc = ctx->sh_desc_dec;
701
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
702
- ctx1_iv_off);
703
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
704
- desc_bytes(desc), ctx->dir);
705
-
706
- /* ablkcipher_givencrypt shared descriptor */
707
- desc = ctx->sh_desc_givenc;
708
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
709
- ctx1_iv_off);
710
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
711
- desc_bytes(desc), ctx->dir);
712
-
713
- return 0;
795
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
714796 }
715797
716
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
717
- const u8 *key, unsigned int keylen)
798
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
799
+ const u8 *key, unsigned int keylen)
718800 {
719
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
720
- struct device *jrdev = ctx->jrdev;
721
- u32 *desc;
801
+ u32 ctx1_iv_off;
802
+ int err;
722803
723
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
724
- crypto_ablkcipher_set_flags(ablkcipher,
725
- CRYPTO_TFM_RES_BAD_KEY_LEN);
726
- dev_err(jrdev, "key size mismatch\n");
727
- return -EINVAL;
804
+ /*
805
+ * AES-CTR needs to load IV in CONTEXT1 reg
806
+ * at an offset of 128bits (16bytes)
807
+ * CONTEXT1[255:128] = IV
808
+ */
809
+ ctx1_iv_off = 16;
810
+
811
+ err = aes_check_keylen(keylen);
812
+ if (err)
813
+ return err;
814
+
815
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
816
+}
817
+
818
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
819
+ const u8 *key, unsigned int keylen)
820
+{
821
+ return verify_skcipher_des_key(skcipher, key) ?:
822
+ skcipher_setkey(skcipher, key, keylen, 0);
823
+}
824
+
825
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
826
+ const u8 *key, unsigned int keylen)
827
+{
828
+ return verify_skcipher_des3_key(skcipher, key) ?:
829
+ skcipher_setkey(skcipher, key, keylen, 0);
830
+}
831
+
832
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
833
+ unsigned int keylen)
834
+{
835
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
836
+ struct device *jrdev = ctx->jrdev;
837
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
838
+ u32 *desc;
839
+ int err;
840
+
841
+ err = xts_verify_key(skcipher, key, keylen);
842
+ if (err) {
843
+ dev_dbg(jrdev, "key size mismatch\n");
844
+ return err;
845
+ }
846
+
847
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
848
+ ctx->xts_key_fallback = true;
849
+
850
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
851
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
852
+ if (err)
853
+ return err;
728854 }
729855
730856 ctx->cdata.keylen = keylen;
731857 ctx->cdata.key_virt = key;
732858 ctx->cdata.key_inline = true;
733859
734
- /* xts_ablkcipher_encrypt shared descriptor */
860
+ /* xts_skcipher_encrypt shared descriptor */
735861 desc = ctx->sh_desc_enc;
736
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
862
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
737863 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
738864 desc_bytes(desc), ctx->dir);
739865
740
- /* xts_ablkcipher_decrypt shared descriptor */
866
+ /* xts_skcipher_decrypt shared descriptor */
741867 desc = ctx->sh_desc_dec;
742
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
868
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
743869 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
744870 desc_bytes(desc), ctx->dir);
745871
@@ -750,7 +876,10 @@
750876 * aead_edesc - s/w-extended aead descriptor
751877 * @src_nents: number of segments in input s/w scatterlist
752878 * @dst_nents: number of segments in output s/w scatterlist
879
+ * @mapped_src_nents: number of segments in input h/w link table
880
+ * @mapped_dst_nents: number of segments in output h/w link table
753881 * @sec4_sg_bytes: length of dma mapped sec4_sg space
882
+ * @bklog: stored to determine if the request needs backlog
754883 * @sec4_sg_dma: bus physical mapped address of h/w link table
755884 * @sec4_sg: pointer to h/w link table
756885 * @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -758,52 +887,59 @@
758887 struct aead_edesc {
759888 int src_nents;
760889 int dst_nents;
890
+ int mapped_src_nents;
891
+ int mapped_dst_nents;
761892 int sec4_sg_bytes;
893
+ bool bklog;
762894 dma_addr_t sec4_sg_dma;
763895 struct sec4_sg_entry *sec4_sg;
764896 u32 hw_desc[];
765897 };
766898
767899 /*
768
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
900
+ * skcipher_edesc - s/w-extended skcipher descriptor
769901 * @src_nents: number of segments in input s/w scatterlist
770902 * @dst_nents: number of segments in output s/w scatterlist
903
+ * @mapped_src_nents: number of segments in input h/w link table
904
+ * @mapped_dst_nents: number of segments in output h/w link table
771905 * @iv_dma: dma address of iv for checking continuity and link table
772
- * @iv_dir: DMA mapping direction for IV
773906 * @sec4_sg_bytes: length of dma mapped sec4_sg space
907
+ * @bklog: stored to determine if the request needs backlog
774908 * @sec4_sg_dma: bus physical mapped address of h/w link table
775909 * @sec4_sg: pointer to h/w link table
776910 * @hw_desc: the h/w job descriptor followed by any referenced link tables
777911 * and IV
778912 */
779
-struct ablkcipher_edesc {
913
+struct skcipher_edesc {
780914 int src_nents;
781915 int dst_nents;
916
+ int mapped_src_nents;
917
+ int mapped_dst_nents;
782918 dma_addr_t iv_dma;
783
- enum dma_data_direction iv_dir;
784919 int sec4_sg_bytes;
920
+ bool bklog;
785921 dma_addr_t sec4_sg_dma;
786922 struct sec4_sg_entry *sec4_sg;
787
- u32 hw_desc[0];
923
+ u32 hw_desc[];
788924 };
789925
790926 static void caam_unmap(struct device *dev, struct scatterlist *src,
791927 struct scatterlist *dst, int src_nents,
792928 int dst_nents,
793
- dma_addr_t iv_dma, int ivsize,
794
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
929
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
795930 int sec4_sg_bytes)
796931 {
797932 if (dst != src) {
798933 if (src_nents)
799934 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
800
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
935
+ if (dst_nents)
936
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
801937 } else {
802938 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
803939 }
804940
805941 if (iv_dma)
806
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
942
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
807943 if (sec4_sg_bytes)
808944 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
809945 DMA_TO_DEVICE);
@@ -814,156 +950,103 @@
814950 struct aead_request *req)
815951 {
816952 caam_unmap(dev, req->src, req->dst,
817
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
953
+ edesc->src_nents, edesc->dst_nents, 0, 0,
818954 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
819955 }
820956
821
-static void ablkcipher_unmap(struct device *dev,
822
- struct ablkcipher_edesc *edesc,
823
- struct ablkcipher_request *req)
957
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
958
+ struct skcipher_request *req)
824959 {
825
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
826
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
960
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
961
+ int ivsize = crypto_skcipher_ivsize(skcipher);
827962
828963 caam_unmap(dev, req->src, req->dst,
829964 edesc->src_nents, edesc->dst_nents,
830
- edesc->iv_dma, ivsize, edesc->iv_dir,
965
+ edesc->iv_dma, ivsize,
831966 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
832967 }
833968
834
-static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
835
- void *context)
969
+static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
970
+ void *context)
836971 {
837972 struct aead_request *req = context;
973
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
974
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
838975 struct aead_edesc *edesc;
976
+ int ecode = 0;
977
+ bool has_bklog;
839978
840
-#ifdef DEBUG
841
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
842
-#endif
979
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
843980
844
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
981
+ edesc = rctx->edesc;
982
+ has_bklog = edesc->bklog;
845983
846984 if (err)
847
- caam_jr_strstatus(jrdev, err);
985
+ ecode = caam_jr_strstatus(jrdev, err);
848986
849987 aead_unmap(jrdev, edesc, req);
850988
851989 kfree(edesc);
852990
853
- aead_request_complete(req, err);
991
+ /*
992
+ * If no backlog flag, the completion of the request is done
993
+ * by CAAM, not crypto engine.
994
+ */
995
+ if (!has_bklog)
996
+ aead_request_complete(req, ecode);
997
+ else
998
+ crypto_finalize_aead_request(jrp->engine, req, ecode);
854999 }
8551000
856
-static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
857
- void *context)
1001
+static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
1002
+ void *context)
8581003 {
859
- struct aead_request *req = context;
860
- struct aead_edesc *edesc;
1004
+ struct skcipher_request *req = context;
1005
+ struct skcipher_edesc *edesc;
1006
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1007
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1008
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
1009
+ int ivsize = crypto_skcipher_ivsize(skcipher);
1010
+ int ecode = 0;
1011
+ bool has_bklog;
8611012
862
-#ifdef DEBUG
863
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
864
-#endif
1013
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
8651014
866
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
867
-
1015
+ edesc = rctx->edesc;
1016
+ has_bklog = edesc->bklog;
8681017 if (err)
869
- caam_jr_strstatus(jrdev, err);
1018
+ ecode = caam_jr_strstatus(jrdev, err);
8701019
871
- aead_unmap(jrdev, edesc, req);
1020
+ skcipher_unmap(jrdev, edesc, req);
8721021
8731022 /*
874
- * verify hw auth check passed else return -EBADMSG
1023
+ * The crypto API expects us to set the IV (req->iv) to the last
1024
+ * ciphertext block (CBC mode) or last counter (CTR mode).
1025
+ * This is used e.g. by the CTS mode.
8751026 */
876
- if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
877
- err = -EBADMSG;
1027
+ if (ivsize && !ecode) {
1028
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1029
+ ivsize);
8781030
879
- kfree(edesc);
880
-
881
- aead_request_complete(req, err);
882
-}
883
-
884
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
885
- void *context)
886
-{
887
- struct ablkcipher_request *req = context;
888
- struct ablkcipher_edesc *edesc;
889
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
890
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
891
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
892
-
893
-#ifdef DEBUG
894
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
895
-#endif
896
-
897
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
898
-
899
- if (err)
900
- caam_jr_strstatus(jrdev, err);
901
-
902
-#ifdef DEBUG
903
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
904
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
905
- edesc->src_nents > 1 ? 100 : ivsize, 1);
906
-#endif
907
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
908
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
909
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
910
-
911
- ablkcipher_unmap(jrdev, edesc, req);
912
-
913
- /*
914
- * The crypto API expects us to set the IV (req->info) to the last
915
- * ciphertext block when running in CBC mode.
916
- */
917
- if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
918
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
919
- ivsize, ivsize, 0);
920
-
921
- /* In case initial IV was generated, copy it in GIVCIPHER request */
922
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
923
- u8 *iv;
924
- struct skcipher_givcrypt_request *greq;
925
-
926
- greq = container_of(req, struct skcipher_givcrypt_request,
927
- creq);
928
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
929
- edesc->sec4_sg_bytes;
930
- memcpy(greq->giv, iv, ivsize);
1031
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1032
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1033
+ ivsize, 1);
9311034 }
9321035
933
- kfree(edesc);
934
-
935
- ablkcipher_request_complete(req, err);
936
-}
937
-
938
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
939
- void *context)
940
-{
941
- struct ablkcipher_request *req = context;
942
- struct ablkcipher_edesc *edesc;
943
-#ifdef DEBUG
944
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
945
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
946
-
947
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
948
-#endif
949
-
950
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
951
- if (err)
952
- caam_jr_strstatus(jrdev, err);
953
-
954
-#ifdef DEBUG
955
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
956
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
957
- ivsize, 1);
958
-#endif
959
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1036
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
9601037 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
961
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1038
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
9621039
963
- ablkcipher_unmap(jrdev, edesc, req);
9641040 kfree(edesc);
9651041
966
- ablkcipher_request_complete(req, err);
1042
+ /*
1043
+ * If no backlog flag, the completion of the request is done
1044
+ * by CAAM, not crypto engine.
1045
+ */
1046
+ if (!has_bklog)
1047
+ skcipher_request_complete(req, ecode);
1048
+ else
1049
+ crypto_finalize_skcipher_request(jrp->engine, req, ecode);
9671050 }
9681051
9691052 /*
@@ -990,11 +1073,12 @@
9901073 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9911074
9921075 if (all_contig) {
993
- src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
1076
+ src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
1077
+ 0;
9941078 in_options = 0;
9951079 } else {
9961080 src_dma = edesc->sec4_sg_dma;
997
- sec4_sg_index += edesc->src_nents;
1081
+ sec4_sg_index += edesc->mapped_src_nents;
9981082 in_options = LDST_SGF;
9991083 }
10001084
@@ -1005,7 +1089,10 @@
10051089 out_options = in_options;
10061090
10071091 if (unlikely(req->src != req->dst)) {
1008
- if (edesc->dst_nents == 1) {
1092
+ if (!edesc->mapped_dst_nents) {
1093
+ dst_dma = 0;
1094
+ out_options = 0;
1095
+ } else if (edesc->mapped_dst_nents == 1) {
10091096 dst_dma = sg_dma_address(req->dst);
10101097 out_options = 0;
10111098 } else {
@@ -1054,6 +1141,40 @@
10541141 /* Append IV */
10551142 append_data(desc, req->iv, ivsize);
10561143 /* End of blank commands */
1144
+}
1145
+
1146
+static void init_chachapoly_job(struct aead_request *req,
1147
+ struct aead_edesc *edesc, bool all_contig,
1148
+ bool encrypt)
1149
+{
1150
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
1151
+ unsigned int ivsize = crypto_aead_ivsize(aead);
1152
+ unsigned int assoclen = req->assoclen;
1153
+ u32 *desc = edesc->hw_desc;
1154
+ u32 ctx_iv_off = 4;
1155
+
1156
+ init_aead_job(req, edesc, all_contig, encrypt);
1157
+
1158
+ if (ivsize != CHACHAPOLY_IV_SIZE) {
1159
+ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1160
+ ctx_iv_off += 4;
1161
+
1162
+ /*
1163
+ * The associated data comes already with the IV but we need
1164
+ * to skip it when we authenticate or encrypt...
1165
+ */
1166
+ assoclen -= ivsize;
1167
+ }
1168
+
1169
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1170
+
1171
+ /*
1172
+ * For IPsec load the IV further in the same register.
1173
+ * For RFC7539 simply load the 12 bytes nonce in a single operation
1174
+ */
1175
+ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1176
+ LDST_SRCDST_BYTE_CONTEXT |
1177
+ ctx_iv_off << LDST_OFFSET_SHIFT);
10571178 }
10581179
10591180 static void init_authenc_job(struct aead_request *req,
@@ -1106,90 +1227,59 @@
11061227 }
11071228
11081229 /*
1109
- * Fill in ablkcipher job descriptor
1230
+ * Fill in skcipher job descriptor
11101231 */
1111
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1112
- struct ablkcipher_edesc *edesc,
1113
- struct ablkcipher_request *req)
1232
+static void init_skcipher_job(struct skcipher_request *req,
1233
+ struct skcipher_edesc *edesc,
1234
+ const bool encrypt)
11141235 {
1115
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1116
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1236
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1237
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1238
+ struct device *jrdev = ctx->jrdev;
1239
+ int ivsize = crypto_skcipher_ivsize(skcipher);
11171240 u32 *desc = edesc->hw_desc;
1118
- u32 out_options = 0;
1119
- dma_addr_t dst_dma;
1120
- int len;
1121
-
1122
-#ifdef DEBUG
1123
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1124
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1125
- ivsize, 1);
1126
- pr_err("asked=%d, nbytes%d\n",
1127
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
1128
-#endif
1129
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
1130
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1131
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
1132
-
1133
- len = desc_len(sh_desc);
1134
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1135
-
1136
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
1137
- LDST_SGF);
1138
-
1139
- if (likely(req->src == req->dst)) {
1140
- dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
1141
- out_options = LDST_SGF;
1142
- } else {
1143
- if (edesc->dst_nents == 1) {
1144
- dst_dma = sg_dma_address(req->dst);
1145
- } else {
1146
- dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
1147
- sizeof(struct sec4_sg_entry);
1148
- out_options = LDST_SGF;
1149
- }
1150
- }
1151
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1152
-}
1153
-
1154
-/*
1155
- * Fill in ablkcipher givencrypt job descriptor
1156
- */
1157
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
1158
- struct ablkcipher_edesc *edesc,
1159
- struct ablkcipher_request *req)
1160
-{
1161
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1162
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1163
- u32 *desc = edesc->hw_desc;
1164
- u32 in_options;
1165
- dma_addr_t dst_dma, src_dma;
1241
+ u32 *sh_desc;
1242
+ u32 in_options = 0, out_options = 0;
1243
+ dma_addr_t src_dma, dst_dma, ptr;
11661244 int len, sec4_sg_index = 0;
11671245
1168
-#ifdef DEBUG
1169
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
1170
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1171
- ivsize, 1);
1172
-#endif
1173
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
1246
+ print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
1247
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1248
+ dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
1249
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1250
+
1251
+ caam_dump_sg("src @" __stringify(__LINE__)": ",
11741252 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1175
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
1253
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1254
+
1255
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1256
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
11761257
11771258 len = desc_len(sh_desc);
11781259 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
11791260
1180
- if (edesc->src_nents == 1) {
1181
- src_dma = sg_dma_address(req->src);
1182
- in_options = 0;
1183
- } else {
1261
+ if (ivsize || edesc->mapped_src_nents > 1) {
11841262 src_dma = edesc->sec4_sg_dma;
1185
- sec4_sg_index += edesc->src_nents;
1263
+ sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
11861264 in_options = LDST_SGF;
1265
+ } else {
1266
+ src_dma = sg_dma_address(req->src);
11871267 }
1188
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
11891268
1190
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1191
- sizeof(struct sec4_sg_entry);
1192
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
1269
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
1270
+
1271
+ if (likely(req->src == req->dst)) {
1272
+ dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
1273
+ out_options = in_options;
1274
+ } else if (!ivsize && edesc->mapped_dst_nents == 1) {
1275
+ dst_dma = sg_dma_address(req->dst);
1276
+ } else {
1277
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1278
+ sizeof(struct sec4_sg_entry);
1279
+ out_options = LDST_SGF;
1280
+ }
1281
+
1282
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
11931283 }
11941284
11951285 /*
@@ -1202,40 +1292,40 @@
12021292 struct crypto_aead *aead = crypto_aead_reqtfm(req);
12031293 struct caam_ctx *ctx = crypto_aead_ctx(aead);
12041294 struct device *jrdev = ctx->jrdev;
1295
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
12051296 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
12061297 GFP_KERNEL : GFP_ATOMIC;
12071298 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1299
+ int src_len, dst_len = 0;
12081300 struct aead_edesc *edesc;
12091301 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
12101302 unsigned int authsize = ctx->authsize;
12111303
12121304 if (unlikely(req->dst != req->src)) {
1213
- src_nents = sg_nents_for_len(req->src, req->assoclen +
1214
- req->cryptlen);
1305
+ src_len = req->assoclen + req->cryptlen;
1306
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
1307
+
1308
+ src_nents = sg_nents_for_len(req->src, src_len);
12151309 if (unlikely(src_nents < 0)) {
12161310 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1217
- req->assoclen + req->cryptlen);
1311
+ src_len);
12181312 return ERR_PTR(src_nents);
12191313 }
12201314
1221
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
1222
- req->cryptlen +
1223
- (encrypt ? authsize :
1224
- (-authsize)));
1315
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
12251316 if (unlikely(dst_nents < 0)) {
12261317 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1227
- req->assoclen + req->cryptlen +
1228
- (encrypt ? authsize : (-authsize)));
1318
+ dst_len);
12291319 return ERR_PTR(dst_nents);
12301320 }
12311321 } else {
1232
- src_nents = sg_nents_for_len(req->src, req->assoclen +
1233
- req->cryptlen +
1234
- (encrypt ? authsize : 0));
1322
+ src_len = req->assoclen + req->cryptlen +
1323
+ (encrypt ? authsize : 0);
1324
+
1325
+ src_nents = sg_nents_for_len(req->src, src_len);
12351326 if (unlikely(src_nents < 0)) {
12361327 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1237
- req->assoclen + req->cryptlen +
1238
- (encrypt ? authsize : 0));
1328
+ src_len);
12391329 return ERR_PTR(src_nents);
12401330 }
12411331 }
@@ -1260,17 +1350,32 @@
12601350 mapped_src_nents = 0;
12611351 }
12621352
1263
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1264
- DMA_FROM_DEVICE);
1265
- if (unlikely(!mapped_dst_nents)) {
1266
- dev_err(jrdev, "unable to map destination\n");
1267
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1268
- return ERR_PTR(-ENOMEM);
1353
+ /* Cover also the case of null (zero length) output data */
1354
+ if (dst_nents) {
1355
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst,
1356
+ dst_nents,
1357
+ DMA_FROM_DEVICE);
1358
+ if (unlikely(!mapped_dst_nents)) {
1359
+ dev_err(jrdev, "unable to map destination\n");
1360
+ dma_unmap_sg(jrdev, req->src, src_nents,
1361
+ DMA_TO_DEVICE);
1362
+ return ERR_PTR(-ENOMEM);
1363
+ }
1364
+ } else {
1365
+ mapped_dst_nents = 0;
12691366 }
12701367 }
12711368
1369
+ /*
1370
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1371
+ * the end of the table by allocating more S/G entries.
1372
+ */
12721373 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
1273
- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1374
+ if (mapped_dst_nents > 1)
1375
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
1376
+ else
1377
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
1378
+
12741379 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
12751380
12761381 /* allocate space for base edesc and hw desc commands, link tables */
@@ -1278,24 +1383,29 @@
12781383 GFP_DMA | flags);
12791384 if (!edesc) {
12801385 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1281
- 0, DMA_NONE, 0, 0);
1386
+ 0, 0, 0);
12821387 return ERR_PTR(-ENOMEM);
12831388 }
12841389
12851390 edesc->src_nents = src_nents;
12861391 edesc->dst_nents = dst_nents;
1392
+ edesc->mapped_src_nents = mapped_src_nents;
1393
+ edesc->mapped_dst_nents = mapped_dst_nents;
12871394 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
12881395 desc_bytes;
1396
+
1397
+ rctx->edesc = edesc;
1398
+
12891399 *all_contig_ptr = !(mapped_src_nents > 1);
12901400
12911401 sec4_sg_index = 0;
12921402 if (mapped_src_nents > 1) {
1293
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
1403
+ sg_to_sec4_sg_last(req->src, src_len,
12941404 edesc->sec4_sg + sec4_sg_index, 0);
12951405 sec4_sg_index += mapped_src_nents;
12961406 }
12971407 if (mapped_dst_nents > 1) {
1298
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
1408
+ sg_to_sec4_sg_last(req->dst, dst_len,
12991409 edesc->sec4_sg + sec4_sg_index, 0);
13001410 }
13011411
@@ -1316,7 +1426,34 @@
13161426 return edesc;
13171427 }
13181428
1319
-static int gcm_encrypt(struct aead_request *req)
1429
+static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
1430
+{
1431
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1432
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1433
+ struct aead_edesc *edesc = rctx->edesc;
1434
+ u32 *desc = edesc->hw_desc;
1435
+ int ret;
1436
+
1437
+ /*
1438
+ * Only backlog requests are sent to crypto-engine since the others
1439
+ * can be handled by CAAM, if free, especially since JR has up to 1024
1440
+ * entries (more than the 10 entries from crypto-engine).
1441
+ */
1442
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1443
+ ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
1444
+ req);
1445
+ else
1446
+ ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
1447
+
1448
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1449
+ aead_unmap(jrdev, edesc, req);
1450
+ kfree(rctx->edesc);
1451
+ }
1452
+
1453
+ return ret;
1454
+}
1455
+
1456
+static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
13201457 {
13211458 struct aead_edesc *edesc;
13221459 struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -1324,190 +1461,163 @@
13241461 struct device *jrdev = ctx->jrdev;
13251462 bool all_contig;
13261463 u32 *desc;
1327
- int ret = 0;
1464
+
1465
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1466
+ encrypt);
1467
+ if (IS_ERR(edesc))
1468
+ return PTR_ERR(edesc);
1469
+
1470
+ desc = edesc->hw_desc;
1471
+
1472
+ init_chachapoly_job(req, edesc, all_contig, encrypt);
1473
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
1474
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1475
+ 1);
1476
+
1477
+ return aead_enqueue_req(jrdev, req);
1478
+}
1479
+
1480
+static int chachapoly_encrypt(struct aead_request *req)
1481
+{
1482
+ return chachapoly_crypt(req, true);
1483
+}
1484
+
1485
+static int chachapoly_decrypt(struct aead_request *req)
1486
+{
1487
+ return chachapoly_crypt(req, false);
1488
+}
1489
+
1490
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
1491
+{
1492
+ struct aead_edesc *edesc;
1493
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
1494
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
1495
+ struct device *jrdev = ctx->jrdev;
1496
+ bool all_contig;
13281497
13291498 /* allocate extended descriptor */
1330
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
1499
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1500
+ &all_contig, encrypt);
13311501 if (IS_ERR(edesc))
13321502 return PTR_ERR(edesc);
13331503
13341504 /* Create and submit job descriptor */
1335
- init_gcm_job(req, edesc, all_contig, true);
1336
-#ifdef DEBUG
1337
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1338
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1339
- desc_bytes(edesc->hw_desc), 1);
1340
-#endif
1505
+ init_authenc_job(req, edesc, all_contig, encrypt);
13411506
1342
- desc = edesc->hw_desc;
1343
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1344
- if (!ret) {
1345
- ret = -EINPROGRESS;
1346
- } else {
1347
- aead_unmap(jrdev, edesc, req);
1348
- kfree(edesc);
1349
- }
1507
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1508
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1509
+ desc_bytes(edesc->hw_desc), 1);
13501510
1351
- return ret;
1352
-}
1353
-
1354
-static int ipsec_gcm_encrypt(struct aead_request *req)
1355
-{
1356
- if (req->assoclen < 8)
1357
- return -EINVAL;
1358
-
1359
- return gcm_encrypt(req);
1511
+ return aead_enqueue_req(jrdev, req);
13601512 }
13611513
13621514 static int aead_encrypt(struct aead_request *req)
13631515 {
1364
- struct aead_edesc *edesc;
1365
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
1366
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
1367
- struct device *jrdev = ctx->jrdev;
1368
- bool all_contig;
1369
- u32 *desc;
1370
- int ret = 0;
1371
-
1372
- /* allocate extended descriptor */
1373
- edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1374
- &all_contig, true);
1375
- if (IS_ERR(edesc))
1376
- return PTR_ERR(edesc);
1377
-
1378
- /* Create and submit job descriptor */
1379
- init_authenc_job(req, edesc, all_contig, true);
1380
-#ifdef DEBUG
1381
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1382
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1383
- desc_bytes(edesc->hw_desc), 1);
1384
-#endif
1385
-
1386
- desc = edesc->hw_desc;
1387
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1388
- if (!ret) {
1389
- ret = -EINPROGRESS;
1390
- } else {
1391
- aead_unmap(jrdev, edesc, req);
1392
- kfree(edesc);
1393
- }
1394
-
1395
- return ret;
1396
-}
1397
-
1398
-static int gcm_decrypt(struct aead_request *req)
1399
-{
1400
- struct aead_edesc *edesc;
1401
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
1402
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
1403
- struct device *jrdev = ctx->jrdev;
1404
- bool all_contig;
1405
- u32 *desc;
1406
- int ret = 0;
1407
-
1408
- /* allocate extended descriptor */
1409
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
1410
- if (IS_ERR(edesc))
1411
- return PTR_ERR(edesc);
1412
-
1413
- /* Create and submit job descriptor*/
1414
- init_gcm_job(req, edesc, all_contig, false);
1415
-#ifdef DEBUG
1416
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1417
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1418
- desc_bytes(edesc->hw_desc), 1);
1419
-#endif
1420
-
1421
- desc = edesc->hw_desc;
1422
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1423
- if (!ret) {
1424
- ret = -EINPROGRESS;
1425
- } else {
1426
- aead_unmap(jrdev, edesc, req);
1427
- kfree(edesc);
1428
- }
1429
-
1430
- return ret;
1431
-}
1432
-
1433
-static int ipsec_gcm_decrypt(struct aead_request *req)
1434
-{
1435
- if (req->assoclen < 8)
1436
- return -EINVAL;
1437
-
1438
- return gcm_decrypt(req);
1516
+ return aead_crypt(req, true);
14391517 }
14401518
14411519 static int aead_decrypt(struct aead_request *req)
14421520 {
1521
+ return aead_crypt(req, false);
1522
+}
1523
+
1524
+static int aead_do_one_req(struct crypto_engine *engine, void *areq)
1525
+{
1526
+ struct aead_request *req = aead_request_cast(areq);
1527
+ struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1528
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
1529
+ u32 *desc = rctx->edesc->hw_desc;
1530
+ int ret;
1531
+
1532
+ rctx->edesc->bklog = true;
1533
+
1534
+ ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
1535
+
1536
+ if (ret != -EINPROGRESS) {
1537
+ aead_unmap(ctx->jrdev, rctx->edesc, req);
1538
+ kfree(rctx->edesc);
1539
+ } else {
1540
+ ret = 0;
1541
+ }
1542
+
1543
+ return ret;
1544
+}
1545
+
1546
+static inline int gcm_crypt(struct aead_request *req, bool encrypt)
1547
+{
14431548 struct aead_edesc *edesc;
14441549 struct crypto_aead *aead = crypto_aead_reqtfm(req);
14451550 struct caam_ctx *ctx = crypto_aead_ctx(aead);
14461551 struct device *jrdev = ctx->jrdev;
14471552 bool all_contig;
1448
- u32 *desc;
1449
- int ret = 0;
1450
-
1451
- caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
1452
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1453
- req->assoclen + req->cryptlen, 1);
14541553
14551554 /* allocate extended descriptor */
1456
- edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
1457
- &all_contig, false);
1555
+ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
1556
+ encrypt);
14581557 if (IS_ERR(edesc))
14591558 return PTR_ERR(edesc);
14601559
1461
- /* Create and submit job descriptor*/
1462
- init_authenc_job(req, edesc, all_contig, false);
1463
-#ifdef DEBUG
1464
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1465
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1466
- desc_bytes(edesc->hw_desc), 1);
1467
-#endif
1560
+ /* Create and submit job descriptor */
1561
+ init_gcm_job(req, edesc, all_contig, encrypt);
14681562
1469
- desc = edesc->hw_desc;
1470
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1471
- if (!ret) {
1472
- ret = -EINPROGRESS;
1473
- } else {
1474
- aead_unmap(jrdev, edesc, req);
1475
- kfree(edesc);
1476
- }
1563
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1564
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1565
+ desc_bytes(edesc->hw_desc), 1);
14771566
1478
- return ret;
1567
+ return aead_enqueue_req(jrdev, req);
1568
+}
1569
+
1570
+static int gcm_encrypt(struct aead_request *req)
1571
+{
1572
+ return gcm_crypt(req, true);
1573
+}
1574
+
1575
+static int gcm_decrypt(struct aead_request *req)
1576
+{
1577
+ return gcm_crypt(req, false);
1578
+}
1579
+
1580
+static int ipsec_gcm_encrypt(struct aead_request *req)
1581
+{
1582
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
1583
+}
1584
+
1585
+static int ipsec_gcm_decrypt(struct aead_request *req)
1586
+{
1587
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
14791588 }
14801589
14811590 /*
1482
- * allocate and map the ablkcipher extended descriptor for ablkcipher
1591
+ * allocate and map the skcipher extended descriptor for skcipher
14831592 */
1484
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1485
- *req, int desc_bytes)
1593
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1594
+ int desc_bytes)
14861595 {
1487
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1488
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1596
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1597
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1598
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
14891599 struct device *jrdev = ctx->jrdev;
14901600 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
14911601 GFP_KERNEL : GFP_ATOMIC;
14921602 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1493
- struct ablkcipher_edesc *edesc;
1494
- dma_addr_t iv_dma;
1603
+ struct skcipher_edesc *edesc;
1604
+ dma_addr_t iv_dma = 0;
14951605 u8 *iv;
1496
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1606
+ int ivsize = crypto_skcipher_ivsize(skcipher);
14971607 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
14981608
1499
- src_nents = sg_nents_for_len(req->src, req->nbytes);
1609
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
15001610 if (unlikely(src_nents < 0)) {
15011611 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1502
- req->nbytes);
1612
+ req->cryptlen);
15031613 return ERR_PTR(src_nents);
15041614 }
15051615
15061616 if (req->dst != req->src) {
1507
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1617
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
15081618 if (unlikely(dst_nents < 0)) {
15091619 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1510
- req->nbytes);
1620
+ req->cryptlen);
15111621 return ERR_PTR(dst_nents);
15121622 }
15131623 }
@@ -1526,7 +1636,6 @@
15261636 dev_err(jrdev, "unable to map source\n");
15271637 return ERR_PTR(-ENOMEM);
15281638 }
1529
-
15301639 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
15311640 DMA_FROM_DEVICE);
15321641 if (unlikely(!mapped_dst_nents)) {
@@ -1536,9 +1645,34 @@
15361645 }
15371646 }
15381647
1539
- sec4_sg_ents = 1 + mapped_src_nents;
1648
+ if (!ivsize && mapped_src_nents == 1)
1649
+ sec4_sg_ents = 0; // no need for an input hw s/g table
1650
+ else
1651
+ sec4_sg_ents = mapped_src_nents + !!ivsize;
15401652 dst_sg_idx = sec4_sg_ents;
1541
- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1653
+
1654
+ /*
1655
+ * Input, output HW S/G tables: [IV, src][dst, IV]
1656
+ * IV entries point to the same buffer
1657
+ * If src == dst, S/G entries are reused (S/G tables overlap)
1658
+ *
1659
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1660
+ * the end of the table by allocating more S/G entries. Logic:
1661
+ * if (output S/G)
1662
+ * pad output S/G, if needed
1663
+ * else if (input S/G) ...
1664
+ * pad input S/G, if needed
1665
+ */
1666
+ if (ivsize || mapped_dst_nents > 1) {
1667
+ if (req->src == req->dst)
1668
+ sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
1669
+ else
1670
+ sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
1671
+ !!ivsize);
1672
+ } else {
1673
+ sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
1674
+ }
1675
+
15421676 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
15431677
15441678 /*
@@ -1549,416 +1683,331 @@
15491683 if (!edesc) {
15501684 dev_err(jrdev, "could not allocate extended descriptor\n");
15511685 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1552
- 0, DMA_NONE, 0, 0);
1686
+ 0, 0, 0);
15531687 return ERR_PTR(-ENOMEM);
15541688 }
15551689
15561690 edesc->src_nents = src_nents;
15571691 edesc->dst_nents = dst_nents;
1692
+ edesc->mapped_src_nents = mapped_src_nents;
1693
+ edesc->mapped_dst_nents = mapped_dst_nents;
15581694 edesc->sec4_sg_bytes = sec4_sg_bytes;
15591695 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
15601696 desc_bytes);
1561
- edesc->iv_dir = DMA_TO_DEVICE;
1697
+ rctx->edesc = edesc;
15621698
15631699 /* Make sure IV is located in a DMAable area */
1564
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1565
- memcpy(iv, req->info, ivsize);
1700
+ if (ivsize) {
1701
+ iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
1702
+ memcpy(iv, req->iv, ivsize);
15661703
1567
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
1568
- if (dma_mapping_error(jrdev, iv_dma)) {
1569
- dev_err(jrdev, "unable to map IV\n");
1570
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1571
- 0, DMA_NONE, 0, 0);
1572
- kfree(edesc);
1573
- return ERR_PTR(-ENOMEM);
1704
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
1705
+ if (dma_mapping_error(jrdev, iv_dma)) {
1706
+ dev_err(jrdev, "unable to map IV\n");
1707
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
1708
+ dst_nents, 0, 0, 0, 0);
1709
+ kfree(edesc);
1710
+ return ERR_PTR(-ENOMEM);
1711
+ }
1712
+
1713
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
15741714 }
1715
+ if (dst_sg_idx)
1716
+ sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1717
+ !!ivsize, 0);
15751718
1576
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1577
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
1719
+ if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
1720
+ sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1721
+ dst_sg_idx, 0);
15781722
1579
- if (mapped_dst_nents > 1) {
1580
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
1581
- edesc->sec4_sg + dst_sg_idx, 0);
1582
- }
1723
+ if (ivsize)
1724
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1725
+ mapped_dst_nents, iv_dma, ivsize, 0);
15831726
1584
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1585
- sec4_sg_bytes, DMA_TO_DEVICE);
1586
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1587
- dev_err(jrdev, "unable to map S/G table\n");
1588
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1589
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
1590
- kfree(edesc);
1591
- return ERR_PTR(-ENOMEM);
1727
+ if (ivsize || mapped_dst_nents > 1)
1728
+ sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1729
+ mapped_dst_nents - 1 + !!ivsize);
1730
+
1731
+ if (sec4_sg_bytes) {
1732
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1733
+ sec4_sg_bytes,
1734
+ DMA_TO_DEVICE);
1735
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1736
+ dev_err(jrdev, "unable to map S/G table\n");
1737
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
1738
+ dst_nents, iv_dma, ivsize, 0, 0);
1739
+ kfree(edesc);
1740
+ return ERR_PTR(-ENOMEM);
1741
+ }
15921742 }
15931743
15941744 edesc->iv_dma = iv_dma;
15951745
1596
-#ifdef DEBUG
1597
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1598
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1599
- sec4_sg_bytes, 1);
1600
-#endif
1746
+ print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
1747
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1748
+ sec4_sg_bytes, 1);
16011749
16021750 return edesc;
16031751 }
16041752
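
For orientation, the S/G table assembled above has a fixed slot order. The sketch below (standalone C, not driver code; the struct and helper names are illustrative) mirrors the index arithmetic for the common out-of-place case with an IV, where dst_sg_idx follows all source entries and the IV is appended once more behind the destination entries so the engine can write the output IV back:

#include <stddef.h>

/* Hypothetical helper mirroring the slot layout built above. */
struct sec4_layout {
	size_t in_iv;   /* input IV, filled via dma_to_sec4_sg_one()         */
	size_t src;     /* first source buffer entry                         */
	size_t dst;     /* first destination entry (dst_sg_idx)              */
	size_t out_iv;  /* IV repeated after dst; marked as the last entry   */
};

static struct sec4_layout sec4_layout(size_t mapped_src_nents,
				      size_t mapped_dst_nents)
{
	struct sec4_layout l;

	l.in_iv  = 0;
	l.src    = 1;				/* right after the IV slot   */
	l.dst    = l.src + mapped_src_nents;	/* dst_sg_idx                */
	l.out_iv = l.dst + mapped_dst_nents;	/* sg_to_sec4_set_last() hits */
						/* this slot when ivsize > 0 */
	return l;
}

The real code additionally pads the entry count with pad_sg_nents() and skips the table entirely (sec4_sg_bytes == 0) when the request can be described without a link table.
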
1605
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
1753
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
16061754 {
1607
- struct ablkcipher_edesc *edesc;
1608
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1609
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1610
- struct device *jrdev = ctx->jrdev;
1611
- u32 *desc;
1612
- int ret = 0;
1755
+ struct skcipher_request *req = skcipher_request_cast(areq);
1756
+ struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1757
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1758
+ u32 *desc = rctx->edesc->hw_desc;
1759
+ int ret;
16131760
1614
- /* allocate extended descriptor */
1615
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1616
- if (IS_ERR(edesc))
1617
- return PTR_ERR(edesc);
1761
+ rctx->edesc->bklog = true;
16181762
1619
- /* Create and submit job descriptor*/
1620
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
1621
-#ifdef DEBUG
1622
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1623
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1624
- desc_bytes(edesc->hw_desc), 1);
1625
-#endif
1626
- desc = edesc->hw_desc;
1627
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1763
+ ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
16281764
1629
- if (!ret) {
1630
- ret = -EINPROGRESS;
1765
+ if (ret != -EINPROGRESS) {
1766
+ skcipher_unmap(ctx->jrdev, rctx->edesc, req);
1767
+ kfree(rctx->edesc);
16311768 } else {
1632
- ablkcipher_unmap(jrdev, edesc, req);
1633
- kfree(edesc);
1769
+ ret = 0;
16341770 }
16351771
16361772 return ret;
16371773 }
16381774
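
skcipher_do_one_req() is the crypto-engine callback: it marks the descriptor as backlogged (bklog) and enqueues it, returning 0 once the job is owned by the hardware; the result is reported later from the job-ring completion callback. A hedged sketch of that completion side is shown below; the real skcipher_crypt_done() (not shown in this hunk) presumably also decodes the CAAM status word and handles the output IV, which is omitted here:

/* Sketch only: names and error handling simplified. */
static void skcipher_crypt_done_sketch(struct device *jrdev, u32 *desc,
				       u32 status, void *context)
{
	struct skcipher_request *req = context;
	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	bool backlogged = rctx->edesc->bklog;
	int ecode = status ? -EIO : 0;	/* real code translates the status */

	skcipher_unmap(jrdev, rctx->edesc, req);
	kfree(rctx->edesc);

	/*
	 * Jobs routed through crypto-engine must be finalized via the
	 * engine so it can dispatch the next backlogged request; directly
	 * enqueued jobs complete the request themselves.
	 */
	if (backlogged)
		crypto_finalize_skcipher_request(jrpriv->engine, req, ecode);
	else
		req->base.complete(&req->base, ecode);
}
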
1639
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
1775
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
16401776 {
1641
- struct ablkcipher_edesc *edesc;
1642
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1643
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1644
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1777
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1778
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1779
+
1780
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1781
+}
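
xts_skcipher_ivsize() returns true when the upper 64 bits of the 16-byte XTS IV are non-zero; together with the era <= 8 check in skcipher_crypt() below, such requests are redirected to the software fallback, since per that check older CAAM revisions only handle a 64-bit sector index. A standalone illustration of the same check (plain C, not driver code):

#include <stdint.h>
#include <string.h>

/* Does this 16-byte XTS IV use more than 64 bits of sector index? */
static int xts_iv_high_half_set(const uint8_t iv[16])
{
	uint64_t hi;

	memcpy(&hi, iv + 8, sizeof(hi));   /* same as get_unaligned() above */
	return hi != 0;
}
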
1782
+
1783
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1784
+{
1785
+ struct skcipher_edesc *edesc;
1786
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1787
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
16451788 struct device *jrdev = ctx->jrdev;
1789
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
1790
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
16461791 u32 *desc;
16471792 int ret = 0;
1648
-
1649
- /* allocate extended descriptor */
1650
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1651
- if (IS_ERR(edesc))
1652
- return PTR_ERR(edesc);
16531793
16541794 /*
1655
- * The crypto API expects us to set the IV (req->info) to the last
1656
- * ciphertext block when running in CBC mode.
1795
+ * XTS is expected to return an error even for input length = 0
1796
+ * Note that the case input length < block size will be caught during
1797
+ * HW offloading and return an error.
16571798 */
1658
- if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
1659
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1660
- ivsize, ivsize, 0);
1799
+ if (!req->cryptlen && !ctx->fallback)
1800
+ return 0;
16611801
1662
- /* Create and submit job descriptor*/
1663
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
1664
- desc = edesc->hw_desc;
1665
-#ifdef DEBUG
1666
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1667
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1668
- desc_bytes(edesc->hw_desc), 1);
1669
-#endif
1802
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1803
+ ctx->xts_key_fallback)) {
1804
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
16701805
1671
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1672
- if (!ret) {
1673
- ret = -EINPROGRESS;
1674
- } else {
1675
- ablkcipher_unmap(jrdev, edesc, req);
1676
- kfree(edesc);
1806
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1807
+ skcipher_request_set_callback(&rctx->fallback_req,
1808
+ req->base.flags,
1809
+ req->base.complete,
1810
+ req->base.data);
1811
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1812
+ req->dst, req->cryptlen, req->iv);
1813
+
1814
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1815
+ crypto_skcipher_decrypt(&rctx->fallback_req);
16771816 }
1678
-
1679
- return ret;
1680
-}
1681
-
1682
-/*
1683
- * allocate and map the ablkcipher extended descriptor
1684
- * for ablkcipher givencrypt
1685
- */
1686
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1687
- struct skcipher_givcrypt_request *greq,
1688
- int desc_bytes)
1689
-{
1690
- struct ablkcipher_request *req = &greq->creq;
1691
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1692
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1693
- struct device *jrdev = ctx->jrdev;
1694
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1695
- GFP_KERNEL : GFP_ATOMIC;
1696
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1697
- struct ablkcipher_edesc *edesc;
1698
- dma_addr_t iv_dma;
1699
- u8 *iv;
1700
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1701
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1702
-
1703
- src_nents = sg_nents_for_len(req->src, req->nbytes);
1704
- if (unlikely(src_nents < 0)) {
1705
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1706
- req->nbytes);
1707
- return ERR_PTR(src_nents);
1708
- }
1709
-
1710
- if (likely(req->src == req->dst)) {
1711
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1712
- DMA_BIDIRECTIONAL);
1713
- if (unlikely(!mapped_src_nents)) {
1714
- dev_err(jrdev, "unable to map source\n");
1715
- return ERR_PTR(-ENOMEM);
1716
- }
1717
-
1718
- dst_nents = src_nents;
1719
- mapped_dst_nents = src_nents;
1720
- } else {
1721
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1722
- DMA_TO_DEVICE);
1723
- if (unlikely(!mapped_src_nents)) {
1724
- dev_err(jrdev, "unable to map source\n");
1725
- return ERR_PTR(-ENOMEM);
1726
- }
1727
-
1728
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1729
- if (unlikely(dst_nents < 0)) {
1730
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1731
- req->nbytes);
1732
- return ERR_PTR(dst_nents);
1733
- }
1734
-
1735
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1736
- DMA_FROM_DEVICE);
1737
- if (unlikely(!mapped_dst_nents)) {
1738
- dev_err(jrdev, "unable to map destination\n");
1739
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1740
- return ERR_PTR(-ENOMEM);
1741
- }
1742
- }
1743
-
1744
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1745
- dst_sg_idx = sec4_sg_ents;
1746
- sec4_sg_ents += 1 + mapped_dst_nents;
1747
-
1748
- /*
1749
- * allocate space for base edesc and hw desc commands, link tables, IV
1750
- */
1751
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1752
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1753
- GFP_DMA | flags);
1754
- if (!edesc) {
1755
- dev_err(jrdev, "could not allocate extended descriptor\n");
1756
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1757
- 0, DMA_NONE, 0, 0);
1758
- return ERR_PTR(-ENOMEM);
1759
- }
1760
-
1761
- edesc->src_nents = src_nents;
1762
- edesc->dst_nents = dst_nents;
1763
- edesc->sec4_sg_bytes = sec4_sg_bytes;
1764
- edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1765
- desc_bytes);
1766
- edesc->iv_dir = DMA_FROM_DEVICE;
1767
-
1768
- /* Make sure IV is located in a DMAable area */
1769
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1770
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
1771
- if (dma_mapping_error(jrdev, iv_dma)) {
1772
- dev_err(jrdev, "unable to map IV\n");
1773
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1774
- 0, DMA_NONE, 0, 0);
1775
- kfree(edesc);
1776
- return ERR_PTR(-ENOMEM);
1777
- }
1778
-
1779
- if (mapped_src_nents > 1)
1780
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
1781
- 0);
1782
-
1783
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
1784
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
1785
- dst_sg_idx + 1, 0);
1786
-
1787
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1788
- sec4_sg_bytes, DMA_TO_DEVICE);
1789
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1790
- dev_err(jrdev, "unable to map S/G table\n");
1791
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1792
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
1793
- kfree(edesc);
1794
- return ERR_PTR(-ENOMEM);
1795
- }
1796
- edesc->iv_dma = iv_dma;
1797
-
1798
-#ifdef DEBUG
1799
- print_hex_dump(KERN_ERR,
1800
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
1801
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1802
- sec4_sg_bytes, 1);
1803
-#endif
1804
-
1805
- return edesc;
1806
-}
1807
-
1808
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1809
-{
1810
- struct ablkcipher_request *req = &creq->creq;
1811
- struct ablkcipher_edesc *edesc;
1812
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1813
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1814
- struct device *jrdev = ctx->jrdev;
1815
- u32 *desc;
1816
- int ret = 0;
18171817
18181818 /* allocate extended descriptor */
1819
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1819
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
18201820 if (IS_ERR(edesc))
18211821 return PTR_ERR(edesc);
18221822
18231823 /* Create and submit job descriptor*/
1824
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
1825
- edesc, req);
1826
-#ifdef DEBUG
1827
- print_hex_dump(KERN_ERR,
1828
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
1829
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1830
- desc_bytes(edesc->hw_desc), 1);
1831
-#endif
1832
- desc = edesc->hw_desc;
1833
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1824
+ init_skcipher_job(req, edesc, encrypt);
18341825
1835
- if (!ret) {
1836
- ret = -EINPROGRESS;
1837
- } else {
1838
- ablkcipher_unmap(jrdev, edesc, req);
1826
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1827
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1828
+ desc_bytes(edesc->hw_desc), 1);
1829
+
1830
+ desc = edesc->hw_desc;
1831
+ /*
1832
+	 * Only the backlog requests are sent to crypto-engine since the others
1833
+ * can be handled by CAAM, if free, especially since JR has up to 1024
1834
+ * entries (more than the 10 entries from crypto-engine).
1835
+ */
1836
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
1837
+ ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
1838
+ req);
1839
+ else
1840
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
1841
+
1842
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
1843
+ skcipher_unmap(jrdev, edesc, req);
18391844 kfree(edesc);
18401845 }
18411846
18421847 return ret;
18431848 }
18441849
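
skcipher_crypt() is what a generic kernel caller ultimately reaches through the skcipher API; only requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG take the crypto-engine detour above. The following is a hedged usage sketch from the caller's side (example_cbc_aes_once() is hypothetical; the crypto API calls themselves are standard):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* Hypothetical caller: one synchronous CBC-AES encryption. */
static int example_cbc_aes_once(struct scatterlist *src,
				struct scatterlist *dst, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* MAY_BACKLOG requests are the ones routed through crypto-engine */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
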
1845
-#define template_aead template_u.aead
1846
-#define template_ablkcipher template_u.ablkcipher
1847
-struct caam_alg_template {
1848
- char name[CRYPTO_MAX_ALG_NAME];
1849
- char driver_name[CRYPTO_MAX_ALG_NAME];
1850
- unsigned int blocksize;
1851
- u32 type;
1852
- union {
1853
- struct ablkcipher_alg ablkcipher;
1854
- } template_u;
1855
- u32 class1_alg_type;
1856
- u32 class2_alg_type;
1857
-};
1850
+static int skcipher_encrypt(struct skcipher_request *req)
1851
+{
1852
+ return skcipher_crypt(req, true);
1853
+}
18581854
1859
-static struct caam_alg_template driver_algs[] = {
1860
- /* ablkcipher descriptor */
1855
+static int skcipher_decrypt(struct skcipher_request *req)
1856
+{
1857
+ return skcipher_crypt(req, false);
1858
+}
1859
+
1860
+static struct caam_skcipher_alg driver_algs[] = {
18611861 {
1862
- .name = "cbc(aes)",
1863
- .driver_name = "cbc-aes-caam",
1864
- .blocksize = AES_BLOCK_SIZE,
1865
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1866
- .template_ablkcipher = {
1867
- .setkey = ablkcipher_setkey,
1868
- .encrypt = ablkcipher_encrypt,
1869
- .decrypt = ablkcipher_decrypt,
1870
- .givencrypt = ablkcipher_givencrypt,
1871
- .geniv = "<built-in>",
1862
+ .skcipher = {
1863
+ .base = {
1864
+ .cra_name = "cbc(aes)",
1865
+ .cra_driver_name = "cbc-aes-caam",
1866
+ .cra_blocksize = AES_BLOCK_SIZE,
1867
+ },
1868
+ .setkey = aes_skcipher_setkey,
1869
+ .encrypt = skcipher_encrypt,
1870
+ .decrypt = skcipher_decrypt,
18721871 .min_keysize = AES_MIN_KEY_SIZE,
18731872 .max_keysize = AES_MAX_KEY_SIZE,
18741873 .ivsize = AES_BLOCK_SIZE,
1875
- },
1876
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1874
+ },
1875
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18771876 },
18781877 {
1879
- .name = "cbc(des3_ede)",
1880
- .driver_name = "cbc-3des-caam",
1881
- .blocksize = DES3_EDE_BLOCK_SIZE,
1882
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1883
- .template_ablkcipher = {
1884
- .setkey = ablkcipher_setkey,
1885
- .encrypt = ablkcipher_encrypt,
1886
- .decrypt = ablkcipher_decrypt,
1887
- .givencrypt = ablkcipher_givencrypt,
1888
- .geniv = "<built-in>",
1878
+ .skcipher = {
1879
+ .base = {
1880
+ .cra_name = "cbc(des3_ede)",
1881
+ .cra_driver_name = "cbc-3des-caam",
1882
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1883
+ },
1884
+ .setkey = des3_skcipher_setkey,
1885
+ .encrypt = skcipher_encrypt,
1886
+ .decrypt = skcipher_decrypt,
18891887 .min_keysize = DES3_EDE_KEY_SIZE,
18901888 .max_keysize = DES3_EDE_KEY_SIZE,
18911889 .ivsize = DES3_EDE_BLOCK_SIZE,
1892
- },
1893
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1890
+ },
1891
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18941892 },
18951893 {
1896
- .name = "cbc(des)",
1897
- .driver_name = "cbc-des-caam",
1898
- .blocksize = DES_BLOCK_SIZE,
1899
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1900
- .template_ablkcipher = {
1901
- .setkey = ablkcipher_setkey,
1902
- .encrypt = ablkcipher_encrypt,
1903
- .decrypt = ablkcipher_decrypt,
1904
- .givencrypt = ablkcipher_givencrypt,
1905
- .geniv = "<built-in>",
1894
+ .skcipher = {
1895
+ .base = {
1896
+ .cra_name = "cbc(des)",
1897
+ .cra_driver_name = "cbc-des-caam",
1898
+ .cra_blocksize = DES_BLOCK_SIZE,
1899
+ },
1900
+ .setkey = des_skcipher_setkey,
1901
+ .encrypt = skcipher_encrypt,
1902
+ .decrypt = skcipher_decrypt,
19061903 .min_keysize = DES_KEY_SIZE,
19071904 .max_keysize = DES_KEY_SIZE,
19081905 .ivsize = DES_BLOCK_SIZE,
1909
- },
1910
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1906
+ },
1907
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
19111908 },
19121909 {
1913
- .name = "ctr(aes)",
1914
- .driver_name = "ctr-aes-caam",
1915
- .blocksize = 1,
1916
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1917
- .template_ablkcipher = {
1918
- .setkey = ablkcipher_setkey,
1919
- .encrypt = ablkcipher_encrypt,
1920
- .decrypt = ablkcipher_decrypt,
1921
- .geniv = "chainiv",
1910
+ .skcipher = {
1911
+ .base = {
1912
+ .cra_name = "ctr(aes)",
1913
+ .cra_driver_name = "ctr-aes-caam",
1914
+ .cra_blocksize = 1,
1915
+ },
1916
+ .setkey = ctr_skcipher_setkey,
1917
+ .encrypt = skcipher_encrypt,
1918
+ .decrypt = skcipher_decrypt,
19221919 .min_keysize = AES_MIN_KEY_SIZE,
19231920 .max_keysize = AES_MAX_KEY_SIZE,
19241921 .ivsize = AES_BLOCK_SIZE,
1925
- },
1926
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1922
+ .chunksize = AES_BLOCK_SIZE,
1923
+ },
1924
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1925
+ OP_ALG_AAI_CTR_MOD128,
19271926 },
19281927 {
1929
- .name = "rfc3686(ctr(aes))",
1930
- .driver_name = "rfc3686-ctr-aes-caam",
1931
- .blocksize = 1,
1932
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1933
- .template_ablkcipher = {
1934
- .setkey = ablkcipher_setkey,
1935
- .encrypt = ablkcipher_encrypt,
1936
- .decrypt = ablkcipher_decrypt,
1937
- .givencrypt = ablkcipher_givencrypt,
1938
- .geniv = "<built-in>",
1928
+ .skcipher = {
1929
+ .base = {
1930
+ .cra_name = "rfc3686(ctr(aes))",
1931
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
1932
+ .cra_blocksize = 1,
1933
+ },
1934
+ .setkey = rfc3686_skcipher_setkey,
1935
+ .encrypt = skcipher_encrypt,
1936
+ .decrypt = skcipher_decrypt,
19391937 .min_keysize = AES_MIN_KEY_SIZE +
19401938 CTR_RFC3686_NONCE_SIZE,
19411939 .max_keysize = AES_MAX_KEY_SIZE +
19421940 CTR_RFC3686_NONCE_SIZE,
19431941 .ivsize = CTR_RFC3686_IV_SIZE,
1944
- },
1945
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1942
+ .chunksize = AES_BLOCK_SIZE,
1943
+ },
1944
+ .caam = {
1945
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
1946
+ OP_ALG_AAI_CTR_MOD128,
1947
+ .rfc3686 = true,
1948
+ },
19461949 },
19471950 {
1948
- .name = "xts(aes)",
1949
- .driver_name = "xts-aes-caam",
1950
- .blocksize = AES_BLOCK_SIZE,
1951
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1952
- .template_ablkcipher = {
1953
- .setkey = xts_ablkcipher_setkey,
1954
- .encrypt = ablkcipher_encrypt,
1955
- .decrypt = ablkcipher_decrypt,
1956
- .geniv = "eseqiv",
1951
+ .skcipher = {
1952
+ .base = {
1953
+ .cra_name = "xts(aes)",
1954
+ .cra_driver_name = "xts-aes-caam",
1955
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1956
+ .cra_blocksize = AES_BLOCK_SIZE,
1957
+ },
1958
+ .setkey = xts_skcipher_setkey,
1959
+ .encrypt = skcipher_encrypt,
1960
+ .decrypt = skcipher_decrypt,
19571961 .min_keysize = 2 * AES_MIN_KEY_SIZE,
19581962 .max_keysize = 2 * AES_MAX_KEY_SIZE,
19591963 .ivsize = AES_BLOCK_SIZE,
1964
+ },
1965
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1966
+ },
1967
+ {
1968
+ .skcipher = {
1969
+ .base = {
1970
+ .cra_name = "ecb(des)",
1971
+ .cra_driver_name = "ecb-des-caam",
1972
+ .cra_blocksize = DES_BLOCK_SIZE,
19601973 },
1961
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1974
+ .setkey = des_skcipher_setkey,
1975
+ .encrypt = skcipher_encrypt,
1976
+ .decrypt = skcipher_decrypt,
1977
+ .min_keysize = DES_KEY_SIZE,
1978
+ .max_keysize = DES_KEY_SIZE,
1979
+ },
1980
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
1981
+ },
1982
+ {
1983
+ .skcipher = {
1984
+ .base = {
1985
+ .cra_name = "ecb(aes)",
1986
+ .cra_driver_name = "ecb-aes-caam",
1987
+ .cra_blocksize = AES_BLOCK_SIZE,
1988
+ },
1989
+ .setkey = aes_skcipher_setkey,
1990
+ .encrypt = skcipher_encrypt,
1991
+ .decrypt = skcipher_decrypt,
1992
+ .min_keysize = AES_MIN_KEY_SIZE,
1993
+ .max_keysize = AES_MAX_KEY_SIZE,
1994
+ },
1995
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
1996
+ },
1997
+ {
1998
+ .skcipher = {
1999
+ .base = {
2000
+ .cra_name = "ecb(des3_ede)",
2001
+ .cra_driver_name = "ecb-des3-caam",
2002
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2003
+ },
2004
+ .setkey = des3_skcipher_setkey,
2005
+ .encrypt = skcipher_encrypt,
2006
+ .decrypt = skcipher_decrypt,
2007
+ .min_keysize = DES3_EDE_KEY_SIZE,
2008
+ .max_keysize = DES3_EDE_KEY_SIZE,
2009
+ },
2010
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
19622011 },
19632012 };
19642013
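
The cra_name strings in the table above are what callers request; cra_driver_name identifies this particular implementation. As a usage illustration, userspace can reach the same algorithms through AF_ALG (sketch; error handling kept minimal):

#include <sys/socket.h>
#include <linux/if_alg.h>
#include <string.h>
#include <unistd.h>

/*
 * Bind an AF_ALG transform socket to one of the algorithm names above,
 * e.g. "cbc(aes)" or "xts(aes)". The kernel picks the highest-priority
 * implementation, typically the caam driver on hardware that has it.
 */
static int open_skcipher(const char *cra_name)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
	};
	int tfmfd;

	strncpy((char *)sa.salg_name, cra_name, sizeof(sa.salg_name) - 1);

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0)
		return -1;

	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(tfmfd);
		return -1;
	}

	/* accept() on tfmfd yields an operation socket; the key is set with
	 * setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen). */
	return tfmfd;
}
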
....@@ -1979,6 +2028,7 @@
19792028 },
19802029 .caam = {
19812030 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2031
+ .nodkp = true,
19822032 },
19832033 },
19842034 {
....@@ -1997,6 +2047,7 @@
19972047 },
19982048 .caam = {
19992049 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2050
+ .nodkp = true,
20002051 },
20012052 },
20022053 /* Galois Counter Mode */
....@@ -2016,6 +2067,7 @@
20162067 },
20172068 .caam = {
20182069 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2070
+ .nodkp = true,
20192071 },
20202072 },
20212073 /* single-pass ipsec_esp descriptor */
....@@ -2417,7 +2469,7 @@
24172469 "cbc-des3_ede-caam",
24182470 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
24192471 },
2420
- .setkey = aead_setkey,
2472
+ .setkey = des3_aead_setkey,
24212473 .setauthsize = aead_setauthsize,
24222474 .encrypt = aead_encrypt,
24232475 .decrypt = aead_decrypt,
....@@ -2439,7 +2491,7 @@
24392491 "cbc-des3_ede-caam",
24402492 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
24412493 },
2442
- .setkey = aead_setkey,
2494
+ .setkey = des3_aead_setkey,
24432495 .setauthsize = aead_setauthsize,
24442496 .encrypt = aead_encrypt,
24452497 .decrypt = aead_decrypt,
....@@ -2462,7 +2514,7 @@
24622514 "cbc-des3_ede-caam",
24632515 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
24642516 },
2465
- .setkey = aead_setkey,
2517
+ .setkey = des3_aead_setkey,
24662518 .setauthsize = aead_setauthsize,
24672519 .encrypt = aead_encrypt,
24682520 .decrypt = aead_decrypt,
....@@ -2485,7 +2537,7 @@
24852537 "cbc-des3_ede-caam",
24862538 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
24872539 },
2488
- .setkey = aead_setkey,
2540
+ .setkey = des3_aead_setkey,
24892541 .setauthsize = aead_setauthsize,
24902542 .encrypt = aead_encrypt,
24912543 .decrypt = aead_decrypt,
....@@ -2508,7 +2560,7 @@
25082560 "cbc-des3_ede-caam",
25092561 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
25102562 },
2511
- .setkey = aead_setkey,
2563
+ .setkey = des3_aead_setkey,
25122564 .setauthsize = aead_setauthsize,
25132565 .encrypt = aead_encrypt,
25142566 .decrypt = aead_decrypt,
....@@ -2531,7 +2583,7 @@
25312583 "cbc-des3_ede-caam",
25322584 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
25332585 },
2534
- .setkey = aead_setkey,
2586
+ .setkey = des3_aead_setkey,
25352587 .setauthsize = aead_setauthsize,
25362588 .encrypt = aead_encrypt,
25372589 .decrypt = aead_decrypt,
....@@ -2554,7 +2606,7 @@
25542606 "cbc-des3_ede-caam",
25552607 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
25562608 },
2557
- .setkey = aead_setkey,
2609
+ .setkey = des3_aead_setkey,
25582610 .setauthsize = aead_setauthsize,
25592611 .encrypt = aead_encrypt,
25602612 .decrypt = aead_decrypt,
....@@ -2577,7 +2629,7 @@
25772629 "cbc-des3_ede-caam",
25782630 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
25792631 },
2580
- .setkey = aead_setkey,
2632
+ .setkey = des3_aead_setkey,
25812633 .setauthsize = aead_setauthsize,
25822634 .encrypt = aead_encrypt,
25832635 .decrypt = aead_decrypt,
....@@ -2600,7 +2652,7 @@
26002652 "cbc-des3_ede-caam",
26012653 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
26022654 },
2603
- .setkey = aead_setkey,
2655
+ .setkey = des3_aead_setkey,
26042656 .setauthsize = aead_setauthsize,
26052657 .encrypt = aead_encrypt,
26062658 .decrypt = aead_decrypt,
....@@ -2623,7 +2675,7 @@
26232675 "cbc-des3_ede-caam",
26242676 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
26252677 },
2626
- .setkey = aead_setkey,
2678
+ .setkey = des3_aead_setkey,
26272679 .setauthsize = aead_setauthsize,
26282680 .encrypt = aead_encrypt,
26292681 .decrypt = aead_decrypt,
....@@ -2646,7 +2698,7 @@
26462698 "cbc-des3_ede-caam",
26472699 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
26482700 },
2649
- .setkey = aead_setkey,
2701
+ .setkey = des3_aead_setkey,
26502702 .setauthsize = aead_setauthsize,
26512703 .encrypt = aead_encrypt,
26522704 .decrypt = aead_decrypt,
....@@ -2669,7 +2721,7 @@
26692721 "cbc-des3_ede-caam",
26702722 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
26712723 },
2672
- .setkey = aead_setkey,
2724
+ .setkey = des3_aead_setkey,
26732725 .setauthsize = aead_setauthsize,
26742726 .encrypt = aead_encrypt,
26752727 .decrypt = aead_decrypt,
....@@ -3241,12 +3293,52 @@
32413293 .geniv = true,
32423294 },
32433295 },
3244
-};
3245
-
3246
-struct caam_crypto_alg {
3247
- struct crypto_alg crypto_alg;
3248
- struct list_head entry;
3249
- struct caam_alg_entry caam;
3296
+ {
3297
+ .aead = {
3298
+ .base = {
3299
+ .cra_name = "rfc7539(chacha20,poly1305)",
3300
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
3301
+ "caam",
3302
+ .cra_blocksize = 1,
3303
+ },
3304
+ .setkey = chachapoly_setkey,
3305
+ .setauthsize = chachapoly_setauthsize,
3306
+ .encrypt = chachapoly_encrypt,
3307
+ .decrypt = chachapoly_decrypt,
3308
+ .ivsize = CHACHAPOLY_IV_SIZE,
3309
+ .maxauthsize = POLY1305_DIGEST_SIZE,
3310
+ },
3311
+ .caam = {
3312
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3313
+ OP_ALG_AAI_AEAD,
3314
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3315
+ OP_ALG_AAI_AEAD,
3316
+ .nodkp = true,
3317
+ },
3318
+ },
3319
+ {
3320
+ .aead = {
3321
+ .base = {
3322
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
3323
+ .cra_driver_name = "rfc7539esp-chacha20-"
3324
+ "poly1305-caam",
3325
+ .cra_blocksize = 1,
3326
+ },
3327
+ .setkey = chachapoly_setkey,
3328
+ .setauthsize = chachapoly_setauthsize,
3329
+ .encrypt = chachapoly_encrypt,
3330
+ .decrypt = chachapoly_decrypt,
3331
+ .ivsize = 8,
3332
+ .maxauthsize = POLY1305_DIGEST_SIZE,
3333
+ },
3334
+ .caam = {
3335
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
3336
+ OP_ALG_AAI_AEAD,
3337
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
3338
+ OP_ALG_AAI_AEAD,
3339
+ .nodkp = true,
3340
+ },
3341
+ },
32503342 };
32513343
32523344 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
....@@ -3254,6 +3346,8 @@
32543346 {
32553347 dma_addr_t dma_addr;
32563348 struct caam_drv_private *priv;
3349
+ const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
3350
+ sh_desc_enc);
32573351
32583352 ctx->jrdev = caam_jr_alloc();
32593353 if (IS_ERR(ctx->jrdev)) {
....@@ -3269,7 +3363,8 @@
32693363
32703364 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
32713365 offsetof(struct caam_ctx,
3272
- sh_desc_enc_dma),
3366
+ sh_desc_enc_dma) -
3367
+ sh_desc_enc_offset,
32733368 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
32743369 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
32753370 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
....@@ -3279,10 +3374,10 @@
32793374
32803375 ctx->sh_desc_enc_dma = dma_addr;
32813376 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3282
- sh_desc_dec);
3283
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
3284
- sh_desc_givenc);
3285
- ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
3377
+ sh_desc_dec) -
3378
+ sh_desc_enc_offset;
3379
+ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
3380
+ sh_desc_enc_offset;
32863381
32873382 /* copy descriptor header template value */
32883383 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
....@@ -3291,14 +3386,41 @@
32913386 return 0;
32923387 }
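
The subtraction of sh_desc_enc_offset above exists because sh_desc_enc is no longer the first member of struct caam_ctx: the single DMA mapping covers only the span from sh_desc_enc up to sh_desc_enc_dma, and every derived bus address must be rebased onto that span. A standalone sketch of the same offsetof() arithmetic, using a hypothetical struct:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical layout: a non-DMA member first, then the region mapped as
 * one span (descriptors + key), then bookkeeping fields. */
struct ctx_like {
	void	*engine_priv;		/* not part of the DMA span        */
	uint32_t sh_desc_enc[16];
	uint32_t sh_desc_dec[16];
	uint8_t	 key[64];
	uint64_t sh_desc_enc_dma;	/* first field after the span      */
};

#define SPAN_OFF  offsetof(struct ctx_like, sh_desc_enc)
#define SPAN_SIZE (offsetof(struct ctx_like, sh_desc_enc_dma) - SPAN_OFF)

/* Rebase a field's offset onto the mapped span, as caam_init_common()
 * does for sh_desc_dec_dma and key_dma. */
static uint64_t field_dma(uint64_t span_dma, size_t field_off)
{
	return span_dma + field_off - SPAN_OFF;
}

/* e.g. key_dma = field_dma(dma_addr, offsetof(struct ctx_like, key)); */
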
32933388
3294
-static int caam_cra_init(struct crypto_tfm *tfm)
3389
+static int caam_cra_init(struct crypto_skcipher *tfm)
32953390 {
3296
- struct crypto_alg *alg = tfm->__crt_alg;
3297
- struct caam_crypto_alg *caam_alg =
3298
- container_of(alg, struct caam_crypto_alg, crypto_alg);
3299
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3391
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3392
+ struct caam_skcipher_alg *caam_alg =
3393
+ container_of(alg, typeof(*caam_alg), skcipher);
3394
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
3395
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3396
+ int ret = 0;
33003397
3301
- return caam_init_common(ctx, &caam_alg->caam, false);
3398
+ ctx->enginectx.op.do_one_request = skcipher_do_one_req;
3399
+
3400
+ if (alg_aai == OP_ALG_AAI_XTS) {
3401
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
3402
+ struct crypto_skcipher *fallback;
3403
+
3404
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
3405
+ CRYPTO_ALG_NEED_FALLBACK);
3406
+ if (IS_ERR(fallback)) {
3407
+ pr_err("Failed to allocate %s fallback: %ld\n",
3408
+ tfm_name, PTR_ERR(fallback));
3409
+ return PTR_ERR(fallback);
3410
+ }
3411
+
3412
+ ctx->fallback = fallback;
3413
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
3414
+ crypto_skcipher_reqsize(fallback));
3415
+ } else {
3416
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
3417
+ }
3418
+
3419
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
3420
+ if (ret && ctx->fallback)
3421
+ crypto_free_skcipher(ctx->fallback);
3422
+
3423
+ return ret;
33023424 }
33033425
33043426 static int caam_aead_init(struct crypto_aead *tfm)
....@@ -3308,21 +3430,29 @@
33083430 container_of(alg, struct caam_aead_alg, aead);
33093431 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
33103432
3311
- return caam_init_common(ctx, &caam_alg->caam,
3312
- alg->setkey == aead_setkey);
3433
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
3434
+
3435
+ ctx->enginectx.op.do_one_request = aead_do_one_req;
3436
+
3437
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
33133438 }
33143439
33153440 static void caam_exit_common(struct caam_ctx *ctx)
33163441 {
33173442 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3318
- offsetof(struct caam_ctx, sh_desc_enc_dma),
3443
+ offsetof(struct caam_ctx, sh_desc_enc_dma) -
3444
+ offsetof(struct caam_ctx, sh_desc_enc),
33193445 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
33203446 caam_jr_free(ctx->jrdev);
33213447 }
33223448
3323
-static void caam_cra_exit(struct crypto_tfm *tfm)
3449
+static void caam_cra_exit(struct crypto_skcipher *tfm)
33243450 {
3325
- caam_exit_common(crypto_tfm_ctx(tfm));
3451
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
3452
+
3453
+ if (ctx->fallback)
3454
+ crypto_free_skcipher(ctx->fallback);
3455
+ caam_exit_common(ctx);
33263456 }
33273457
33283458 static void caam_aead_exit(struct crypto_aead *tfm)
....@@ -3330,10 +3460,8 @@
33303460 caam_exit_common(crypto_aead_ctx(tfm));
33313461 }
33323462
3333
-static void __exit caam_algapi_exit(void)
3463
+void caam_algapi_exit(void)
33343464 {
3335
-
3336
- struct caam_crypto_alg *t_alg, *n;
33373465 int i;
33383466
33393467 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
....@@ -3343,57 +3471,26 @@
33433471 crypto_unregister_aead(&t_alg->aead);
33443472 }
33453473
3346
- if (!alg_list.next)
3347
- return;
3474
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3475
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
33483476
3349
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
3350
- crypto_unregister_alg(&t_alg->crypto_alg);
3351
- list_del(&t_alg->entry);
3352
- kfree(t_alg);
3477
+ if (t_alg->registered)
3478
+ crypto_unregister_skcipher(&t_alg->skcipher);
33533479 }
33543480 }
33553481
3356
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
3357
- *template)
3482
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
33583483 {
3359
- struct caam_crypto_alg *t_alg;
3360
- struct crypto_alg *alg;
3484
+ struct skcipher_alg *alg = &t_alg->skcipher;
33613485
3362
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
3363
- if (!t_alg) {
3364
- pr_err("failed to allocate t_alg\n");
3365
- return ERR_PTR(-ENOMEM);
3366
- }
3486
+ alg->base.cra_module = THIS_MODULE;
3487
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
3488
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3489
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3490
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
33673491
3368
- alg = &t_alg->crypto_alg;
3369
-
3370
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3371
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3372
- template->driver_name);
3373
- alg->cra_module = THIS_MODULE;
3374
- alg->cra_init = caam_cra_init;
3375
- alg->cra_exit = caam_cra_exit;
3376
- alg->cra_priority = CAAM_CRA_PRIORITY;
3377
- alg->cra_blocksize = template->blocksize;
3378
- alg->cra_alignmask = 0;
3379
- alg->cra_ctxsize = sizeof(struct caam_ctx);
3380
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3381
- template->type;
3382
- switch (template->type) {
3383
- case CRYPTO_ALG_TYPE_GIVCIPHER:
3384
- alg->cra_type = &crypto_givcipher_type;
3385
- alg->cra_ablkcipher = template->template_ablkcipher;
3386
- break;
3387
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
3388
- alg->cra_type = &crypto_ablkcipher_type;
3389
- alg->cra_ablkcipher = template->template_ablkcipher;
3390
- break;
3391
- }
3392
-
3393
- t_alg->caam.class1_alg_type = template->class1_alg_type;
3394
- t_alg->caam.class2_alg_type = template->class2_alg_type;
3395
-
3396
- return t_alg;
3492
+ alg->init = caam_cra_init;
3493
+ alg->exit = caam_cra_exit;
33973494 }
33983495
33993496 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
....@@ -3403,68 +3500,68 @@
34033500 alg->base.cra_module = THIS_MODULE;
34043501 alg->base.cra_priority = CAAM_CRA_PRIORITY;
34053502 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3406
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3503
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3504
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
34073505
34083506 alg->init = caam_aead_init;
34093507 alg->exit = caam_aead_exit;
34103508 }
34113509
3412
-static int __init caam_algapi_init(void)
3510
+int caam_algapi_init(struct device *ctrldev)
34133511 {
3414
- struct device_node *dev_node;
3415
- struct platform_device *pdev;
3416
- struct device *ctrldev;
3417
- struct caam_drv_private *priv;
3512
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
34183513 int i = 0, err = 0;
3419
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
3514
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
34203515 unsigned int md_limit = SHA512_DIGEST_SIZE;
3421
- bool registered = false;
3422
-
3423
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3424
- if (!dev_node) {
3425
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3426
- if (!dev_node)
3427
- return -ENODEV;
3428
- }
3429
-
3430
- pdev = of_find_device_by_node(dev_node);
3431
- if (!pdev) {
3432
- of_node_put(dev_node);
3433
- return -ENODEV;
3434
- }
3435
-
3436
- ctrldev = &pdev->dev;
3437
- priv = dev_get_drvdata(ctrldev);
3438
- of_node_put(dev_node);
3439
-
3440
- /*
3441
- * If priv is NULL, it's probably because the caam driver wasn't
3442
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3443
- */
3444
- if (!priv)
3445
- return -ENODEV;
3446
-
3447
-
3448
- INIT_LIST_HEAD(&alg_list);
3516
+ bool registered = false, gcm_support;
34493517
34503518 /*
34513519 * Register crypto algorithms the device supports.
34523520 * First, detect presence and attributes of DES, AES, and MD blocks.
34533521 */
3454
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
3455
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
3456
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
3457
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
3458
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3522
+ if (priv->era < 10) {
3523
+ u32 cha_vid, cha_inst, aes_rn;
3524
+
3525
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
3526
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
3527
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3528
+
3529
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
3530
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
3531
+ CHA_ID_LS_DES_SHIFT;
3532
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
3533
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3534
+ ccha_inst = 0;
3535
+ ptha_inst = 0;
3536
+
3537
+ aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
3538
+ CHA_ID_LS_AES_MASK;
3539
+ gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
3540
+ } else {
3541
+ u32 aesa, mdha;
3542
+
3543
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
3544
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
3545
+
3546
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3547
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
3548
+
3549
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
3550
+ aes_inst = aesa & CHA_VER_NUM_MASK;
3551
+ md_inst = mdha & CHA_VER_NUM_MASK;
3552
+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
3553
+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
3554
+
3555
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
3556
+ }
34593557
34603558 /* If MD is present, limit digest size based on LP256 */
3461
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
3559
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
34623560 md_limit = SHA256_DIGEST_SIZE;
34633561
34643562 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3465
- struct caam_crypto_alg *t_alg;
3466
- struct caam_alg_template *alg = driver_algs + i;
3467
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
3563
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
3564
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
34683565
34693566 /* Skip DES algorithms if not supported by device */
34703567 if (!des_inst &&
....@@ -3480,27 +3577,21 @@
34803577 * Check support for AES modes not available
34813578 * on LP devices.
34823579 */
3483
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3484
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
3485
- OP_ALG_AAI_XTS)
3486
- continue;
3487
-
3488
- t_alg = caam_alg_alloc(alg);
3489
- if (IS_ERR(t_alg)) {
3490
- err = PTR_ERR(t_alg);
3491
- pr_warn("%s alg allocation failed\n", alg->driver_name);
3580
+ if (aes_vid == CHA_VER_VID_AES_LP &&
3581
+ (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
3582
+ OP_ALG_AAI_XTS)
34923583 continue;
3493
- }
34943584
3495
- err = crypto_register_alg(&t_alg->crypto_alg);
3585
+ caam_skcipher_alg_init(t_alg);
3586
+
3587
+ err = crypto_register_skcipher(&t_alg->skcipher);
34963588 if (err) {
34973589 pr_warn("%s alg registration failed\n",
3498
- t_alg->crypto_alg.cra_driver_name);
3499
- kfree(t_alg);
3590
+ t_alg->skcipher.base.cra_driver_name);
35003591 continue;
35013592 }
35023593
3503
- list_add_tail(&t_alg->entry, &alg_list);
3594
+ t_alg->registered = true;
35043595 registered = true;
35053596 }
35063597
....@@ -3522,21 +3613,26 @@
35223613 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
35233614 continue;
35243615
3525
- /*
3526
- * Check support for AES algorithms not available
3527
- * on LP devices.
3528
- */
3529
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3530
- if (alg_aai == OP_ALG_AAI_GCM)
3531
- continue;
3616
+ /* Skip CHACHA20 algorithms if not supported by device */
3617
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
3618
+ continue;
3619
+
3620
+ /* Skip POLY1305 algorithms if not supported by device */
3621
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
3622
+ continue;
3623
+
3624
+ /* Skip GCM algorithms if not supported by device */
3625
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
3626
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
3627
+ continue;
35323628
35333629 /*
35343630 * Skip algorithms requiring message digests
35353631 * if MD or MD size is not supported by device.
35363632 */
3537
- if (c2_alg_sel &&
3538
- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
3539
- continue;
3633
+ if (is_mdha(c2_alg_sel) &&
3634
+ (!md_inst || t_alg->aead.maxauthsize > md_limit))
3635
+ continue;
35403636
35413637 caam_aead_alg_init(t_alg);
35423638
....@@ -3556,10 +3652,3 @@
35563652
35573653 return err;
35583654 }
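
The registration loops above boil down to a handful of capability checks derived from the CAAM version/instantiation registers (perfmon for era < 10, the vreg block for era >= 10). The standalone predicate below restates the skcipher gating in plain C (illustrative enum and struct, not driver code):

#include <stdbool.h>

/* Illustrative stand-ins for the OP_ALG_ALGSEL_* register encodings. */
enum algsel { SEL_DES, SEL_3DES, SEL_AES };

struct caam_caps {
	bool des_inst;	/* DES block instantiated                  */
	bool aes_inst;	/* AES block instantiated                  */
	bool aes_is_lp;	/* low-power AES variant: no XTS support   */
};

/* Mirrors the skip conditions of the skcipher registration loop above. */
static bool skip_skcipher(const struct caam_caps *c, enum algsel sel,
			  bool is_xts)
{
	if (!c->des_inst && (sel == SEL_DES || sel == SEL_3DES))
		return true;
	if (!c->aes_inst && sel == SEL_AES)
		return true;
	if (c->aes_is_lp && is_xts)
		return true;
	return false;
}

The AEAD loop applies the same idea with extra checks for GCM support, ChaCha20/Poly1305 instantiation (ccha_inst/ptha_inst) and the MD digest-size limit.
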
3559
-
3560
-module_init(caam_algapi_init);
3561
-module_exit(caam_algapi_exit);
3562
-
3563
-MODULE_LICENSE("GPL");
3564
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3565
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");