2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/crypto/caam/caamalg_qi.c
....@@ -1,9 +1,10 @@
1
+// SPDX-License-Identifier: GPL-2.0+
12 /*
23 * Freescale FSL CAAM support for crypto API over QI backend.
34 * Based on caamalg.c
45 *
56 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6
- * Copyright 2016-2017 NXP
7
+ * Copyright 2016-2019 NXP
78 */
89
910 #include "compat.h"
....@@ -17,6 +18,8 @@
1718 #include "qi.h"
1819 #include "jr.h"
1920 #include "caamalg_desc.h"
21
+#include <crypto/xts.h>
22
+#include <asm/unaligned.h>
2023
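The two new includes support the XTS changes made later in this patch: <crypto/xts.h> declares xts_verify_key(), called from xts_skcipher_setkey() to validate XTS keys, and <asm/unaligned.h> provides get_unaligned(), used by xts_skcipher_ivsize() to read the upper 64 bits of the tweak.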
2124 /*
2225 * crypto alg
....@@ -35,10 +38,17 @@
3538 int class2_alg_type;
3639 bool rfc3686;
3740 bool geniv;
41
+ bool nodkp;
3842 };
3943
4044 struct caam_aead_alg {
4145 struct aead_alg aead;
46
+ struct caam_alg_entry caam;
47
+ bool registered;
48
+};
49
+
50
+struct caam_skcipher_alg {
51
+ struct skcipher_alg skcipher;
4252 struct caam_alg_entry caam;
4353 bool registered;
4454 };
....@@ -50,7 +60,6 @@
5060 struct device *jrdev;
5161 u32 sh_desc_enc[DESC_MAX_USED_LEN];
5262 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
5463 u8 key[CAAM_MAX_KEY_SIZE];
5564 dma_addr_t key_dma;
5665 enum dma_data_direction dir;
....@@ -60,6 +69,12 @@
6069 struct device *qidev;
6170 spinlock_t lock; /* Protects multiple init of driver context */
6271 struct caam_drv_ctx *drv_ctx[NUM_OP];
72
+ bool xts_key_fallback;
73
+ struct crypto_skcipher *fallback;
74
+};
75
+
76
+struct caam_skcipher_req_ctx {
77
+ struct skcipher_request fallback_req;
6378 };
6479
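A note on the new request context: the fallback skcipher_request is embedded in the CAAM request context, so the software-fallback path added for XTS needs no per-request allocation of its own. The tfm-init wiring that reserves the extra space is outside this section; the sketch below shows the usual shape of such code and is an assumption, not a quote from the patch.

	/* assumed tfm-init wiring, for illustration only */
	crypto_skcipher_set_reqsize(tfm,
				    sizeof(struct caam_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ctx->fallback));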
6580 static int aead_set_sh_desc(struct crypto_aead *aead)
....@@ -98,6 +113,18 @@
98113 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
99114 }
100115
116
+ /*
117
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
118
+ * in invalid opcodes (last bytes of user key) in the resulting
119
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
120
+ * addresses are needed.
121
+ */
122
+ ctx->adata.key_virt = ctx->key;
123
+ ctx->adata.key_dma = ctx->key_dma;
124
+
125
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
126
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
127
+
101128 data_len[0] = ctx->adata.keylen_pad;
102129 data_len[1] = ctx->cdata.keylen;
103130
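Restating the comment above in terms of the key buffer: ctx->key holds the split authentication key, padded to adata.keylen_pad, immediately followed by the encryption key, and a single DMA mapping (ctx->key_dma) covers both. Populating the virtual and DMA addresses of both halves up front lets the shared-descriptor code use DKP<ptr,imm> whenever inlining a user key longer than the derived key would leave stray key bytes in the descriptor as invalid opcodes. Illustrative layout:

	/*
	 * ctx->key / ctx->key_dma
	 * |<-- split auth key (adata.keylen_pad bytes) -->|<-- enc key -->|
	 *  adata.key_virt / adata.key_dma                  cdata.key_virt / cdata.key_dma
	 *                                                   = base + adata.keylen_pad
	 */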
....@@ -110,16 +137,6 @@
110137 DESC_JOB_IO_LEN, data_len, &inl_mask,
111138 ARRAY_SIZE(data_len)) < 0)
112139 return -EINVAL;
113
-
114
- if (inl_mask & 1)
115
- ctx->adata.key_virt = ctx->key;
116
- else
117
- ctx->adata.key_dma = ctx->key_dma;
118
-
119
- if (inl_mask & 2)
120
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
121
- else
122
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
123140
124141 ctx->adata.key_inline = !!(inl_mask & 1);
125142 ctx->cdata.key_inline = !!(inl_mask & 2);
....@@ -135,16 +152,6 @@
135152 DESC_JOB_IO_LEN, data_len, &inl_mask,
136153 ARRAY_SIZE(data_len)) < 0)
137154 return -EINVAL;
138
-
139
- if (inl_mask & 1)
140
- ctx->adata.key_virt = ctx->key;
141
- else
142
- ctx->adata.key_dma = ctx->key_dma;
143
-
144
- if (inl_mask & 2)
145
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
146
- else
147
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
148155
149156 ctx->adata.key_inline = !!(inl_mask & 1);
150157 ctx->cdata.key_inline = !!(inl_mask & 2);
....@@ -163,16 +170,6 @@
163170 DESC_JOB_IO_LEN, data_len, &inl_mask,
164171 ARRAY_SIZE(data_len)) < 0)
165172 return -EINVAL;
166
-
167
- if (inl_mask & 1)
168
- ctx->adata.key_virt = ctx->key;
169
- else
170
- ctx->adata.key_dma = ctx->key_dma;
171
-
172
- if (inl_mask & 2)
173
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
174
- else
175
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
176173
177174 ctx->adata.key_inline = !!(inl_mask & 1);
178175 ctx->cdata.key_inline = !!(inl_mask & 2);
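The three hunks above make the same simplification: the descriptor-space query that fills in inl_mask still reports whether each key fits inline in the shared descriptor (bit 0 for the authentication key, bit 1 for the cipher key), but since both the virtual and DMA addresses are now populated unconditionally, only the key_inline flags remain to be derived from the mask; the removed per-descriptor branches that chose between key_virt and key_dma are no longer needed.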
....@@ -207,13 +204,11 @@
207204 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
208205 goto badkey;
209206
210
-#ifdef DEBUG
211
- dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
207
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
212208 keys.authkeylen + keys.enckeylen, keys.enckeylen,
213209 keys.authkeylen);
214
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
215
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
216
-#endif
210
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
211
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
217212
218213 /*
219214 * If DKP is supported, use it in the shared descriptor to generate
....@@ -230,7 +225,7 @@
230225 memcpy(ctx->key, keys.authkey, keys.authkeylen);
231226 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
232227 keys.enckeylen);
233
- dma_sync_single_for_device(jrdev, ctx->key_dma,
228
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
234229 ctx->adata.keylen_pad +
235230 keys.enckeylen, ctx->dir);
236231 goto skip_split_key;
....@@ -244,13 +239,13 @@
244239
245240 /* postpend encryption key to auth split key */
246241 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
247
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
248
- keys.enckeylen, ctx->dir);
249
-#ifdef DEBUG
250
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
251
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
252
- ctx->adata.keylen_pad + keys.enckeylen, 1);
253
-#endif
242
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
243
+ ctx->adata.keylen_pad + keys.enckeylen,
244
+ ctx->dir);
245
+
246
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
247
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
248
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
254249
255250 skip_split_key:
256251 ctx->cdata.keylen = keys.enckeylen;
....@@ -281,9 +276,25 @@
281276 memzero_explicit(&keys, sizeof(keys));
282277 return ret;
283278 badkey:
284
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
285279 memzero_explicit(&keys, sizeof(keys));
286280 return -EINVAL;
281
+}
282
+
283
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
284
+ unsigned int keylen)
285
+{
286
+ struct crypto_authenc_keys keys;
287
+ int err;
288
+
289
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
290
+ if (unlikely(err))
291
+ return err;
292
+
293
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
294
+ aead_setkey(aead, key, keylen);
295
+
296
+ memzero_explicit(&keys, sizeof(keys));
297
+ return err;
287298 }
288299
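The "?:" with an omitted middle operand used in des3_aead_setkey() (a GNU C extension that also appears in the DES/3DES skcipher setkey helpers and the ipsec_gcm wrappers later in this patch) chains the key check and the generic setkey: a non-zero error short-circuits, otherwise the second expression supplies the result. Expanded for clarity:

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen);
	if (!err)
		err = aead_setkey(aead, key, keylen);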
289300 static int gcm_set_sh_desc(struct crypto_aead *aead)
....@@ -332,6 +343,11 @@
332343 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
333344 {
334345 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
346
+ int err;
347
+
348
+ err = crypto_gcm_check_authsize(authsize);
349
+ if (err)
350
+ return err;
335351
336352 ctx->authsize = authsize;
337353 gcm_set_sh_desc(authenc);
....@@ -346,13 +362,16 @@
346362 struct device *jrdev = ctx->jrdev;
347363 int ret;
348364
349
-#ifdef DEBUG
350
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
351
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
352
-#endif
365
+ ret = aes_check_keylen(keylen);
366
+ if (ret)
367
+ return ret;
368
+
369
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
370
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
353371
354372 memcpy(ctx->key, key, keylen);
355
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
373
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
374
+ ctx->dir);
356375 ctx->cdata.keylen = keylen;
357376
358377 ret = gcm_set_sh_desc(aead);
....@@ -428,6 +447,11 @@
428447 unsigned int authsize)
429448 {
430449 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
450
+ int err;
451
+
452
+ err = crypto_rfc4106_check_authsize(authsize);
453
+ if (err)
454
+ return err;
431455
432456 ctx->authsize = authsize;
433457 rfc4106_set_sh_desc(authenc);
....@@ -442,13 +466,12 @@
442466 struct device *jrdev = ctx->jrdev;
443467 int ret;
444468
445
- if (keylen < 4)
446
- return -EINVAL;
469
+ ret = aes_check_keylen(keylen - 4);
470
+ if (ret)
471
+ return ret;
447472
448
-#ifdef DEBUG
449
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
450
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
451
-#endif
473
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
474
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
452475
453476 memcpy(ctx->key, key, keylen);
454477 /*
....@@ -456,8 +479,8 @@
456479 * in the nonce. Update the AES key length.
457480 */
458481 ctx->cdata.keylen = keylen - 4;
459
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
460
- ctx->dir);
482
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
483
+ ctx->cdata.keylen, ctx->dir);
461484
462485 ret = rfc4106_set_sh_desc(aead);
463486 if (ret)
....@@ -533,6 +556,9 @@
533556 {
534557 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
535558
559
+ if (authsize != 16)
560
+ return -EINVAL;
561
+
536562 ctx->authsize = authsize;
537563 rfc4543_set_sh_desc(authenc);
538564
....@@ -546,13 +572,12 @@
546572 struct device *jrdev = ctx->jrdev;
547573 int ret;
548574
549
- if (keylen < 4)
550
- return -EINVAL;
575
+ ret = aes_check_keylen(keylen - 4);
576
+ if (ret)
577
+ return ret;
551578
552
-#ifdef DEBUG
553
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
554
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
555
-#endif
579
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
580
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
556581
557582 memcpy(ctx->key, key, keylen);
558583 /*
....@@ -560,8 +585,8 @@
560585 * in the nonce. Update the AES key length.
561586 */
562587 ctx->cdata.keylen = keylen - 4;
563
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
564
- ctx->dir);
588
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
589
+ ctx->cdata.keylen, ctx->dir);
565590
566591 ret = rfc4543_set_sh_desc(aead);
567592 if (ret)
....@@ -589,107 +614,151 @@
589614 return 0;
590615 }
591616
592
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
593
- const u8 *key, unsigned int keylen)
617
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
618
+ unsigned int keylen, const u32 ctx1_iv_off)
594619 {
595
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
596
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
597
- const char *alg_name = crypto_tfm_alg_name(tfm);
620
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
621
+ struct caam_skcipher_alg *alg =
622
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
623
+ skcipher);
598624 struct device *jrdev = ctx->jrdev;
599
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
600
- u32 ctx1_iv_off = 0;
601
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
602
- OP_ALG_AAI_CTR_MOD128);
603
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
625
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
626
+ const bool is_rfc3686 = alg->caam.rfc3686;
604627 int ret = 0;
605628
606
-#ifdef DEBUG
607
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
608
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
609
-#endif
610
- /*
611
- * AES-CTR needs to load IV in CONTEXT1 reg
612
- * at an offset of 128bits (16bytes)
613
- * CONTEXT1[255:128] = IV
614
- */
615
- if (ctr_mode)
616
- ctx1_iv_off = 16;
629
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
630
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
631
+
632
+ ctx->cdata.keylen = keylen;
633
+ ctx->cdata.key_virt = key;
634
+ ctx->cdata.key_inline = true;
635
+
636
+ /* skcipher encrypt, decrypt shared descriptors */
637
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
638
+ is_rfc3686, ctx1_iv_off);
639
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
640
+ is_rfc3686, ctx1_iv_off);
641
+
642
+ /* Now update the driver contexts with the new shared descriptor */
643
+ if (ctx->drv_ctx[ENCRYPT]) {
644
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
645
+ ctx->sh_desc_enc);
646
+ if (ret) {
647
+ dev_err(jrdev, "driver enc context update failed\n");
648
+ return -EINVAL;
649
+ }
650
+ }
651
+
652
+ if (ctx->drv_ctx[DECRYPT]) {
653
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
654
+ ctx->sh_desc_dec);
655
+ if (ret) {
656
+ dev_err(jrdev, "driver dec context update failed\n");
657
+ return -EINVAL;
658
+ }
659
+ }
660
+
661
+ return ret;
662
+}
663
+
664
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
665
+ const u8 *key, unsigned int keylen)
666
+{
667
+ int err;
668
+
669
+ err = aes_check_keylen(keylen);
670
+ if (err)
671
+ return err;
672
+
673
+ return skcipher_setkey(skcipher, key, keylen, 0);
674
+}
675
+
676
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
677
+ const u8 *key, unsigned int keylen)
678
+{
679
+ u32 ctx1_iv_off;
680
+ int err;
617681
618682 /*
619683 * RFC3686 specific:
620684 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
621685 * | *key = {KEY, NONCE}
622686 */
623
- if (is_rfc3686) {
624
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
625
- keylen -= CTR_RFC3686_NONCE_SIZE;
626
- }
687
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
688
+ keylen -= CTR_RFC3686_NONCE_SIZE;
627689
628
- ctx->cdata.keylen = keylen;
629
- ctx->cdata.key_virt = key;
630
- ctx->cdata.key_inline = true;
690
+ err = aes_check_keylen(keylen);
691
+ if (err)
692
+ return err;
631693
632
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
633
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
634
- is_rfc3686, ctx1_iv_off);
635
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
636
- is_rfc3686, ctx1_iv_off);
637
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
638
- ivsize, is_rfc3686, ctx1_iv_off);
639
-
640
- /* Now update the driver contexts with the new shared descriptor */
641
- if (ctx->drv_ctx[ENCRYPT]) {
642
- ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
643
- ctx->sh_desc_enc);
644
- if (ret) {
645
- dev_err(jrdev, "driver enc context update failed\n");
646
- goto badkey;
647
- }
648
- }
649
-
650
- if (ctx->drv_ctx[DECRYPT]) {
651
- ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
652
- ctx->sh_desc_dec);
653
- if (ret) {
654
- dev_err(jrdev, "driver dec context update failed\n");
655
- goto badkey;
656
- }
657
- }
658
-
659
- if (ctx->drv_ctx[GIVENCRYPT]) {
660
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
661
- ctx->sh_desc_givenc);
662
- if (ret) {
663
- dev_err(jrdev, "driver givenc context update failed\n");
664
- goto badkey;
665
- }
666
- }
667
-
668
- return ret;
669
-badkey:
670
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
671
- return -EINVAL;
694
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
672695 }
673696
674
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
675
- const u8 *key, unsigned int keylen)
697
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
698
+ const u8 *key, unsigned int keylen)
676699 {
677
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
678
- struct device *jrdev = ctx->jrdev;
679
- int ret = 0;
700
+ u32 ctx1_iv_off;
701
+ int err;
680702
681
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
682
- dev_err(jrdev, "key size mismatch\n");
683
- goto badkey;
703
+ /*
704
+ * AES-CTR needs to load IV in CONTEXT1 reg
705
+ * at an offset of 128bits (16bytes)
706
+ * CONTEXT1[255:128] = IV
707
+ */
708
+ ctx1_iv_off = 16;
709
+
710
+ err = aes_check_keylen(keylen);
711
+ if (err)
712
+ return err;
713
+
714
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
715
+}
716
+
717
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
718
+ const u8 *key, unsigned int keylen)
719
+{
720
+ return verify_skcipher_des3_key(skcipher, key) ?:
721
+ skcipher_setkey(skcipher, key, keylen, 0);
722
+}
723
+
724
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
725
+ const u8 *key, unsigned int keylen)
726
+{
727
+ return verify_skcipher_des_key(skcipher, key) ?:
728
+ skcipher_setkey(skcipher, key, keylen, 0);
729
+}
730
+
731
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
732
+ unsigned int keylen)
733
+{
734
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
735
+ struct device *jrdev = ctx->jrdev;
736
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
737
+ int ret = 0;
738
+ int err;
739
+
740
+ err = xts_verify_key(skcipher, key, keylen);
741
+ if (err) {
742
+ dev_dbg(jrdev, "key size mismatch\n");
743
+ return err;
744
+ }
745
+
746
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
747
+ ctx->xts_key_fallback = true;
748
+
749
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
750
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
751
+ if (err)
752
+ return err;
684753 }
685754
686755 ctx->cdata.keylen = keylen;
687756 ctx->cdata.key_virt = key;
688757 ctx->cdata.key_inline = true;
689758
690
- /* xts ablkcipher encrypt, decrypt shared descriptors */
691
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
692
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
759
+ /* xts skcipher encrypt, decrypt shared descriptors */
760
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
761
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
693762
694763 /* Now update the driver contexts with the new shared descriptor */
695764 if (ctx->drv_ctx[ENCRYPT]) {
....@@ -697,7 +766,7 @@
697766 ctx->sh_desc_enc);
698767 if (ret) {
699768 dev_err(jrdev, "driver enc context update failed\n");
700
- goto badkey;
769
+ return -EINVAL;
701770 }
702771 }
703772
....@@ -706,14 +775,11 @@
706775 ctx->sh_desc_dec);
707776 if (ret) {
708777 dev_err(jrdev, "driver dec context update failed\n");
709
- goto badkey;
778
+ return -EINVAL;
710779 }
711780 }
712781
713782 return ret;
714
-badkey:
715
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
716
- return -EINVAL;
717783 }
718784
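Summary of the XTS fallback policy introduced in xts_skcipher_setkey(): key lengths other than 2 x 128 or 2 x 256 bits (that is, AES-192 XTS keys) set xts_key_fallback and are always handled by the software implementation; on Era <= 8 hardware the key is also programmed into the fallback tfm, because those devices additionally cannot process tweaks whose upper 64 bits are non-zero, a case decided per request in skcipher_crypt(). This pairs with the CRYPTO_ALG_NEED_FALLBACK flag added to the xts(aes) template further down. The fallback tfm itself is allocated on the tfm-init path, which is outside this section; a typical shape, given here only as an assumption, would be:

	/* assumed init-path sketch, not part of this hunk */
	struct crypto_skcipher *fallback;

	fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), 0,
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback))
		return PTR_ERR(fallback);
	ctx->fallback = fallback;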
719785 /*
....@@ -737,11 +803,11 @@
737803 unsigned int assoclen;
738804 dma_addr_t assoclen_dma;
739805 struct caam_drv_req drv_req;
740
- struct qm_sg_entry sgt[0];
806
+ struct qm_sg_entry sgt[];
741807 };
742808
743809 /*
744
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
810
+ * skcipher_edesc - s/w-extended skcipher descriptor
745811 * @src_nents: number of segments in input scatterlist
746812 * @dst_nents: number of segments in output scatterlist
747813 * @iv_dma: dma address of iv for checking continuity and link table
....@@ -750,14 +816,14 @@
750816 * @drv_req: driver-specific request structure
751817 * @sgt: the h/w link table, followed by IV
752818 */
753
-struct ablkcipher_edesc {
819
+struct skcipher_edesc {
754820 int src_nents;
755821 int dst_nents;
756822 dma_addr_t iv_dma;
757823 int qm_sg_bytes;
758824 dma_addr_t qm_sg_dma;
759825 struct caam_drv_req drv_req;
760
- struct qm_sg_entry sgt[0];
826
+ struct qm_sg_entry sgt[];
761827 };
762828
763829 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
....@@ -781,14 +847,12 @@
781847
782848 if (type == ENCRYPT)
783849 desc = ctx->sh_desc_enc;
784
- else if (type == DECRYPT)
850
+ else /* (type == DECRYPT) */
785851 desc = ctx->sh_desc_dec;
786
- else /* (type == GIVENCRYPT) */
787
- desc = ctx->sh_desc_givenc;
788852
789853 cpu = smp_processor_id();
790854 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
791
- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
855
+ if (!IS_ERR_OR_NULL(drv_ctx))
792856 drv_ctx->op_type = type;
793857
794858 ctx->drv_ctx[type] = drv_ctx;
....@@ -803,21 +867,20 @@
803867 static void caam_unmap(struct device *dev, struct scatterlist *src,
804868 struct scatterlist *dst, int src_nents,
805869 int dst_nents, dma_addr_t iv_dma, int ivsize,
806
- enum optype op_type, dma_addr_t qm_sg_dma,
870
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
807871 int qm_sg_bytes)
808872 {
809873 if (dst != src) {
810874 if (src_nents)
811875 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
812
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
876
+ if (dst_nents)
877
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
813878 } else {
814879 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
815880 }
816881
817882 if (iv_dma)
818
- dma_unmap_single(dev, iv_dma, ivsize,
819
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
820
- DMA_TO_DEVICE);
883
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
821884 if (qm_sg_bytes)
822885 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
823886 }
....@@ -830,21 +893,20 @@
830893 int ivsize = crypto_aead_ivsize(aead);
831894
832895 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
833
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
834
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
896
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
897
+ edesc->qm_sg_bytes);
835898 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
836899 }
837900
838
-static void ablkcipher_unmap(struct device *dev,
839
- struct ablkcipher_edesc *edesc,
840
- struct ablkcipher_request *req)
901
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
902
+ struct skcipher_request *req)
841903 {
842
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
843
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
904
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
905
+ int ivsize = crypto_skcipher_ivsize(skcipher);
844906
845907 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
846
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
847
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
908
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
909
+ edesc->qm_sg_bytes);
848910 }
849911
850912 static void aead_done(struct caam_drv_req *drv_req, u32 status)
....@@ -858,20 +920,8 @@
858920
859921 qidev = caam_ctx->qidev;
860922
861
- if (unlikely(status)) {
862
- u32 ssrc = status & JRSTA_SSRC_MASK;
863
- u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
864
-
865
- caam_jr_strstatus(qidev, status);
866
- /*
867
- * verify hw auth check passed else return -EBADMSG
868
- */
869
- if (ssrc == JRSTA_SSRC_CCB_ERROR &&
870
- err_id == JRSTA_CCBERR_ERRID_ICVCHK)
871
- ecode = -EBADMSG;
872
- else
873
- ecode = -EIO;
874
- }
923
+ if (unlikely(status))
924
+ ecode = caam_jr_strstatus(qidev, status);
875925
876926 edesc = container_of(drv_req, typeof(*edesc), drv_req);
877927 aead_unmap(qidev, edesc, aead_req);
....@@ -894,6 +944,7 @@
894944 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
895945 GFP_KERNEL : GFP_ATOMIC;
896946 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
947
+ int src_len, dst_len = 0;
897948 struct aead_edesc *edesc;
898949 dma_addr_t qm_sg_dma, iv_dma = 0;
899950 int ivsize = 0;
....@@ -902,10 +953,9 @@
902953 int in_len, out_len;
903954 struct qm_sg_entry *sg_table, *fd_sgt;
904955 struct caam_drv_ctx *drv_ctx;
905
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
906956
907
- drv_ctx = get_drv_ctx(ctx, op_type);
908
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
957
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
958
+ if (IS_ERR_OR_NULL(drv_ctx))
909959 return (struct aead_edesc *)drv_ctx;
910960
911961 /* allocate space for base edesc and hw desc commands, link tables */
....@@ -916,13 +966,13 @@
916966 }
917967
918968 if (likely(req->src == req->dst)) {
919
- src_nents = sg_nents_for_len(req->src, req->assoclen +
920
- req->cryptlen +
921
- (encrypt ? authsize : 0));
969
+ src_len = req->assoclen + req->cryptlen +
970
+ (encrypt ? authsize : 0);
971
+
972
+ src_nents = sg_nents_for_len(req->src, src_len);
922973 if (unlikely(src_nents < 0)) {
923974 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
924
- req->assoclen + req->cryptlen +
925
- (encrypt ? authsize : 0));
975
+ src_len);
926976 qi_cache_free(edesc);
927977 return ERR_PTR(src_nents);
928978 }
....@@ -935,23 +985,21 @@
935985 return ERR_PTR(-ENOMEM);
936986 }
937987 } else {
938
- src_nents = sg_nents_for_len(req->src, req->assoclen +
939
- req->cryptlen);
988
+ src_len = req->assoclen + req->cryptlen;
989
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
990
+
991
+ src_nents = sg_nents_for_len(req->src, src_len);
940992 if (unlikely(src_nents < 0)) {
941993 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
942
- req->assoclen + req->cryptlen);
994
+ src_len);
943995 qi_cache_free(edesc);
944996 return ERR_PTR(src_nents);
945997 }
946998
947
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
948
- req->cryptlen +
949
- (encrypt ? authsize :
950
- (-authsize)));
999
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
9511000 if (unlikely(dst_nents < 0)) {
9521001 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
953
- req->assoclen + req->cryptlen +
954
- (encrypt ? authsize : (-authsize)));
1002
+ dst_len);
9551003 qi_cache_free(edesc);
9561004 return ERR_PTR(dst_nents);
9571005 }
....@@ -968,13 +1016,19 @@
9681016 mapped_src_nents = 0;
9691017 }
9701018
971
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
972
- DMA_FROM_DEVICE);
973
- if (unlikely(!mapped_dst_nents)) {
974
- dev_err(qidev, "unable to map destination\n");
975
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
976
- qi_cache_free(edesc);
977
- return ERR_PTR(-ENOMEM);
1019
+ if (dst_nents) {
1020
+ mapped_dst_nents = dma_map_sg(qidev, req->dst,
1021
+ dst_nents,
1022
+ DMA_FROM_DEVICE);
1023
+ if (unlikely(!mapped_dst_nents)) {
1024
+ dev_err(qidev, "unable to map destination\n");
1025
+ dma_unmap_sg(qidev, req->src, src_nents,
1026
+ DMA_TO_DEVICE);
1027
+ qi_cache_free(edesc);
1028
+ return ERR_PTR(-ENOMEM);
1029
+ }
1030
+ } else {
1031
+ mapped_dst_nents = 0;
9781032 }
9791033 }
9801034
....@@ -984,9 +1038,24 @@
9841038 /*
9851039 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
9861040 * Input is not contiguous.
1041
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1042
+ * the end of the table by allocating more S/G entries. Logic:
1043
+ * if (src != dst && output S/G)
1044
+ * pad output S/G, if needed
1045
+ * else if (src == dst && S/G)
1046
+ * overlapping S/Gs; pad one of them
1047
+ * else if (input S/G) ...
1048
+ * pad input S/G, if needed
9871049 */
988
- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
989
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
1050
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
1051
+ if (mapped_dst_nents > 1)
1052
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
1053
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
1054
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
1055
+ 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
1056
+ else
1057
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
1058
+
9901059 sg_table = &edesc->sgt[0];
9911060 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
9921061 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
....@@ -994,7 +1063,7 @@
9941063 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
9951064 qm_sg_ents, ivsize);
9961065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
997
- 0, 0, 0, 0);
1066
+ 0, DMA_NONE, 0, 0);
9981067 qi_cache_free(edesc);
9991068 return ERR_PTR(-ENOMEM);
10001069 }
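A note on pad_sg_nents() used in the padding logic of the previous hunk: its definition is not part of this diff; consistent with the "HW reads 4 S/G entries at a time" comment, the assumption here is that it rounds the entry count up to a multiple of four, for example:

	/* assumed definition, matching the comment in the previous hunk */
	#define pad_sg_nents(nents)	ALIGN((nents), 4)

so a destination scatterlist with 5 mapped segments reserves 8 table entries, and a 4-entry hardware burst starting at the last real entry stays inside the allocation.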
....@@ -1009,7 +1078,7 @@
10091078 if (dma_mapping_error(qidev, iv_dma)) {
10101079 dev_err(qidev, "unable to map IV\n");
10111080 caam_unmap(qidev, req->src, req->dst, src_nents,
1012
- dst_nents, 0, 0, 0, 0, 0);
1081
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
10131082 qi_cache_free(edesc);
10141083 return ERR_PTR(-ENOMEM);
10151084 }
....@@ -1028,7 +1097,7 @@
10281097 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
10291098 dev_err(qidev, "unable to map assoclen\n");
10301099 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1031
- iv_dma, ivsize, op_type, 0, 0);
1100
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
10321101 qi_cache_free(edesc);
10331102 return ERR_PTR(-ENOMEM);
10341103 }
....@@ -1039,19 +1108,18 @@
10391108 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
10401109 qm_sg_index++;
10411110 }
1042
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
1111
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
10431112 qm_sg_index += mapped_src_nents;
10441113
10451114 if (mapped_dst_nents > 1)
1046
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1047
- qm_sg_index, 0);
1115
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
10481116
10491117 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
10501118 if (dma_mapping_error(qidev, qm_sg_dma)) {
10511119 dev_err(qidev, "unable to map S/G table\n");
10521120 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
10531121 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1054
- iv_dma, ivsize, op_type, 0, 0);
1122
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
10551123 qi_cache_free(edesc);
10561124 return ERR_PTR(-ENOMEM);
10571125 }
....@@ -1074,7 +1142,7 @@
10741142 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
10751143 (1 + !!ivsize) * sizeof(*sg_table),
10761144 out_len, 0);
1077
- } else if (mapped_dst_nents == 1) {
1145
+ } else if (mapped_dst_nents <= 1) {
10781146 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
10791147 0);
10801148 } else {
....@@ -1124,106 +1192,88 @@
11241192
11251193 static int ipsec_gcm_encrypt(struct aead_request *req)
11261194 {
1127
- if (req->assoclen < 8)
1128
- return -EINVAL;
1129
-
1130
- return aead_crypt(req, true);
1195
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1196
+ true);
11311197 }
11321198
11331199 static int ipsec_gcm_decrypt(struct aead_request *req)
11341200 {
1135
- if (req->assoclen < 8)
1136
- return -EINVAL;
1137
-
1138
- return aead_crypt(req, false);
1201
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1202
+ false);
11391203 }
11401204
1141
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1205
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
11421206 {
1143
- struct ablkcipher_edesc *edesc;
1144
- struct ablkcipher_request *req = drv_req->app_ctx;
1145
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1146
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
1207
+ struct skcipher_edesc *edesc;
1208
+ struct skcipher_request *req = drv_req->app_ctx;
1209
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1210
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
11471211 struct device *qidev = caam_ctx->qidev;
1148
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1212
+ int ivsize = crypto_skcipher_ivsize(skcipher);
1213
+ int ecode = 0;
11491214
1150
-#ifdef DEBUG
1151
- dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1152
-#endif
1215
+ dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
11531216
11541217 edesc = container_of(drv_req, typeof(*edesc), drv_req);
11551218
11561219 if (status)
1157
- caam_jr_strstatus(qidev, status);
1220
+ ecode = caam_jr_strstatus(qidev, status);
11581221
1159
-#ifdef DEBUG
1160
- print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
1161
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1162
- edesc->src_nents > 1 ? 100 : ivsize, 1);
1163
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1222
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1223
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1224
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
1225
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
11641226 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1165
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1166
-#endif
1227
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
11671228
1168
- ablkcipher_unmap(qidev, edesc, req);
1169
-
1170
- /* In case initial IV was generated, copy it in GIVCIPHER request */
1171
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1172
- u8 *iv;
1173
- struct skcipher_givcrypt_request *greq;
1174
-
1175
- greq = container_of(req, struct skcipher_givcrypt_request,
1176
- creq);
1177
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1178
- memcpy(greq->giv, iv, ivsize);
1179
- }
1229
+ skcipher_unmap(qidev, edesc, req);
11801230
11811231 /*
1182
- * The crypto API expects us to set the IV (req->info) to the last
1183
- * ciphertext block. This is used e.g. by the CTS mode.
1232
+ * The crypto API expects us to set the IV (req->iv) to the last
1233
+ * ciphertext block (CBC mode) or last counter (CTR mode).
1234
+ * This is used e.g. by the CTS mode.
11841235 */
1185
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
1186
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1187
- ivsize, ivsize, 0);
1236
+ if (!ecode)
1237
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1238
+ ivsize);
11881239
11891240 qi_cache_free(edesc);
1190
- ablkcipher_request_complete(req, status);
1241
+ skcipher_request_complete(req, ecode);
11911242 }
11921243
1193
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1194
- *req, bool encrypt)
1244
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1245
+ bool encrypt)
11951246 {
1196
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1197
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1247
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1248
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
11981249 struct device *qidev = ctx->qidev;
11991250 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
12001251 GFP_KERNEL : GFP_ATOMIC;
12011252 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1202
- struct ablkcipher_edesc *edesc;
1253
+ struct skcipher_edesc *edesc;
12031254 dma_addr_t iv_dma;
12041255 u8 *iv;
1205
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1256
+ int ivsize = crypto_skcipher_ivsize(skcipher);
12061257 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
12071258 struct qm_sg_entry *sg_table, *fd_sgt;
12081259 struct caam_drv_ctx *drv_ctx;
1209
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
12101260
1211
- drv_ctx = get_drv_ctx(ctx, op_type);
1212
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1213
- return (struct ablkcipher_edesc *)drv_ctx;
1261
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
1262
+ if (IS_ERR_OR_NULL(drv_ctx))
1263
+ return (struct skcipher_edesc *)drv_ctx;
12141264
1215
- src_nents = sg_nents_for_len(req->src, req->nbytes);
1265
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
12161266 if (unlikely(src_nents < 0)) {
12171267 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1218
- req->nbytes);
1268
+ req->cryptlen);
12191269 return ERR_PTR(src_nents);
12201270 }
12211271
12221272 if (unlikely(req->src != req->dst)) {
1223
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1273
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
12241274 if (unlikely(dst_nents < 0)) {
12251275 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1226
- req->nbytes);
1276
+ req->cryptlen);
12271277 return ERR_PTR(dst_nents);
12281278 }
12291279
....@@ -1253,14 +1303,26 @@
12531303 qm_sg_ents = 1 + mapped_src_nents;
12541304 dst_sg_idx = qm_sg_ents;
12551305
1256
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1306
+ /*
1307
+ * Input, output HW S/G tables: [IV, src][dst, IV]
1308
+ * IV entries point to the same buffer
1309
+ * If src == dst, S/G entries are reused (S/G tables overlap)
1310
+ *
1311
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1312
+ * the end of the table by allocating more S/G entries.
1313
+ */
1314
+ if (req->src != req->dst)
1315
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1316
+ else
1317
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1318
+
12571319 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1258
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1320
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
12591321 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
12601322 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
12611323 qm_sg_ents, ivsize);
12621324 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1263
- 0, 0, 0, 0);
1325
+ 0, DMA_NONE, 0, 0);
12641326 return ERR_PTR(-ENOMEM);
12651327 }
12661328
....@@ -1269,20 +1331,20 @@
12691331 if (unlikely(!edesc)) {
12701332 dev_err(qidev, "could not allocate extended descriptor\n");
12711333 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1272
- 0, 0, 0, 0);
1334
+ 0, DMA_NONE, 0, 0);
12731335 return ERR_PTR(-ENOMEM);
12741336 }
12751337
12761338 /* Make sure IV is located in a DMAable area */
12771339 sg_table = &edesc->sgt[0];
12781340 iv = (u8 *)(sg_table + qm_sg_ents);
1279
- memcpy(iv, req->info, ivsize);
1341
+ memcpy(iv, req->iv, ivsize);
12801342
1281
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1343
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
12821344 if (dma_mapping_error(qidev, iv_dma)) {
12831345 dev_err(qidev, "unable to map IV\n");
12841346 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1285
- 0, 0, 0, 0);
1347
+ 0, DMA_NONE, 0, 0);
12861348 qi_cache_free(edesc);
12871349 return ERR_PTR(-ENOMEM);
12881350 }
....@@ -1292,22 +1354,24 @@
12921354 edesc->iv_dma = iv_dma;
12931355 edesc->qm_sg_bytes = qm_sg_bytes;
12941356 edesc->drv_req.app_ctx = req;
1295
- edesc->drv_req.cbk = ablkcipher_done;
1357
+ edesc->drv_req.cbk = skcipher_done;
12961358 edesc->drv_req.drv_ctx = drv_ctx;
12971359
12981360 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1299
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1361
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
13001362
1301
- if (mapped_dst_nents > 1)
1302
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1303
- dst_sg_idx, 0);
1363
+ if (req->src != req->dst)
1364
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1365
+
1366
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1367
+ ivsize, 0);
13041368
13051369 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13061370 DMA_TO_DEVICE);
13071371 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13081372 dev_err(qidev, "unable to map S/G table\n");
13091373 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1310
- iv_dma, ivsize, op_type, 0, 0);
1374
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
13111375 qi_cache_free(edesc);
13121376 return ERR_PTR(-ENOMEM);
13131377 }
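Putting the skcipher S/G construction together: the input table is [IV, src...] and the output table is [dst..., IV], with the source entries reused when req->src == req->dst, and both IV entries point at the same DMA-able copy of the IV placed directly after the table inside the edesc allocation. Because that copy is mapped DMA_BIDIRECTIONAL, the engine writes the output IV back into it, which is exactly what skcipher_done() copies into req->iv. Sketch of the allocation:

	/*
	 * qi_cache allocation (struct skcipher_edesc):
	 *
	 * +--------------+----------------------------------+----------+
	 * | fixed fields | sgt[]: [IV, src...][dst..., IV]  | IV copy  |
	 * |              | (qm_sg_bytes)                    | (ivsize) |
	 * +--------------+----------------------------------+----------+
	 *                                   iv = (u8 *)(sg_table + qm_sg_ents)
	 */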
....@@ -1315,218 +1379,65 @@
13151379 fd_sgt = &edesc->drv_req.fd_sgt[0];
13161380
13171381 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1318
- ivsize + req->nbytes, 0);
1382
+ ivsize + req->cryptlen, 0);
13191383
1320
- if (req->src == req->dst) {
1384
+ if (req->src == req->dst)
13211385 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1322
- sizeof(*sg_table), req->nbytes, 0);
1323
- } else if (mapped_dst_nents > 1) {
1324
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1325
- sizeof(*sg_table), req->nbytes, 0);
1326
- } else {
1327
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1328
- req->nbytes, 0);
1329
- }
1330
-
1331
- return edesc;
1332
-}
1333
-
1334
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1335
- struct skcipher_givcrypt_request *creq)
1336
-{
1337
- struct ablkcipher_request *req = &creq->creq;
1338
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1339
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1340
- struct device *qidev = ctx->qidev;
1341
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1342
- GFP_KERNEL : GFP_ATOMIC;
1343
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1344
- struct ablkcipher_edesc *edesc;
1345
- dma_addr_t iv_dma;
1346
- u8 *iv;
1347
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1348
- struct qm_sg_entry *sg_table, *fd_sgt;
1349
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1350
- struct caam_drv_ctx *drv_ctx;
1351
-
1352
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1353
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1354
- return (struct ablkcipher_edesc *)drv_ctx;
1355
-
1356
- src_nents = sg_nents_for_len(req->src, req->nbytes);
1357
- if (unlikely(src_nents < 0)) {
1358
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1359
- req->nbytes);
1360
- return ERR_PTR(src_nents);
1361
- }
1362
-
1363
- if (unlikely(req->src != req->dst)) {
1364
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1365
- if (unlikely(dst_nents < 0)) {
1366
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1367
- req->nbytes);
1368
- return ERR_PTR(dst_nents);
1369
- }
1370
-
1371
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1372
- DMA_TO_DEVICE);
1373
- if (unlikely(!mapped_src_nents)) {
1374
- dev_err(qidev, "unable to map source\n");
1375
- return ERR_PTR(-ENOMEM);
1376
- }
1377
-
1378
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1379
- DMA_FROM_DEVICE);
1380
- if (unlikely(!mapped_dst_nents)) {
1381
- dev_err(qidev, "unable to map destination\n");
1382
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1383
- return ERR_PTR(-ENOMEM);
1384
- }
1385
- } else {
1386
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1387
- DMA_BIDIRECTIONAL);
1388
- if (unlikely(!mapped_src_nents)) {
1389
- dev_err(qidev, "unable to map source\n");
1390
- return ERR_PTR(-ENOMEM);
1391
- }
1392
-
1393
- dst_nents = src_nents;
1394
- mapped_dst_nents = src_nents;
1395
- }
1396
-
1397
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1398
- dst_sg_idx = qm_sg_ents;
1399
-
1400
- qm_sg_ents += 1 + mapped_dst_nents;
1401
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1402
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1403
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1404
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1405
- qm_sg_ents, ivsize);
1406
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1407
- 0, 0, 0, 0);
1408
- return ERR_PTR(-ENOMEM);
1409
- }
1410
-
1411
- /* allocate space for base edesc, link tables and IV */
1412
- edesc = qi_cache_alloc(GFP_DMA | flags);
1413
- if (!edesc) {
1414
- dev_err(qidev, "could not allocate extended descriptor\n");
1415
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1416
- 0, 0, 0, 0);
1417
- return ERR_PTR(-ENOMEM);
1418
- }
1419
-
1420
- /* Make sure IV is located in a DMAable area */
1421
- sg_table = &edesc->sgt[0];
1422
- iv = (u8 *)(sg_table + qm_sg_ents);
1423
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1424
- if (dma_mapping_error(qidev, iv_dma)) {
1425
- dev_err(qidev, "unable to map IV\n");
1426
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1427
- 0, 0, 0, 0);
1428
- qi_cache_free(edesc);
1429
- return ERR_PTR(-ENOMEM);
1430
- }
1431
-
1432
- edesc->src_nents = src_nents;
1433
- edesc->dst_nents = dst_nents;
1434
- edesc->iv_dma = iv_dma;
1435
- edesc->qm_sg_bytes = qm_sg_bytes;
1436
- edesc->drv_req.app_ctx = req;
1437
- edesc->drv_req.cbk = ablkcipher_done;
1438
- edesc->drv_req.drv_ctx = drv_ctx;
1439
-
1440
- if (mapped_src_nents > 1)
1441
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1442
-
1443
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1444
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1445
- 0);
1446
-
1447
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1448
- DMA_TO_DEVICE);
1449
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1450
- dev_err(qidev, "unable to map S/G table\n");
1451
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1452
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
1453
- qi_cache_free(edesc);
1454
- return ERR_PTR(-ENOMEM);
1455
- }
1456
-
1457
- fd_sgt = &edesc->drv_req.fd_sgt[0];
1458
-
1459
- if (mapped_src_nents > 1)
1460
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1386
+ sizeof(*sg_table), req->cryptlen + ivsize,
14611387 0);
14621388 else
1463
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1464
- req->nbytes, 0);
1465
-
1466
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1467
- sizeof(*sg_table), ivsize + req->nbytes, 0);
1389
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1390
+ sizeof(*sg_table), req->cryptlen + ivsize,
1391
+ 0);
14681392
14691393 return edesc;
14701394 }
14711395
1472
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1396
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
14731397 {
1474
- struct ablkcipher_edesc *edesc;
1475
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1476
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1477
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1398
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1399
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1400
+
1401
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1402
+}
1403
+
1404
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1405
+{
1406
+ struct skcipher_edesc *edesc;
1407
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1408
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1409
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
14781410 int ret;
1479
-
1480
- if (unlikely(caam_congested))
1481
- return -EAGAIN;
1482
-
1483
- /* allocate extended descriptor */
1484
- edesc = ablkcipher_edesc_alloc(req, encrypt);
1485
- if (IS_ERR(edesc))
1486
- return PTR_ERR(edesc);
14871411
14881412 /*
1489
- * The crypto API expects us to set the IV (req->info) to the last
1490
- * ciphertext block.
1413
+ * XTS is expected to return an error even for input length = 0
1414
+ * Note that the case input length < block size will be caught during
1415
+ * HW offloading and return an error.
14911416 */
1492
- if (!encrypt)
1493
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1494
- ivsize, ivsize, 0);
1417
+ if (!req->cryptlen && !ctx->fallback)
1418
+ return 0;
14951419
1496
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1497
- if (!ret) {
1498
- ret = -EINPROGRESS;
1499
- } else {
1500
- ablkcipher_unmap(ctx->qidev, edesc, req);
1501
- qi_cache_free(edesc);
1420
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
1421
+ ctx->xts_key_fallback)) {
1422
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
1423
+
1424
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
1425
+ skcipher_request_set_callback(&rctx->fallback_req,
1426
+ req->base.flags,
1427
+ req->base.complete,
1428
+ req->base.data);
1429
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
1430
+ req->dst, req->cryptlen, req->iv);
1431
+
1432
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1433
+ crypto_skcipher_decrypt(&rctx->fallback_req);
15021434 }
1503
-
1504
- return ret;
1505
-}
1506
-
1507
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
1508
-{
1509
- return ablkcipher_crypt(req, true);
1510
-}
1511
-
1512
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
1513
-{
1514
- return ablkcipher_crypt(req, false);
1515
-}
1516
-
1517
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1518
-{
1519
- struct ablkcipher_request *req = &creq->creq;
1520
- struct ablkcipher_edesc *edesc;
1521
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1522
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1523
- int ret;
15241435
15251436 if (unlikely(caam_congested))
15261437 return -EAGAIN;
15271438
15281439 /* allocate extended descriptor */
1529
- edesc = ablkcipher_giv_edesc_alloc(creq);
1440
+ edesc = skcipher_edesc_alloc(req, encrypt);
15301441 if (IS_ERR(edesc))
15311442 return PTR_ERR(edesc);
15321443
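On xts_skcipher_ivsize() and the fallback decision in skcipher_crypt() above: for XTS the 16-byte IV carries the tweak, and a typical caller encodes the 64-bit sector number in its lower half; the helper returns true when the upper 8 bytes are non-zero, which Era <= 8 CAAM hardware cannot process, so such requests (and any request on a tfm flagged xts_key_fallback) are re-issued on the embedded fallback request instead. A caller-side illustration, with hypothetical variable names:

	u8 iv[AES_BLOCK_SIZE] = { };

	put_unaligned_le64(sector, iv);	/* low 64 bits of the tweak */
	/*
	 * Upper 64 bits stay zero, so xts_skcipher_ivsize() returns false
	 * and the request can stay on the engine even on Era <= 8 parts.
	 */
	skcipher_request_set_crypt(req, src, dst, len, iv);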
....@@ -1534,129 +1445,129 @@
15341445 if (!ret) {
15351446 ret = -EINPROGRESS;
15361447 } else {
1537
- ablkcipher_unmap(ctx->qidev, edesc, req);
1448
+ skcipher_unmap(ctx->qidev, edesc, req);
15381449 qi_cache_free(edesc);
15391450 }
15401451
15411452 return ret;
15421453 }
15431454
1544
-#define template_ablkcipher template_u.ablkcipher
1545
-struct caam_alg_template {
1546
- char name[CRYPTO_MAX_ALG_NAME];
1547
- char driver_name[CRYPTO_MAX_ALG_NAME];
1548
- unsigned int blocksize;
1549
- u32 type;
1550
- union {
1551
- struct ablkcipher_alg ablkcipher;
1552
- } template_u;
1553
- u32 class1_alg_type;
1554
- u32 class2_alg_type;
1555
-};
1455
+static int skcipher_encrypt(struct skcipher_request *req)
1456
+{
1457
+ return skcipher_crypt(req, true);
1458
+}
15561459
1557
-static struct caam_alg_template driver_algs[] = {
1558
- /* ablkcipher descriptor */
1460
+static int skcipher_decrypt(struct skcipher_request *req)
1461
+{
1462
+ return skcipher_crypt(req, false);
1463
+}
1464
+
1465
+static struct caam_skcipher_alg driver_algs[] = {
15591466 {
1560
- .name = "cbc(aes)",
1561
- .driver_name = "cbc-aes-caam-qi",
1562
- .blocksize = AES_BLOCK_SIZE,
1563
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1564
- .template_ablkcipher = {
1565
- .setkey = ablkcipher_setkey,
1566
- .encrypt = ablkcipher_encrypt,
1567
- .decrypt = ablkcipher_decrypt,
1568
- .givencrypt = ablkcipher_givencrypt,
1569
- .geniv = "<built-in>",
1467
+ .skcipher = {
1468
+ .base = {
1469
+ .cra_name = "cbc(aes)",
1470
+ .cra_driver_name = "cbc-aes-caam-qi",
1471
+ .cra_blocksize = AES_BLOCK_SIZE,
1472
+ },
1473
+ .setkey = aes_skcipher_setkey,
1474
+ .encrypt = skcipher_encrypt,
1475
+ .decrypt = skcipher_decrypt,
15701476 .min_keysize = AES_MIN_KEY_SIZE,
15711477 .max_keysize = AES_MAX_KEY_SIZE,
15721478 .ivsize = AES_BLOCK_SIZE,
15731479 },
1574
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1480
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
15751481 },
15761482 {
1577
- .name = "cbc(des3_ede)",
1578
- .driver_name = "cbc-3des-caam-qi",
1579
- .blocksize = DES3_EDE_BLOCK_SIZE,
1580
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1581
- .template_ablkcipher = {
1582
- .setkey = ablkcipher_setkey,
1583
- .encrypt = ablkcipher_encrypt,
1584
- .decrypt = ablkcipher_decrypt,
1585
- .givencrypt = ablkcipher_givencrypt,
1586
- .geniv = "<built-in>",
1483
+ .skcipher = {
1484
+ .base = {
1485
+ .cra_name = "cbc(des3_ede)",
1486
+ .cra_driver_name = "cbc-3des-caam-qi",
1487
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1488
+ },
1489
+ .setkey = des3_skcipher_setkey,
1490
+ .encrypt = skcipher_encrypt,
1491
+ .decrypt = skcipher_decrypt,
15871492 .min_keysize = DES3_EDE_KEY_SIZE,
15881493 .max_keysize = DES3_EDE_KEY_SIZE,
15891494 .ivsize = DES3_EDE_BLOCK_SIZE,
15901495 },
1591
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1496
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15921497 },
15931498 {
1594
- .name = "cbc(des)",
1595
- .driver_name = "cbc-des-caam-qi",
1596
- .blocksize = DES_BLOCK_SIZE,
1597
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1598
- .template_ablkcipher = {
1599
- .setkey = ablkcipher_setkey,
1600
- .encrypt = ablkcipher_encrypt,
1601
- .decrypt = ablkcipher_decrypt,
1602
- .givencrypt = ablkcipher_givencrypt,
1603
- .geniv = "<built-in>",
1499
+ .skcipher = {
1500
+ .base = {
1501
+ .cra_name = "cbc(des)",
1502
+ .cra_driver_name = "cbc-des-caam-qi",
1503
+ .cra_blocksize = DES_BLOCK_SIZE,
1504
+ },
1505
+ .setkey = des_skcipher_setkey,
1506
+ .encrypt = skcipher_encrypt,
1507
+ .decrypt = skcipher_decrypt,
16041508 .min_keysize = DES_KEY_SIZE,
16051509 .max_keysize = DES_KEY_SIZE,
16061510 .ivsize = DES_BLOCK_SIZE,
16071511 },
1608
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1512
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
16091513 },
16101514 {
1611
- .name = "ctr(aes)",
1612
- .driver_name = "ctr-aes-caam-qi",
1613
- .blocksize = 1,
1614
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1615
- .template_ablkcipher = {
1616
- .setkey = ablkcipher_setkey,
1617
- .encrypt = ablkcipher_encrypt,
1618
- .decrypt = ablkcipher_decrypt,
1619
- .geniv = "chainiv",
1515
+ .skcipher = {
1516
+ .base = {
1517
+ .cra_name = "ctr(aes)",
1518
+ .cra_driver_name = "ctr-aes-caam-qi",
1519
+ .cra_blocksize = 1,
1520
+ },
1521
+ .setkey = ctr_skcipher_setkey,
1522
+ .encrypt = skcipher_encrypt,
1523
+ .decrypt = skcipher_decrypt,
16201524 .min_keysize = AES_MIN_KEY_SIZE,
16211525 .max_keysize = AES_MAX_KEY_SIZE,
16221526 .ivsize = AES_BLOCK_SIZE,
1527
+ .chunksize = AES_BLOCK_SIZE,
16231528 },
1624
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1529
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1530
+ OP_ALG_AAI_CTR_MOD128,
16251531 },
16261532 {
1627
- .name = "rfc3686(ctr(aes))",
1628
- .driver_name = "rfc3686-ctr-aes-caam-qi",
1629
- .blocksize = 1,
1630
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1631
- .template_ablkcipher = {
1632
- .setkey = ablkcipher_setkey,
1633
- .encrypt = ablkcipher_encrypt,
1634
- .decrypt = ablkcipher_decrypt,
1635
- .givencrypt = ablkcipher_givencrypt,
1636
- .geniv = "<built-in>",
1533
+ .skcipher = {
1534
+ .base = {
1535
+ .cra_name = "rfc3686(ctr(aes))",
1536
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
1537
+ .cra_blocksize = 1,
1538
+ },
1539
+ .setkey = rfc3686_skcipher_setkey,
1540
+ .encrypt = skcipher_encrypt,
1541
+ .decrypt = skcipher_decrypt,
16371542 .min_keysize = AES_MIN_KEY_SIZE +
16381543 CTR_RFC3686_NONCE_SIZE,
16391544 .max_keysize = AES_MAX_KEY_SIZE +
16401545 CTR_RFC3686_NONCE_SIZE,
16411546 .ivsize = CTR_RFC3686_IV_SIZE,
1547
+ .chunksize = AES_BLOCK_SIZE,
16421548 },
1643
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1549
+ .caam = {
1550
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
1551
+ OP_ALG_AAI_CTR_MOD128,
1552
+ .rfc3686 = true,
1553
+ },
16441554 },
16451555 {
1646
- .name = "xts(aes)",
1647
- .driver_name = "xts-aes-caam-qi",
1648
- .blocksize = AES_BLOCK_SIZE,
1649
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1650
- .template_ablkcipher = {
1651
- .setkey = xts_ablkcipher_setkey,
1652
- .encrypt = ablkcipher_encrypt,
1653
- .decrypt = ablkcipher_decrypt,
1654
- .geniv = "eseqiv",
1556
+ .skcipher = {
1557
+ .base = {
1558
+ .cra_name = "xts(aes)",
1559
+ .cra_driver_name = "xts-aes-caam-qi",
1560
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1561
+ .cra_blocksize = AES_BLOCK_SIZE,
1562
+ },
1563
+ .setkey = xts_skcipher_setkey,
1564
+ .encrypt = skcipher_encrypt,
1565
+ .decrypt = skcipher_decrypt,
16551566 .min_keysize = 2 * AES_MIN_KEY_SIZE,
16561567 .max_keysize = 2 * AES_MAX_KEY_SIZE,
16571568 .ivsize = AES_BLOCK_SIZE,
16581569 },
1659
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1570
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
16601571 },
16611572 };
16621573
....@@ -1677,6 +1588,7 @@
16771588 },
16781589 .caam = {
16791590 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1591
+ .nodkp = true,
16801592 },
16811593 },
16821594 {
....@@ -1695,6 +1607,7 @@
16951607 },
16961608 .caam = {
16971609 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1610
+ .nodkp = true,
16981611 },
16991612 },
17001613 /* Galois Counter Mode */
....@@ -1714,6 +1627,7 @@
17141627 },
17151628 .caam = {
17161629 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1630
+ .nodkp = true,
17171631 }
17181632 },
17191633 /* single-pass ipsec_esp descriptor */
@@ -1992,7 +1906,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2014,7 +1928,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2037,7 +1951,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2060,7 +1974,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2083,7 +1997,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2106,7 +2020,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2129,7 +2043,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2152,7 +2066,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2175,7 +2089,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2198,7 +2112,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2221,7 +2135,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2244,7 +2158,7 @@
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
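The twelve hunks above switch every des3_ede-based authenc template from aead_setkey to des3_aead_setkey so that 3DES keys are validated before the shared descriptors are built. The helper's body is outside this excerpt; a hedged sketch of the usual shape, assuming the generic crypto_authenc_extractkeys() and crypto_des3_ede_verify_key() helpers (from <crypto/authenc.h> and <crypto/internal/des.h>) are available, looks like this:

/*
 * Sketch only - the real des3_aead_setkey() lives elsewhere in this file.
 * Rejects keys that degenerate to single/double DES, then reuses the
 * common aead_setkey() path to build the shared descriptors.
 */
static int des3_aead_setkey_sketch(struct crypto_aead *aead, const u8 *key,
				   unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);
out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}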
@@ -2528,16 +2442,11 @@
 	},
 };
 
-struct caam_crypto_alg {
-	struct list_head entry;
-	struct crypto_alg crypto_alg;
-	struct caam_alg_entry caam;
-};
-
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 			    bool uses_dkp)
 {
 	struct caam_drv_private *priv;
+	struct device *dev;
 
 	/*
 	 * distribute tfms across job rings to ensure in-order
@@ -2549,16 +2458,17 @@
 		return PTR_ERR(ctx->jrdev);
 	}
 
-	priv = dev_get_drvdata(ctx->jrdev->parent);
+	dev = ctx->jrdev->parent;
+	priv = dev_get_drvdata(dev);
 	if (priv->era >= 6 && uses_dkp)
 		ctx->dir = DMA_BIDIRECTIONAL;
 	else
 		ctx->dir = DMA_TO_DEVICE;
 
-	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
 				      ctx->dir);
-	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
-		dev_err(ctx->jrdev, "unable to map key\n");
+	if (dma_mapping_error(dev, ctx->key_dma)) {
+		dev_err(dev, "unable to map key\n");
 		caam_jr_free(ctx->jrdev);
 		return -ENOMEM;
 	}
@@ -2567,24 +2477,46 @@
 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 
-	ctx->qidev = priv->qidev;
+	ctx->qidev = dev;
 
 	spin_lock_init(&ctx->lock);
 	ctx->drv_ctx[ENCRYPT] = NULL;
 	ctx->drv_ctx[DECRYPT] = NULL;
-	ctx->drv_ctx[GIVENCRYPT] = NULL;
 
 	return 0;
 }
 
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
-							crypto_alg);
-	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct caam_skcipher_alg *caam_alg =
+		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+	int ret = 0;
 
-	return caam_init_common(ctx, &caam_alg->caam, false);
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			pr_err("Failed to allocate %s fallback: %ld\n",
+			       tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	ret = caam_init_common(ctx, &caam_alg->caam, false);
+	if (ret && ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+
+	return ret;
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
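The fallback transform and the enlarged request context set up in caam_cra_init() above are consumed on the data path. That dispatch is outside this excerpt; the sketch below (illustrative only, assuming a gating flag such as ctx->xts_key_fallback set by xts_skcipher_setkey() for key sizes the CAAM cannot process) shows the typical shape of handing an XTS request off to the software fallback:

/*
 * Sketch (not part of this hunk): re-issue the skcipher request on the
 * software fallback tfm, reusing the per-request context reserved via
 * crypto_skcipher_set_reqsize() in caam_cra_init().
 */
static int skcipher_do_fallback_sketch(struct skcipher_request *req,
					bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
			 crypto_skcipher_decrypt(&rctx->fallback_req);
}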
@@ -2594,24 +2526,27 @@
 						      aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
 
-	return caam_init_common(ctx, &caam_alg->caam,
-				alg->setkey == aead_setkey);
+	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
 }
 
 static void caam_exit_common(struct caam_ctx *ctx)
 {
 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
-	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
 
-	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
+			 ctx->dir);
 
 	caam_jr_free(ctx->jrdev);
 }
 
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_tfm_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2554,8 @@
 	caam_exit_common(crypto_aead_ctx(tfm));
 }
 
-static struct list_head alg_list;
-static void __exit caam_qi_algapi_exit(void)
+void caam_qi_algapi_exit(void)
 {
-	struct caam_crypto_alg *t_alg, *n;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2565,26 @@
 			crypto_unregister_aead(&t_alg->aead);
 	}
 
-	if (!alg_list.next)
-		return;
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
 
-	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
-		crypto_unregister_alg(&t_alg->crypto_alg);
-		list_del(&t_alg->entry);
-		kfree(t_alg);
+		if (t_alg->registered)
+			crypto_unregister_skcipher(&t_alg->skcipher);
 	}
 }
 
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
-					      *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 {
-	struct caam_crypto_alg *t_alg;
-	struct crypto_alg *alg;
+	struct skcipher_alg *alg = &t_alg->skcipher;
 
-	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
-	if (!t_alg)
-		return ERR_PTR(-ENOMEM);
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+				CRYPTO_ALG_KERN_DRIVER_ONLY);
 
-	alg = &t_alg->crypto_alg;
-
-	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 template->driver_name);
-	alg->cra_module = THIS_MODULE;
-	alg->cra_init = caam_cra_init;
-	alg->cra_exit = caam_cra_exit;
-	alg->cra_priority = CAAM_CRA_PRIORITY;
-	alg->cra_blocksize = template->blocksize;
-	alg->cra_alignmask = 0;
-	alg->cra_ctxsize = sizeof(struct caam_ctx);
-	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			 template->type;
-	switch (template->type) {
-	case CRYPTO_ALG_TYPE_GIVCIPHER:
-		alg->cra_type = &crypto_givcipher_type;
-		alg->cra_ablkcipher = template->template_ablkcipher;
-		break;
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		alg->cra_type = &crypto_ablkcipher_type;
-		alg->cra_ablkcipher = template->template_ablkcipher;
-		break;
-	}
-
-	t_alg->caam.class1_alg_type = template->class1_alg_type;
-	t_alg->caam.class2_alg_type = template->class2_alg_type;
-
-	return t_alg;
+	alg->init = caam_cra_init;
+	alg->exit = caam_cra_exit;
 }
 
 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2690,70 +2594,62 @@
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	alg->init = caam_aead_init;
 	alg->exit = caam_aead_exit;
 }
 
-static int __init caam_qi_algapi_init(void)
+int caam_qi_algapi_init(struct device *ctrldev)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
+	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 	int i = 0, err = 0;
-	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	of_node_put(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-
-	/*
-	 * If priv is NULL, it's probably because the caam driver wasn't
-	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-	 */
-	if (!priv || !priv->qi_present)
-		return -ENODEV;
-
-	if (caam_dpaa2) {
-		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
-		return -ENODEV;
-	}
-
-	INIT_LIST_HEAD(&alg_list);
+	/* Make sure this runs only on (DPAA 1.x) QI */
+	if (!priv->qi_present || caam_dpaa2)
+		return 0;
 
 	/*
 	 * Register crypto algorithms the device supports.
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
 	 */
-	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	if (priv->era < 10) {
+		u32 cha_vid, cha_inst;
+
+		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+			   CHA_ID_LS_DES_SHIFT;
+		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	} else {
+		u32 aesa, mdha;
+
+		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+		aes_inst = aesa & CHA_VER_NUM_MASK;
+		md_inst = mdha & CHA_VER_NUM_MASK;
+	}
 
 	/* If MD is present, limit digest size based on LP256 */
-	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
 		md_limit = SHA256_DIGEST_SIZE;
 
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-		struct caam_crypto_alg *t_alg;
-		struct caam_alg_template *alg = driver_algs + i;
-		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
 
 		/* Skip DES algorithms if not supported by device */
 		if (!des_inst &&
@@ -2765,23 +2661,16 @@
 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
 			continue;
 
-		t_alg = caam_alg_alloc(alg);
-		if (IS_ERR(t_alg)) {
-			err = PTR_ERR(t_alg);
-			dev_warn(priv->qidev, "%s alg allocation failed\n",
-				 alg->driver_name);
-			continue;
-		}
+		caam_skcipher_alg_init(t_alg);
 
-		err = crypto_register_alg(&t_alg->crypto_alg);
+		err = crypto_register_skcipher(&t_alg->skcipher);
 		if (err) {
-			dev_warn(priv->qidev, "%s alg registration failed\n",
-				 t_alg->crypto_alg.cra_driver_name);
-			kfree(t_alg);
+			dev_warn(ctrldev, "%s alg registration failed\n",
+				 t_alg->skcipher.base.cra_driver_name);
 			continue;
 		}
 
-		list_add_tail(&t_alg->entry, &alg_list);
+		t_alg->registered = true;
 		registered = true;
 	}
 
@@ -2807,8 +2696,7 @@
 		 * Check support for AES algorithms not available
 		 * on LP devices.
 		 */
-		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
-		    (alg_aai == OP_ALG_AAI_GCM))
+		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
 			continue;
 
 		/*
@@ -2833,14 +2721,7 @@
 	}
 
 	if (registered)
-		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
 
 	return err;
 }
-
-module_init(caam_qi_algapi_init);
-module_exit(caam_qi_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
-MODULE_AUTHOR("Freescale Semiconductor");
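With module_init()/module_exit() and the MODULE_* boilerplate removed, caam_qi_algapi_init() and caam_qi_algapi_exit() become plain entry points that the CAAM controller driver calls once the QI backend is up. The actual call site is not part of this patch; an illustrative sketch of such a caller (function names are hypothetical) could look like:

/*
 * Illustrative sketch only: controller-level wiring of the QI frontend
 * entry points exported above.
 */
static int example_register_qi_algs(struct device *ctrldev)
{
	int err;

	err = caam_qi_algapi_init(ctrldev);
	if (err)
		dev_err(ctrldev, "caam/qi algorithm registration failed: %d\n",
			err);
	return err;
}

static void example_unregister_qi_algs(void)
{
	caam_qi_algapi_exit();
}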