2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
--- a/kernel/drivers/crypto/ccree/cc_aead.c
+++ b/kernel/drivers/crypto/ccree/cc_aead.c
@@ -1,13 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
-#include <crypto/des.h>
+#include <crypto/gcm.h>
 #include <linux/rtnetlink.h>
+#include <crypto/internal/des.h>
 #include "cc_driver.h"
 #include "cc_buffer_mgr.h"
 #include "cc_aead.h"
@@ -23,14 +24,10 @@
 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
 
-#define AES_CCM_RFC4309_NONCE_SIZE 3
 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
 
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
 struct cc_aead_handle {
-	cc_sram_addr_t sram_workspace_addr;
+	u32 sram_workspace_addr;
 	struct list_head aead_list;
 };
 
@@ -58,15 +55,11 @@
 	unsigned int enc_keylen;
 	unsigned int auth_keylen;
 	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
+	unsigned int hash_len;
 	enum drv_cipher_mode cipher_mode;
 	enum cc_flow_mode flow_mode;
 	enum drv_hash_mode auth_mode;
 };
-
-static inline bool valid_assoclen(struct aead_request *req)
-{
-	return ((req->assoclen == 16) || (req->assoclen == 20));
-}
 
 static void cc_aead_exit(struct crypto_aead *tfm)
 {
@@ -120,6 +113,13 @@
 			hmac->padded_authkey = NULL;
 		}
 	}
+}
+
+static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	return cc_get_default_hash_len(ctx->drvdata);
 }
 
 static int cc_aead_init(struct crypto_aead *tfm)
@@ -196,6 +196,7 @@
 		ctx->auth_state.hmac.ipad_opad = NULL;
 		ctx->auth_state.hmac.padded_authkey = NULL;
 	}
+	ctx->hash_len = cc_get_aead_hash_len(tfm);
 
 	return 0;
 
@@ -210,6 +211,10 @@
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	/* BACKLOG notification */
+	if (err == -EINPROGRESS)
+		goto done;
 
 	cc_unmap_aead_request(dev, areq);
 
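Note on the hunk above: the early return for err == -EINPROGRESS implements the crypto API's backlog convention. A request queued with CRYPTO_TFM_REQ_MAY_BACKLOG is completed once with -EINPROGRESS when the hardware actually accepts it, and the real result is signalled by a later, second completion. A hedged sketch of the matching caller-side handler (my_complete() is an illustrative name, not driver code):

#include <linux/completion.h>
#include <linux/crypto.h>

/* Sketch: ignore the backlog notification; only the second call is final. */
static void my_complete(struct crypto_async_request *req, int err)
{
	struct crypto_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;	/* moved off the backlog; result still pending */

	wait->err = err;
	complete(&wait->completion);
}

This mirrors what the generic crypto_req_done()/crypto_wait_req() helpers do.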
@@ -227,31 +232,17 @@
 			/* In case of payload authentication failure, MUST NOT
 			 * revealed the decrypted message --> zero its memory.
 			 */
-			cc_zero_sgl(areq->dst, areq->cryptlen);
+			sg_zero_buffer(areq->dst, sg_nents(areq->dst),
+				       areq->cryptlen, areq->assoclen);
 			err = -EBADMSG;
 		}
-	} else { /*ENCRYPT*/
-		if (areq_ctx->is_icv_fragmented) {
-			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+	/*ENCRYPT*/
+	} else if (areq_ctx->is_icv_fragmented) {
+		u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 
-			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
-					   areq_ctx->dst_sgl, skip,
-					   (skip + ctx->authsize),
-					   CC_SG_FROM_BUF);
-		}
-
-		/* If an IV was generated, copy it back to the user provided
-		 * buffer.
-		 */
-		if (areq_ctx->backup_giv) {
-			if (ctx->cipher_mode == DRV_CIPHER_CTR)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CTR_RFC3686_NONCE_SIZE,
-				       CTR_RFC3686_IV_SIZE);
-			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
-		}
+		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+				   skip, (skip + ctx->authsize),
+				   CC_SG_FROM_BUF);
 	}
 done:
 	aead_request_complete(areq, err);
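Note: sg_zero_buffer() (lib/scatterlist.c, declared in <linux/scatterlist.h>) zeroes buflen bytes of a scatterlist starting skip bytes in. Passing areq->assoclen as the skip wipes only the decrypted payload after an ICV mismatch and leaves the associated data at the head of the destination intact, whereas the old driver-local cc_zero_sgl() zeroed from offset zero. A minimal sketch of the call pattern (wipe_failed_plaintext() is a hypothetical helper, not driver code):

#include <linux/scatterlist.h>

/* Zero the decrypted payload, skipping the copied associated data. */
static void wipe_failed_plaintext(struct scatterlist *dst,
				  unsigned int cryptlen,
				  unsigned int assoclen)
{
	sg_zero_buffer(dst, sg_nents(dst), cryptlen, assoclen);
}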
@@ -298,7 +289,8 @@
 	return 4;
 }
 
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+				struct cc_aead_ctx *ctx)
 {
 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 	unsigned int digest_ofs = 0;
@@ -327,7 +319,7 @@
 	/* Load the hash current length*/
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+	set_din_const(&desc[idx], 0, ctx->hash_len);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
@@ -389,13 +381,13 @@
 			return -EINVAL;
 		break;
 	default:
-		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+		dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 		return -EINVAL;
 	}
 	/* Check cipher key size */
 	if (ctx->flow_mode == S_DIN_to_DES) {
 		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
-			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+			dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
 				ctx->enc_keylen);
 			return -EINVAL;
 		}
@@ -403,7 +395,7 @@
 		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 		    ctx->enc_keylen != AES_KEYSIZE_256) {
-			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+			dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
 				ctx->enc_keylen);
 			return -EINVAL;
 		}
@@ -421,7 +413,7 @@
 	dma_addr_t key_dma_addr = 0;
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+	u32 larval_addr;
 	struct cc_crypto_req cc_req = {};
 	unsigned int blocksize;
 	unsigned int digestsize;
@@ -452,18 +444,19 @@
 	if (!key)
 		return -ENOMEM;
 
-	key_dma_addr = dma_map_single(dev, (void *)key, keylen,
-				      DMA_TO_DEVICE);
+	key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, key_dma_addr)) {
 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 			key, keylen);
-		kzfree(key);
+		kfree_sensitive(key);
 		return -ENOMEM;
 	}
 	if (keylen > blocksize) {
 		/* Load hash initial state */
 		hw_desc_init(&desc[idx]);
 		set_cipher_mode(&desc[idx], hashmode);
+		larval_addr = cc_larval_digest_addr(ctx->drvdata,
+						    ctx->auth_mode);
 		set_din_sram(&desc[idx], larval_addr, digestsize);
 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -472,7 +465,7 @@
 		/* Load the hash current length*/
 		hw_desc_init(&desc[idx]);
 		set_cipher_mode(&desc[idx], hashmode);
-		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+		set_din_const(&desc[idx], 0, ctx->hash_len);
 		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -540,7 +533,7 @@
 	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
-	kzfree(key);
+	kfree_sensitive(key);
 
 	return rc;
 }
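Note: kzfree() was renamed kfree_sensitive() in mainline; both zero the buffer (via memzero_explicit()) before handing it to kfree(), so copies of key material do not linger in freed memory. A sketch of the intended usage around a temporary key copy (hash_key_copy() is an illustrative name):

#include <linux/slab.h>
#include <linux/string.h>

static int hash_key_copy(const u8 *authkey, unsigned int keylen)
{
	u8 *tmp = kmemdup(authkey, keylen, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	/* ... feed tmp to the hash engine ... */
	kfree_sensitive(tmp);	/* memzero_explicit() + kfree() */
	return 0;
}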
@@ -566,7 +559,7 @@
 
 	rc = crypto_authenc_extractkeys(&keys, key, keylen);
 	if (rc)
-		goto badkey;
+		return rc;
 	enckey = keys.enckey;
 	authkey = keys.authkey;
 	ctx->enc_keylen = keys.enckeylen;
@@ -574,10 +567,9 @@
 
 	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 		/* the nonce is stored in bytes at end of key */
-		rc = -EINVAL;
 		if (ctx->enc_keylen <
 		    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
-			goto badkey;
+			return -EINVAL;
 		/* Copy nonce from last 4 bytes in CTR key to
 		 * first 4 bytes in CTR IV
 		 */
@@ -595,7 +587,7 @@
 
 	rc = validate_keys_sizes(ctx);
 	if (rc)
-		goto badkey;
+		return rc;
 
 	/* STAT_PHASE_1: Copy key to ctx */
 
@@ -609,7 +601,7 @@
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
-			goto badkey;
+			return rc;
 	}
 
 	/* STAT_PHASE_2: Create sequence */
@@ -626,8 +618,7 @@
 		break; /* No auth. key setup */
 	default:
 		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-		rc = -ENOTSUPP;
-		goto badkey;
+		return -ENOTSUPP;
 	}
 
 	/* STAT_PHASE_3: Submit sequence to HW */
@@ -636,18 +627,29 @@
 		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 		if (rc) {
 			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-			goto setkey_error;
+			return rc;
 		}
 	}
 
 	/* Update STAT_PHASE_3 */
 	return rc;
+}
 
-badkey:
-	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			       unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	int err;
 
-setkey_error:
-	return rc;
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		return err;
+
+	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+	      cc_aead_setkey(aead, key, keylen);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
 }
 
 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
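The new cc_des3_aead_setkey() combines two generic helpers: crypto_authenc_extractkeys() parses the authenc() key blob (an rtattr carrying struct crypto_authenc_key_param with the big-endian enckeylen, followed by the raw authentication key and then the cipher key), and verify_aead_des3_key() from <crypto/internal/des.h> rejects degenerate triple-DES keys; the a ?: b expression falls through to cc_aead_setkey() only when that check returns 0. A hedged sketch of building such a key blob on the caller side (pack_authenc_key() is hypothetical, not driver or core code):

#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <crypto/authenc.h>

/* Pack auth_key + enc_key into the authenc() key-blob layout. */
static int pack_authenc_key(u8 *blob, unsigned int blob_len,
			    const u8 *auth_key, unsigned int auth_keylen,
			    const u8 *enc_key, unsigned int enc_keylen)
{
	struct rtattr *rta = (struct rtattr *)blob;
	struct crypto_authenc_key_param *param;

	if (blob_len < RTA_SPACE(sizeof(*param)) + auth_keylen + enc_keylen)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enc_keylen);

	memcpy(blob + RTA_SPACE(sizeof(*param)), auth_key, auth_keylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + auth_keylen,
	       enc_key, enc_keylen);
	return 0;
}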
@@ -791,7 +793,7 @@
 	 * assoc. + iv + data -compact in one table
 	 * if assoclen is ZERO only IV perform
 	 */
-	cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+	u32 mlli_addr = areq_ctx->assoc.sram_addr;
 	u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 
 	if (areq_ctx->is_single_pass) {
@@ -1008,7 +1010,7 @@
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
 	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
-		     ctx->drvdata->hash_len_sz);
+		     ctx->hash_len);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
@@ -1107,7 +1109,7 @@
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
 	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
-		      ctx->drvdata->hash_len_sz);
+		      ctx->hash_len);
 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 	set_cipher_do(&desc[idx], DO_PAD);
@@ -1137,7 +1139,7 @@
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
 	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
-		     ctx->drvdata->hash_len_sz);
+		     ctx->hash_len);
 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -1161,11 +1163,11 @@
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
-	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
-	    !req_ctx->is_single_pass) {
+	    !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
 		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
-			(unsigned int)ctx->drvdata->mlli_sram_addr,
+			ctx->drvdata->mlli_sram_addr,
 			req_ctx->mlli_params.mlli_len);
 		/* Copy MLLI table host-to-sram */
 		hw_desc_init(&desc[*seq_size]);
@@ -1217,7 +1219,7 @@
 		 req_ctx->is_single_pass);
 
 	if (req_ctx->is_single_pass) {
-		/**
+		/*
 		 * Single-pass flow
 		 */
 		cc_set_hmac_desc(req, desc, seq_size);
@@ -1229,7 +1231,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Double-pass flow
 	 * Fallback for unsupported single-pass modes,
 	 * i.e. using assoc. data of non-word-multiple
@@ -1270,7 +1272,7 @@
 		 req_ctx->is_single_pass);
 
 	if (req_ctx->is_single_pass) {
-		/**
+		/*
 		 * Single-pass flow
 		 */
 		cc_set_xcbc_desc(req, desc, seq_size);
@@ -1281,7 +1283,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Double-pass flow
 	 * Fallback for unsupported single-pass modes,
 	 * i.e. using assoc. data of non-word-multiple
@@ -1554,7 +1556,7 @@
 	/* taken from crypto/ccm.c */
 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
 	if (l < 2 || l > 8) {
-		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+		dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
 		return -EINVAL;
 	}
 	memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1606,7 +1608,6 @@
 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
 	       CCM_BLOCK_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 
 static void cc_set_ghash_desc(struct aead_request *req,
@@ -1794,12 +1795,6 @@
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int cipher_flow_mode;
 
-	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		cipher_flow_mode = AES_and_HASH;
-	} else { /* Encrypt */
-		cipher_flow_mode = AES_to_HASH_and_DOUT;
-	}
-
 	//in RFC4543 no data to encrypt. just copy data from src to dest.
 	if (req_ctx->plaintext_authenticate_only) {
 		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
@@ -1809,6 +1804,12 @@
 		cc_set_gctr_desc(req, desc, seq_size);
 		cc_proc_gcm_result(req, desc, seq_size);
 		return 0;
+	}
+
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		cipher_flow_mode = AES_and_HASH;
+	} else { /* Encrypt */
+		cipher_flow_mode = AES_to_HASH_and_DOUT;
 	}
 
 	// for gcm and rfc4106.
@@ -1865,8 +1866,7 @@
 	 */
 	__be64 temp64;
 
-	temp64 = cpu_to_be64((req_ctx->assoclen +
-			      GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+	temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
 	memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 	temp64 = 0;
 	memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1886,7 +1886,6 @@
 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
 	       GCM_BLOCK_RFC4_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 
 static int cc_proc_aead(struct aead_request *req,
@@ -1912,13 +1911,12 @@
 	if (validate_data_size(ctx, direct, req)) {
 		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
 			req->cryptlen, areq_ctx->assoclen);
-		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		return -EINVAL;
 	}
 
 	/* Setup request structure */
-	cc_req.user_cb = (void *)cc_aead_complete;
-	cc_req.user_arg = (void *)req;
+	cc_req.user_cb = cc_aead_complete;
+	cc_req.user_arg = req;
 
 	/* Setup request context */
 	areq_ctx->gen_ctx.op_type = direct;
@@ -1933,9 +1931,8 @@
 		 */
 		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
 		       CTR_RFC3686_NONCE_SIZE);
-		if (!areq_ctx->backup_giv) /*User none-generated IV*/
-			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
-			       req->iv, CTR_RFC3686_IV_SIZE);
+		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+		       CTR_RFC3686_IV_SIZE);
 		/* Initialize counter portion of counter block */
 		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
 			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -1981,46 +1978,11 @@
 		goto exit;
 	}
 
-	/* do we need to generate IV? */
-	if (areq_ctx->backup_giv) {
-		/* set the DMA mapped IV address*/
-		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CTR_RFC3686_NONCE_SIZE;
-			cc_req.ivgen_dma_addr_len = 1;
-		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
-			/* In ccm, the IV needs to exist both inside B0 and
-			 * inside the counter.It is also copied to iv_dma_addr
-			 * for other reasons (like returning it to the user).
-			 * So, using 3 (identical) IV outputs.
-			 */
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[1] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[2] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr_len = 3;
-		} else {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr;
-			cc_req.ivgen_dma_addr_len = 1;
-		}
-
-		/* set the IV size (8/16 B long)*/
-		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
-	}
-
 	/* STAT_PHASE_2: Create sequence */
 
 	/* Load MLLI tables to SRAM if necessary */
 	cc_mlli_to_sram(req, desc, &seq_len);
 
-	/*TODO: move seq len by reference */
 	switch (ctx->auth_mode) {
 	case DRV_HASH_SHA1:
 	case DRV_HASH_SHA256:
@@ -2065,10 +2027,6 @@
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2082,23 +2040,17 @@
 	/* Very similar to cc_aead_encrypt() above. */
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	int rc = -EINVAL;
+	int rc;
 
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
 		goto out;
-	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = true;
+	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
 	cc_proc_rfc4309_ccm(req);
 
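crypto_ipsec_check_assoclen() (a static inline in <crypto/gcm.h>) replaces the driver-local valid_assoclen() removed earlier in this patch: for the IPsec wrappers (rfc4106/rfc4309/rfc4543) the associated data must be 16 or 20 bytes, i.e. the 8- or 12-byte ESP header (SPI plus 32-bit sequence number, or SPI plus 64-bit extended sequence number) followed by the 8-byte IV these templates carry in the AD. To my understanding the helper reduces to something like:

/* Paraphrase of the generic helper's logic, for illustration only. */
static inline int ipsec_check_assoclen(unsigned int assoclen)
{
	switch (assoclen) {
	case 16:
	case 20:
		return 0;
	}

	return -EINVAL;
}

Note also that the entry points now subtract CCM_BLOCK_IV_SIZE (or GCM_BLOCK_RFC4_IV_SIZE) from req->assoclen themselves instead of mangling it inside cc_proc_rfc4309_ccm()/cc_proc_rfc4_gcm().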
@@ -2119,10 +2071,6 @@
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2133,25 +2081,19 @@
 
 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 {
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
+	int rc;
 
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
 		goto out;
-	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
+	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
-	areq_ctx->is_gcm4543 = true;
 	cc_proc_rfc4309_ccm(req);
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
@@ -2251,30 +2193,20 @@
 
 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 {
-	/* Very similar to cc_aead_encrypt() above. */
-
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
+	int rc;
 
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
 		goto out;
-	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-
-	areq_ctx->plaintext_authenticate_only = false;
+	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2285,10 +2217,12 @@
 
 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 {
-	/* Very similar to cc_aead_encrypt() above. */
-
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
+
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
+		goto out;
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
@@ -2298,44 +2232,32 @@
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
-
+out:
 	return rc;
 }
 
 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 {
-	/* Very similar to cc_aead_decrypt() above. */
-
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc = -EINVAL;
+	int rc;
 
-	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
 		goto out;
-	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-
-	areq_ctx->plaintext_authenticate_only = false;
+	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2346,10 +2268,12 @@
 
 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 {
-	/* Very similar to cc_aead_decrypt() above. */
-
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
+
+	rc = crypto_ipsec_check_assoclen(req->assoclen);
+	if (rc)
+		goto out;
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
@@ -2359,15 +2283,13 @@
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
-
+out:
 	return rc;
 }
 
@@ -2391,13 +2313,14 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_SHA1,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(hmac(sha1),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.template_aead = {
-			.setkey = cc_aead_setkey,
+			.setkey = cc_des3_aead_setkey,
 			.setauthsize = cc_aead_setauthsize,
 			.encrypt = cc_aead_encrypt,
 			.decrypt = cc_aead_decrypt,
@@ -2410,6 +2333,7 @@
 		.flow_mode = S_DIN_to_DES,
 		.auth_mode = DRV_HASH_SHA1,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(hmac(sha256),cbc(aes))",
@@ -2429,13 +2353,14 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_SHA256,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(hmac(sha256),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.template_aead = {
-			.setkey = cc_aead_setkey,
+			.setkey = cc_des3_aead_setkey,
 			.setauthsize = cc_aead_setauthsize,
 			.encrypt = cc_aead_encrypt,
 			.decrypt = cc_aead_decrypt,
@@ -2448,6 +2373,7 @@
 		.flow_mode = S_DIN_to_DES,
 		.auth_mode = DRV_HASH_SHA256,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(xcbc(aes),cbc(aes))",
@@ -2467,6 +2393,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_XCBC_MAC,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
@@ -2486,6 +2413,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_SHA1,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
@@ -2505,6 +2433,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_SHA256,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
@@ -2524,6 +2453,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_XCBC_MAC,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "ccm(aes)",
@@ -2543,6 +2473,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "rfc4309(ccm(aes))",
@@ -2562,6 +2493,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "gcm(aes)",
@@ -2581,6 +2513,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "rfc4106(gcm(aes))",
@@ -2600,6 +2533,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 	{
 		.name = "rfc4543(gcm(aes))",
@@ -2619,6 +2553,7 @@
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.min_hw_rev = CC_HW_REV_630,
+		.std_body = CC_STD_NIST,
 	},
 };
 
@@ -2628,7 +2563,7 @@
 	struct cc_crypto_alg *t_alg;
 	struct aead_alg *alg;
 
-	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg)
 		return ERR_PTR(-ENOMEM);
 
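Switching to devm_kzalloc() ties the allocation's lifetime to the device, which is why the kfree(t_alg)/kfree(aead_handle) calls disappear from cc_aead_free() below: devres releases the memory automatically when the device is unbound. A generic sketch of the pattern (my_probe() and struct my_state are illustrative, not driver code):

#include <linux/platform_device.h>

struct my_state { int dummy; };

static int my_probe(struct platform_device *pdev)
{
	struct my_state *st;

	/* Freed by devres on unbind; no kfree() needed in error paths. */
	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	platform_set_drvdata(pdev, st);
	return 0;
}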
@@ -2642,6 +2577,7 @@
 
 	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_blocksize = tmpl->blocksize;
 	alg->init = cc_aead_init;
 	alg->exit = cc_aead_exit;
 
@@ -2657,19 +2593,12 @@
 int cc_aead_free(struct cc_drvdata *drvdata)
 {
 	struct cc_crypto_alg *t_alg, *n;
-	struct cc_aead_handle *aead_handle =
-		(struct cc_aead_handle *)drvdata->aead_handle;
+	struct cc_aead_handle *aead_handle = drvdata->aead_handle;
 
-	if (aead_handle) {
-		/* Remove registered algs */
-		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
-					 entry) {
-			crypto_unregister_aead(&t_alg->aead_alg);
-			list_del(&t_alg->entry);
-			kfree(t_alg);
-		}
-		kfree(aead_handle);
-		drvdata->aead_handle = NULL;
+	/* Remove registered algs */
+	list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+		crypto_unregister_aead(&t_alg->aead_alg);
+		list_del(&t_alg->entry);
 	}
 
 	return 0;
@@ -2683,7 +2612,7 @@
 	int alg;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+	aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
 	if (!aead_handle) {
 		rc = -ENOMEM;
 		goto fail0;
@@ -2696,14 +2625,14 @@
 						MAX_HMAC_DIGEST_SIZE);
 
 	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
-		dev_err(dev, "SRAM pool exhausted\n");
 		rc = -ENOMEM;
 		goto fail1;
 	}
 
 	/* Linux crypto */
 	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
-		if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+		    !(drvdata->std_bodies & aead_algs[alg].std_body))
 			continue;
 
 		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
@@ -2718,18 +2647,16 @@
 		if (rc) {
 			dev_err(dev, "%s alg registration failed\n",
 				t_alg->aead_alg.base.cra_driver_name);
-			goto fail2;
-		} else {
-			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
-			dev_dbg(dev, "Registered %s\n",
-				t_alg->aead_alg.base.cra_driver_name);
+			goto fail1;
 		}
+
+		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+		dev_dbg(dev, "Registered %s\n",
+			t_alg->aead_alg.base.cra_driver_name);
 	}
 
 	return 0;
 
-fail2:
-	kfree(t_alg);
 fail1:
 	cc_aead_free(drvdata);
 fail0: