| .. | .. |
|---|
| 1 | 1 | // SPDX-License-Identifier: GPL-2.0 |
|---|
| 2 | | -/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ |
|---|
| 2 | +/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ |
|---|
| 3 | 3 | |
|---|
| 4 | 4 | #include <linux/kernel.h> |
|---|
| 5 | 5 | #include <linux/module.h> |
|---|
| 6 | 6 | #include <crypto/algapi.h> |
|---|
| 7 | 7 | #include <crypto/internal/aead.h> |
|---|
| 8 | 8 | #include <crypto/authenc.h> |
|---|
| 9 | | -#include <crypto/des.h> |
|---|
| 9 | +#include <crypto/gcm.h> |
|---|
| 10 | 10 | #include <linux/rtnetlink.h> |
|---|
| 11 | +#include <crypto/internal/des.h> |
|---|
| 11 | 12 | #include "cc_driver.h" |
|---|
| 12 | 13 | #include "cc_buffer_mgr.h" |
|---|
| 13 | 14 | #include "cc_aead.h" |
|---|
| .. | .. |
|---|
| 23 | 24 | #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE) |
|---|
| 24 | 25 | #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE) |
|---|
| 25 | 26 | |
|---|
| 26 | | -#define AES_CCM_RFC4309_NONCE_SIZE 3 |
|---|
| 27 | 27 | #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE |
|---|
| 28 | 28 | |
|---|
| 29 | | -/* Value of each ICV_CMP byte (of 8) in case of success */ |
|---|
| 30 | | -#define ICV_VERIF_OK 0x01 |
|---|
| 31 | | - |
|---|
| 32 | 29 | struct cc_aead_handle { |
|---|
| 33 | | - cc_sram_addr_t sram_workspace_addr; |
|---|
| 30 | + u32 sram_workspace_addr; |
|---|
| 34 | 31 | struct list_head aead_list; |
|---|
| 35 | 32 | }; |
|---|
| 36 | 33 | |
|---|
| .. | .. |
|---|
| 58 | 55 | unsigned int enc_keylen; |
|---|
| 59 | 56 | unsigned int auth_keylen; |
|---|
| 60 | 57 | unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */ |
|---|
| 58 | + unsigned int hash_len; |
|---|
| 61 | 59 | enum drv_cipher_mode cipher_mode; |
|---|
| 62 | 60 | enum cc_flow_mode flow_mode; |
|---|
| 63 | 61 | enum drv_hash_mode auth_mode; |
|---|
| 64 | 62 | }; |
|---|
| 65 | | - |
|---|
| 66 | | -static inline bool valid_assoclen(struct aead_request *req) |
|---|
| 67 | | -{ |
|---|
| 68 | | - return ((req->assoclen == 16) || (req->assoclen == 20)); |
|---|
| 69 | | -} |
|---|
| 70 | 63 | |
|---|
| 71 | 64 | static void cc_aead_exit(struct crypto_aead *tfm) |
|---|
| 72 | 65 | { |
|---|
| .. | .. |
|---|
| 120 | 113 | hmac->padded_authkey = NULL; |
|---|
| 121 | 114 | } |
|---|
| 122 | 115 | } |
|---|
| 116 | +} |
|---|
| 117 | + |
|---|
| 118 | +static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm) |
|---|
| 119 | +{ |
|---|
| 120 | + struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 121 | + |
|---|
| 122 | + return cc_get_default_hash_len(ctx->drvdata); |
|---|
| 123 | 123 | } |
|---|
| 124 | 124 | |
|---|
| 125 | 125 | static int cc_aead_init(struct crypto_aead *tfm) |
|---|
| .. | .. |
|---|
| 196 | 196 | ctx->auth_state.hmac.ipad_opad = NULL; |
|---|
| 197 | 197 | ctx->auth_state.hmac.padded_authkey = NULL; |
|---|
| 198 | 198 | } |
|---|
| 199 | + ctx->hash_len = cc_get_aead_hash_len(tfm); |
|---|
| 199 | 200 | |
|---|
| 200 | 201 | return 0; |
|---|
| 201 | 202 | |
|---|
| .. | .. |
|---|
| 210 | 211 | struct aead_req_ctx *areq_ctx = aead_request_ctx(areq); |
|---|
| 211 | 212 | struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); |
|---|
| 212 | 213 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 214 | + |
|---|
| 215 | + /* BACKLOG notification */ |
|---|
| 216 | + if (err == -EINPROGRESS) |
|---|
| 217 | + goto done; |
|---|
| 213 | 218 | |
|---|
| 214 | 219 | cc_unmap_aead_request(dev, areq); |
|---|
| 215 | 220 | |
|---|
| .. | .. |
|---|
| 227 | 232 | /* In case of payload authentication failure, MUST NOT |
|---|
| 228 | 233 | * revealed the decrypted message --> zero its memory. |
|---|
| 229 | 234 | */ |
|---|
| 230 | | - cc_zero_sgl(areq->dst, areq->cryptlen); |
|---|
| 235 | + sg_zero_buffer(areq->dst, sg_nents(areq->dst), |
|---|
| 236 | + areq->cryptlen, areq->assoclen); |
|---|
| 231 | 237 | err = -EBADMSG; |
|---|
| 232 | 238 | } |
|---|
| 233 | | - } else { /*ENCRYPT*/ |
|---|
| 234 | | - if (areq_ctx->is_icv_fragmented) { |
|---|
| 235 | | - u32 skip = areq->cryptlen + areq_ctx->dst_offset; |
|---|
| 239 | + /*ENCRYPT*/ |
|---|
| 240 | + } else if (areq_ctx->is_icv_fragmented) { |
|---|
| 241 | + u32 skip = areq->cryptlen + areq_ctx->dst_offset; |
|---|
| 236 | 242 | |
|---|
| 237 | | - cc_copy_sg_portion(dev, areq_ctx->mac_buf, |
|---|
| 238 | | - areq_ctx->dst_sgl, skip, |
|---|
| 239 | | - (skip + ctx->authsize), |
|---|
| 240 | | - CC_SG_FROM_BUF); |
|---|
| 241 | | - } |
|---|
| 242 | | - |
|---|
| 243 | | - /* If an IV was generated, copy it back to the user provided |
|---|
| 244 | | - * buffer. |
|---|
| 245 | | - */ |
|---|
| 246 | | - if (areq_ctx->backup_giv) { |
|---|
| 247 | | - if (ctx->cipher_mode == DRV_CIPHER_CTR) |
|---|
| 248 | | - memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + |
|---|
| 249 | | - CTR_RFC3686_NONCE_SIZE, |
|---|
| 250 | | - CTR_RFC3686_IV_SIZE); |
|---|
| 251 | | - else if (ctx->cipher_mode == DRV_CIPHER_CCM) |
|---|
| 252 | | - memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + |
|---|
| 253 | | - CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE); |
|---|
| 254 | | - } |
|---|
| 243 | + cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl, |
|---|
| 244 | + skip, (skip + ctx->authsize), |
|---|
| 245 | + CC_SG_FROM_BUF); |
|---|
| 255 | 246 | } |
|---|
| 256 | 247 | done: |
|---|
| 257 | 248 | aead_request_complete(areq, err); |
|---|
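The reworked completion callback above does two things worth noting: it returns early on -EINPROGRESS, which is only a backlog notification and must not unmap the request, and on an ICV mismatch it wipes the decrypted payload with the generic sg_zero_buffer() helper instead of the driver-local cc_zero_sgl(). A minimal sketch of that wipe, with placeholder parameter names rather than the real request fields, assuming the sg_zero_buffer() declaration from linux/scatterlist.h:

```c
#include <linux/scatterlist.h>

/*
 * Illustration only; "dst", "cryptlen" and "assoclen" are placeholders
 * for the corresponding aead_request fields. sg_zero_buffer() clears
 * @cryptlen bytes of the destination scatterlist, skipping the
 * @assoclen bytes of associated data in front of the payload, so a
 * failed decryption never hands plaintext back to the caller.
 */
static void wipe_failed_payload(struct scatterlist *dst,
				unsigned int cryptlen, unsigned int assoclen)
{
	sg_zero_buffer(dst, sg_nents(dst), cryptlen, assoclen);
}
```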
| .. | .. |
|---|
| 298 | 289 | return 4; |
|---|
| 299 | 290 | } |
|---|
| 300 | 291 | |
|---|
| 301 | | -static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) |
|---|
| 292 | +static unsigned int hmac_setkey(struct cc_hw_desc *desc, |
|---|
| 293 | + struct cc_aead_ctx *ctx) |
|---|
| 302 | 294 | { |
|---|
| 303 | 295 | unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; |
|---|
| 304 | 296 | unsigned int digest_ofs = 0; |
|---|
| .. | .. |
|---|
| 327 | 319 | /* Load the hash current length*/ |
|---|
| 328 | 320 | hw_desc_init(&desc[idx]); |
|---|
| 329 | 321 | set_cipher_mode(&desc[idx], hash_mode); |
|---|
| 330 | | - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); |
|---|
| 322 | + set_din_const(&desc[idx], 0, ctx->hash_len); |
|---|
| 331 | 323 | set_flow_mode(&desc[idx], S_DIN_to_HASH); |
|---|
| 332 | 324 | set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); |
|---|
| 333 | 325 | idx++; |
|---|
| .. | .. |
|---|
| 389 | 381 | return -EINVAL; |
|---|
| 390 | 382 | break; |
|---|
| 391 | 383 | default: |
|---|
| 392 | | - dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); |
|---|
| 384 | + dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); |
|---|
| 393 | 385 | return -EINVAL; |
|---|
| 394 | 386 | } |
|---|
| 395 | 387 | /* Check cipher key size */ |
|---|
| 396 | 388 | if (ctx->flow_mode == S_DIN_to_DES) { |
|---|
| 397 | 389 | if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) { |
|---|
| 398 | | - dev_err(dev, "Invalid cipher(3DES) key size: %u\n", |
|---|
| 390 | + dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n", |
|---|
| 399 | 391 | ctx->enc_keylen); |
|---|
| 400 | 392 | return -EINVAL; |
|---|
| 401 | 393 | } |
|---|
| .. | .. |
|---|
| 403 | 395 | if (ctx->enc_keylen != AES_KEYSIZE_128 && |
|---|
| 404 | 396 | ctx->enc_keylen != AES_KEYSIZE_192 && |
|---|
| 405 | 397 | ctx->enc_keylen != AES_KEYSIZE_256) { |
|---|
| 406 | | - dev_err(dev, "Invalid cipher(AES) key size: %u\n", |
|---|
| 398 | + dev_dbg(dev, "Invalid cipher(AES) key size: %u\n", |
|---|
| 407 | 399 | ctx->enc_keylen); |
|---|
| 408 | 400 | return -EINVAL; |
|---|
| 409 | 401 | } |
|---|
| .. | .. |
|---|
| 421 | 413 | dma_addr_t key_dma_addr = 0; |
|---|
| 422 | 414 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 423 | 415 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 424 | | - u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode); |
|---|
| 416 | + u32 larval_addr; |
|---|
| 425 | 417 | struct cc_crypto_req cc_req = {}; |
|---|
| 426 | 418 | unsigned int blocksize; |
|---|
| 427 | 419 | unsigned int digestsize; |
|---|
| .. | .. |
|---|
| 452 | 444 | if (!key) |
|---|
| 453 | 445 | return -ENOMEM; |
|---|
| 454 | 446 | |
|---|
| 455 | | - key_dma_addr = dma_map_single(dev, (void *)key, keylen, |
|---|
| 456 | | - DMA_TO_DEVICE); |
|---|
| 447 | + key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE); |
|---|
| 457 | 448 | if (dma_mapping_error(dev, key_dma_addr)) { |
|---|
| 458 | 449 | dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", |
|---|
| 459 | 450 | key, keylen); |
|---|
| 460 | | - kzfree(key); |
|---|
| 451 | + kfree_sensitive(key); |
|---|
| 461 | 452 | return -ENOMEM; |
|---|
| 462 | 453 | } |
|---|
| 463 | 454 | if (keylen > blocksize) { |
|---|
| 464 | 455 | /* Load hash initial state */ |
|---|
| 465 | 456 | hw_desc_init(&desc[idx]); |
|---|
| 466 | 457 | set_cipher_mode(&desc[idx], hashmode); |
|---|
| 458 | + larval_addr = cc_larval_digest_addr(ctx->drvdata, |
|---|
| 459 | + ctx->auth_mode); |
|---|
| 467 | 460 | set_din_sram(&desc[idx], larval_addr, digestsize); |
|---|
| 468 | 461 | set_flow_mode(&desc[idx], S_DIN_to_HASH); |
|---|
| 469 | 462 | set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); |
|---|
| .. | .. |
|---|
| 472 | 465 | /* Load the hash current length*/ |
|---|
| 473 | 466 | hw_desc_init(&desc[idx]); |
|---|
| 474 | 467 | set_cipher_mode(&desc[idx], hashmode); |
|---|
| 475 | | - set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz); |
|---|
| 468 | + set_din_const(&desc[idx], 0, ctx->hash_len); |
|---|
| 476 | 469 | set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); |
|---|
| 477 | 470 | set_flow_mode(&desc[idx], S_DIN_to_HASH); |
|---|
| 478 | 471 | set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); |
|---|
| .. | .. |
|---|
| 540 | 533 | if (key_dma_addr) |
|---|
| 541 | 534 | dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); |
|---|
| 542 | 535 | |
|---|
| 543 | | - kzfree(key); |
|---|
| 536 | + kfree_sensitive(key); |
|---|
| 544 | 537 | |
|---|
| 545 | 538 | return rc; |
|---|
| 546 | 539 | } |
|---|
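kzfree() was renamed to kfree_sensitive(), which is why the temporary key buffer in cc_get_plain_hmac_key() above is now released under the new name; the helper clears the allocation before returning it to the allocator. A small sketch of the pattern, with hypothetical function and parameter names:

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper; the point is the release path. The private copy
 * of the key is freed with kfree_sensitive(), which zeroizes the buffer
 * before freeing so key material does not linger in the heap.
 */
static int hash_key_copy(const u8 *key, unsigned int keylen)
{
	u8 *tmp = kmemdup(key, keylen, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	/* ... DMA-map and hash the copy, as cc_get_plain_hmac_key() does ... */

	kfree_sensitive(tmp);
	return 0;
}
```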
| .. | .. |
|---|
| 566 | 559 | |
|---|
| 567 | 560 | rc = crypto_authenc_extractkeys(&keys, key, keylen); |
|---|
| 568 | 561 | if (rc) |
|---|
| 569 | | - goto badkey; |
|---|
| 562 | + return rc; |
|---|
| 570 | 563 | enckey = keys.enckey; |
|---|
| 571 | 564 | authkey = keys.authkey; |
|---|
| 572 | 565 | ctx->enc_keylen = keys.enckeylen; |
|---|
| .. | .. |
|---|
| 574 | 567 | |
|---|
| 575 | 568 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
|---|
| 576 | 569 | /* the nonce is stored in bytes at end of key */ |
|---|
| 577 | | - rc = -EINVAL; |
|---|
| 578 | 570 | if (ctx->enc_keylen < |
|---|
| 579 | 571 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) |
|---|
| 580 | | - goto badkey; |
|---|
| 572 | + return -EINVAL; |
|---|
| 581 | 573 | /* Copy nonce from last 4 bytes in CTR key to |
|---|
| 582 | 574 | * first 4 bytes in CTR IV |
|---|
| 583 | 575 | */ |
|---|
| .. | .. |
|---|
| 595 | 587 | |
|---|
| 596 | 588 | rc = validate_keys_sizes(ctx); |
|---|
| 597 | 589 | if (rc) |
|---|
| 598 | | - goto badkey; |
|---|
| 590 | + return rc; |
|---|
| 599 | 591 | |
|---|
| 600 | 592 | /* STAT_PHASE_1: Copy key to ctx */ |
|---|
| 601 | 593 | |
|---|
| .. | .. |
|---|
| 609 | 601 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ |
|---|
| 610 | 602 | rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); |
|---|
| 611 | 603 | if (rc) |
|---|
| 612 | | - goto badkey; |
|---|
| 604 | + return rc; |
|---|
| 613 | 605 | } |
|---|
| 614 | 606 | |
|---|
| 615 | 607 | /* STAT_PHASE_2: Create sequence */ |
|---|
| .. | .. |
|---|
| 626 | 618 | break; /* No auth. key setup */ |
|---|
| 627 | 619 | default: |
|---|
| 628 | 620 | dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); |
|---|
| 629 | | - rc = -ENOTSUPP; |
|---|
| 630 | | - goto badkey; |
|---|
| 621 | + return -ENOTSUPP; |
|---|
| 631 | 622 | } |
|---|
| 632 | 623 | |
|---|
| 633 | 624 | /* STAT_PHASE_3: Submit sequence to HW */ |
|---|
| .. | .. |
|---|
| 636 | 627 | rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len); |
|---|
| 637 | 628 | if (rc) { |
|---|
| 638 | 629 | dev_err(dev, "send_request() failed (rc=%d)\n", rc); |
|---|
| 639 | | - goto setkey_error; |
|---|
| 630 | + return rc; |
|---|
| 640 | 631 | } |
|---|
| 641 | 632 | } |
|---|
| 642 | 633 | |
|---|
| 643 | 634 | /* Update STAT_PHASE_3 */ |
|---|
| 644 | 635 | return rc; |
|---|
| 636 | +} |
|---|
| 645 | 637 | |
|---|
| 646 | | -badkey: |
|---|
| 647 | | - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
|---|
| 638 | +static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, |
|---|
| 639 | + unsigned int keylen) |
|---|
| 640 | +{ |
|---|
| 641 | + struct crypto_authenc_keys keys; |
|---|
| 642 | + int err; |
|---|
| 648 | 643 | |
|---|
| 649 | | -setkey_error: |
|---|
| 650 | | - return rc; |
|---|
| 644 | + err = crypto_authenc_extractkeys(&keys, key, keylen); |
|---|
| 645 | + if (unlikely(err)) |
|---|
| 646 | + return err; |
|---|
| 647 | + |
|---|
| 648 | + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: |
|---|
| 649 | + cc_aead_setkey(aead, key, keylen); |
|---|
| 650 | + |
|---|
| 651 | + memzero_explicit(&keys, sizeof(keys)); |
|---|
| 652 | + return err; |
|---|
| 651 | 653 | } |
|---|
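For the des3_ede authenc templates, key validation now happens before the generic setkey runs. Below is a commented restatement of the function added in the hunk above, only to spell out what each helper does; it assumes the usual crypto/authenc.h and crypto/internal/des.h declarations:

```c
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <linux/string.h>

/* Commented restatement of cc_des3_aead_setkey() from the hunk above. */
static int des3_authenc_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	/*
	 * The authenc() key blob carries the encryption key length in an
	 * rtattr, followed by the auth key and then the cipher key;
	 * crypto_authenc_extractkeys() splits it into keys.authkey/enckey.
	 */
	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	/*
	 * Reject 3DES keys of the wrong length and, depending on the tfm's
	 * weak-key policy, keys with repeated DES components, before the
	 * driver's generic setkey programs the hardware.
	 */
	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      cc_aead_setkey(aead, key, keylen);

	/* Scrub the parsed key pointers and lengths from the stack. */
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
```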
| 652 | 654 | |
|---|
| 653 | 655 | static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, |
|---|
| .. | .. |
|---|
| 791 | 793 | * assoc. + iv + data -compact in one table |
|---|
| 792 | 794 | * if assoclen is ZERO only IV perform |
|---|
| 793 | 795 | */ |
|---|
| 794 | | - cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr; |
|---|
| 796 | + u32 mlli_addr = areq_ctx->assoc.sram_addr; |
|---|
| 795 | 797 | u32 mlli_nents = areq_ctx->assoc.mlli_nents; |
|---|
| 796 | 798 | |
|---|
| 797 | 799 | if (areq_ctx->is_single_pass) { |
|---|
| .. | .. |
|---|
| 1008 | 1010 | hw_desc_init(&desc[idx]); |
|---|
| 1009 | 1011 | set_cipher_mode(&desc[idx], hash_mode); |
|---|
| 1010 | 1012 | set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), |
|---|
| 1011 | | - ctx->drvdata->hash_len_sz); |
|---|
| 1013 | + ctx->hash_len); |
|---|
| 1012 | 1014 | set_flow_mode(&desc[idx], S_DIN_to_HASH); |
|---|
| 1013 | 1015 | set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); |
|---|
| 1014 | 1016 | idx++; |
|---|
| .. | .. |
|---|
| 1107 | 1109 | hw_desc_init(&desc[idx]); |
|---|
| 1108 | 1110 | set_cipher_mode(&desc[idx], hash_mode); |
|---|
| 1109 | 1111 | set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, |
|---|
| 1110 | | - ctx->drvdata->hash_len_sz); |
|---|
| 1112 | + ctx->hash_len); |
|---|
| 1111 | 1113 | set_flow_mode(&desc[idx], S_HASH_to_DOUT); |
|---|
| 1112 | 1114 | set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); |
|---|
| 1113 | 1115 | set_cipher_do(&desc[idx], DO_PAD); |
|---|
| .. | .. |
|---|
| 1137 | 1139 | hw_desc_init(&desc[idx]); |
|---|
| 1138 | 1140 | set_cipher_mode(&desc[idx], hash_mode); |
|---|
| 1139 | 1141 | set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), |
|---|
| 1140 | | - ctx->drvdata->hash_len_sz); |
|---|
| 1142 | + ctx->hash_len); |
|---|
| 1141 | 1143 | set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); |
|---|
| 1142 | 1144 | set_flow_mode(&desc[idx], S_DIN_to_HASH); |
|---|
| 1143 | 1145 | set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); |
|---|
| .. | .. |
|---|
| 1161 | 1163 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 1162 | 1164 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 1163 | 1165 | |
|---|
| 1164 | | - if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || |
|---|
| 1166 | + if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || |
|---|
| 1165 | 1167 | req_ctx->data_buff_type == CC_DMA_BUF_MLLI || |
|---|
| 1166 | | - !req_ctx->is_single_pass) { |
|---|
| 1168 | + !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) { |
|---|
| 1167 | 1169 | dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", |
|---|
| 1168 | | - (unsigned int)ctx->drvdata->mlli_sram_addr, |
|---|
| 1170 | + ctx->drvdata->mlli_sram_addr, |
|---|
| 1169 | 1171 | req_ctx->mlli_params.mlli_len); |
|---|
| 1170 | 1172 | /* Copy MLLI table host-to-sram */ |
|---|
| 1171 | 1173 | hw_desc_init(&desc[*seq_size]); |
|---|
| .. | .. |
|---|
| 1217 | 1219 | req_ctx->is_single_pass); |
|---|
| 1218 | 1220 | |
|---|
| 1219 | 1221 | if (req_ctx->is_single_pass) { |
|---|
| 1220 | | - /** |
|---|
| 1222 | + /* |
|---|
| 1221 | 1223 | * Single-pass flow |
|---|
| 1222 | 1224 | */ |
|---|
| 1223 | 1225 | cc_set_hmac_desc(req, desc, seq_size); |
|---|
| .. | .. |
|---|
| 1229 | 1231 | return; |
|---|
| 1230 | 1232 | } |
|---|
| 1231 | 1233 | |
|---|
| 1232 | | - /** |
|---|
| 1234 | + /* |
|---|
| 1233 | 1235 | * Double-pass flow |
|---|
| 1234 | 1236 | * Fallback for unsupported single-pass modes, |
|---|
| 1235 | 1237 | * i.e. using assoc. data of non-word-multiple |
|---|
| .. | .. |
|---|
| 1270 | 1272 | req_ctx->is_single_pass); |
|---|
| 1271 | 1273 | |
|---|
| 1272 | 1274 | if (req_ctx->is_single_pass) { |
|---|
| 1273 | | - /** |
|---|
| 1275 | + /* |
|---|
| 1274 | 1276 | * Single-pass flow |
|---|
| 1275 | 1277 | */ |
|---|
| 1276 | 1278 | cc_set_xcbc_desc(req, desc, seq_size); |
|---|
| .. | .. |
|---|
| 1281 | 1283 | return; |
|---|
| 1282 | 1284 | } |
|---|
| 1283 | 1285 | |
|---|
| 1284 | | - /** |
|---|
| 1286 | + /* |
|---|
| 1285 | 1287 | * Double-pass flow |
|---|
| 1286 | 1288 | * Fallback for unsupported single-pass modes, |
|---|
| 1287 | 1289 | * i.e. using assoc. data of non-word-multiple |
|---|
| .. | .. |
|---|
| 1554 | 1556 | /* taken from crypto/ccm.c */ |
|---|
| 1555 | 1557 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ |
|---|
| 1556 | 1558 | if (l < 2 || l > 8) { |
|---|
| 1557 | | - dev_err(dev, "illegal iv value %X\n", req->iv[0]); |
|---|
| 1559 | + dev_dbg(dev, "illegal iv value %X\n", req->iv[0]); |
|---|
| 1558 | 1560 | return -EINVAL; |
|---|
| 1559 | 1561 | } |
|---|
| 1560 | 1562 | memcpy(b0, req->iv, AES_BLOCK_SIZE); |
|---|
| .. | .. |
|---|
| 1606 | 1608 | memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, |
|---|
| 1607 | 1609 | CCM_BLOCK_IV_SIZE); |
|---|
| 1608 | 1610 | req->iv = areq_ctx->ctr_iv; |
|---|
| 1609 | | - areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; |
|---|
| 1610 | 1611 | } |
|---|
| 1611 | 1612 | |
|---|
| 1612 | 1613 | static void cc_set_ghash_desc(struct aead_request *req, |
|---|
| .. | .. |
|---|
| 1794 | 1795 | struct aead_req_ctx *req_ctx = aead_request_ctx(req); |
|---|
| 1795 | 1796 | unsigned int cipher_flow_mode; |
|---|
| 1796 | 1797 | |
|---|
| 1797 | | - if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { |
|---|
| 1798 | | - cipher_flow_mode = AES_and_HASH; |
|---|
| 1799 | | - } else { /* Encrypt */ |
|---|
| 1800 | | - cipher_flow_mode = AES_to_HASH_and_DOUT; |
|---|
| 1801 | | - } |
|---|
| 1802 | | - |
|---|
| 1803 | 1798 | //in RFC4543 no data to encrypt. just copy data from src to dest. |
|---|
| 1804 | 1799 | if (req_ctx->plaintext_authenticate_only) { |
|---|
| 1805 | 1800 | cc_proc_cipher_desc(req, BYPASS, desc, seq_size); |
|---|
| .. | .. |
|---|
| 1809 | 1804 | cc_set_gctr_desc(req, desc, seq_size); |
|---|
| 1810 | 1805 | cc_proc_gcm_result(req, desc, seq_size); |
|---|
| 1811 | 1806 | return 0; |
|---|
| 1807 | + } |
|---|
| 1808 | + |
|---|
| 1809 | + if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { |
|---|
| 1810 | + cipher_flow_mode = AES_and_HASH; |
|---|
| 1811 | + } else { /* Encrypt */ |
|---|
| 1812 | + cipher_flow_mode = AES_to_HASH_and_DOUT; |
|---|
| 1812 | 1813 | } |
|---|
| 1813 | 1814 | |
|---|
| 1814 | 1815 | // for gcm and rfc4106. |
|---|
| .. | .. |
|---|
| 1865 | 1866 | */ |
|---|
| 1866 | 1867 | __be64 temp64; |
|---|
| 1867 | 1868 | |
|---|
| 1868 | | - temp64 = cpu_to_be64((req_ctx->assoclen + |
|---|
| 1869 | | - GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); |
|---|
| 1869 | + temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8); |
|---|
| 1870 | 1870 | memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); |
|---|
| 1871 | 1871 | temp64 = 0; |
|---|
| 1872 | 1872 | memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); |
|---|
| .. | .. |
|---|
| 1886 | 1886 | memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, |
|---|
| 1887 | 1887 | GCM_BLOCK_RFC4_IV_SIZE); |
|---|
| 1888 | 1888 | req->iv = areq_ctx->ctr_iv; |
|---|
| 1889 | | - areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; |
|---|
| 1890 | 1889 | } |
|---|
| 1891 | 1890 | |
|---|
| 1892 | 1891 | static int cc_proc_aead(struct aead_request *req, |
|---|
| .. | .. |
|---|
| 1912 | 1911 | if (validate_data_size(ctx, direct, req)) { |
|---|
| 1913 | 1912 | dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", |
|---|
| 1914 | 1913 | req->cryptlen, areq_ctx->assoclen); |
|---|
| 1915 | | - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); |
|---|
| 1916 | 1914 | return -EINVAL; |
|---|
| 1917 | 1915 | } |
|---|
| 1918 | 1916 | |
|---|
| 1919 | 1917 | /* Setup request structure */ |
|---|
| 1920 | | - cc_req.user_cb = (void *)cc_aead_complete; |
|---|
| 1921 | | - cc_req.user_arg = (void *)req; |
|---|
| 1918 | + cc_req.user_cb = cc_aead_complete; |
|---|
| 1919 | + cc_req.user_arg = req; |
|---|
| 1922 | 1920 | |
|---|
| 1923 | 1921 | /* Setup request context */ |
|---|
| 1924 | 1922 | areq_ctx->gen_ctx.op_type = direct; |
|---|
| .. | .. |
|---|
| 1933 | 1931 | */ |
|---|
| 1934 | 1932 | memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, |
|---|
| 1935 | 1933 | CTR_RFC3686_NONCE_SIZE); |
|---|
| 1936 | | - if (!areq_ctx->backup_giv) /*User none-generated IV*/ |
|---|
| 1937 | | - memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, |
|---|
| 1938 | | - req->iv, CTR_RFC3686_IV_SIZE); |
|---|
| 1934 | + memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv, |
|---|
| 1935 | + CTR_RFC3686_IV_SIZE); |
|---|
| 1939 | 1936 | /* Initialize counter portion of counter block */ |
|---|
| 1940 | 1937 | *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + |
|---|
| 1941 | 1938 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); |
|---|
| .. | .. |
|---|
| 1981 | 1978 | goto exit; |
|---|
| 1982 | 1979 | } |
|---|
| 1983 | 1980 | |
|---|
| 1984 | | - /* do we need to generate IV? */ |
|---|
| 1985 | | - if (areq_ctx->backup_giv) { |
|---|
| 1986 | | - /* set the DMA mapped IV address*/ |
|---|
| 1987 | | - if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
|---|
| 1988 | | - cc_req.ivgen_dma_addr[0] = |
|---|
| 1989 | | - areq_ctx->gen_ctx.iv_dma_addr + |
|---|
| 1990 | | - CTR_RFC3686_NONCE_SIZE; |
|---|
| 1991 | | - cc_req.ivgen_dma_addr_len = 1; |
|---|
| 1992 | | - } else if (ctx->cipher_mode == DRV_CIPHER_CCM) { |
|---|
| 1993 | | - /* In ccm, the IV needs to exist both inside B0 and |
|---|
| 1994 | | - * inside the counter.It is also copied to iv_dma_addr |
|---|
| 1995 | | - * for other reasons (like returning it to the user). |
|---|
| 1996 | | - * So, using 3 (identical) IV outputs. |
|---|
| 1997 | | - */ |
|---|
| 1998 | | - cc_req.ivgen_dma_addr[0] = |
|---|
| 1999 | | - areq_ctx->gen_ctx.iv_dma_addr + |
|---|
| 2000 | | - CCM_BLOCK_IV_OFFSET; |
|---|
| 2001 | | - cc_req.ivgen_dma_addr[1] = |
|---|
| 2002 | | - sg_dma_address(&areq_ctx->ccm_adata_sg) + |
|---|
| 2003 | | - CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET; |
|---|
| 2004 | | - cc_req.ivgen_dma_addr[2] = |
|---|
| 2005 | | - sg_dma_address(&areq_ctx->ccm_adata_sg) + |
|---|
| 2006 | | - CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET; |
|---|
| 2007 | | - cc_req.ivgen_dma_addr_len = 3; |
|---|
| 2008 | | - } else { |
|---|
| 2009 | | - cc_req.ivgen_dma_addr[0] = |
|---|
| 2010 | | - areq_ctx->gen_ctx.iv_dma_addr; |
|---|
| 2011 | | - cc_req.ivgen_dma_addr_len = 1; |
|---|
| 2012 | | - } |
|---|
| 2013 | | - |
|---|
| 2014 | | - /* set the IV size (8/16 B long)*/ |
|---|
| 2015 | | - cc_req.ivgen_size = crypto_aead_ivsize(tfm); |
|---|
| 2016 | | - } |
|---|
| 2017 | | - |
|---|
| 2018 | 1981 | /* STAT_PHASE_2: Create sequence */ |
|---|
| 2019 | 1982 | |
|---|
| 2020 | 1983 | /* Load MLLI tables to SRAM if necessary */ |
|---|
| 2021 | 1984 | cc_mlli_to_sram(req, desc, &seq_len); |
|---|
| 2022 | 1985 | |
|---|
| 2023 | | - /*TODO: move seq len by reference */ |
|---|
| 2024 | 1986 | switch (ctx->auth_mode) { |
|---|
| 2025 | 1987 | case DRV_HASH_SHA1: |
|---|
| 2026 | 1988 | case DRV_HASH_SHA256: |
|---|
| .. | .. |
|---|
| 2065 | 2027 | /* No generated IV required */ |
|---|
| 2066 | 2028 | areq_ctx->backup_iv = req->iv; |
|---|
| 2067 | 2029 | areq_ctx->assoclen = req->assoclen; |
|---|
| 2068 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2069 | | - areq_ctx->is_gcm4543 = false; |
|---|
| 2070 | | - |
|---|
| 2071 | | - areq_ctx->plaintext_authenticate_only = false; |
|---|
| 2072 | 2030 | |
|---|
| 2073 | 2031 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); |
|---|
| 2074 | 2032 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| .. | .. |
|---|
| 2082 | 2040 | /* Very similar to cc_aead_encrypt() above. */ |
|---|
| 2083 | 2041 | |
|---|
| 2084 | 2042 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2085 | | - struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
|---|
| 2086 | | - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 2087 | | - struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 2088 | | - int rc = -EINVAL; |
|---|
| 2043 | + int rc; |
|---|
| 2089 | 2044 | |
|---|
| 2090 | | - if (!valid_assoclen(req)) { |
|---|
| 2091 | | - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); |
|---|
| 2045 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2046 | + if (rc) |
|---|
| 2092 | 2047 | goto out; |
|---|
| 2093 | | - } |
|---|
| 2094 | 2048 | |
|---|
| 2095 | 2049 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2096 | 2050 | |
|---|
| 2097 | 2051 | /* No generated IV required */ |
|---|
| 2098 | 2052 | areq_ctx->backup_iv = req->iv; |
|---|
| 2099 | | - areq_ctx->assoclen = req->assoclen; |
|---|
| 2100 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2101 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2053 | + areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; |
|---|
| 2102 | 2054 | |
|---|
| 2103 | 2055 | cc_proc_rfc4309_ccm(req); |
|---|
| 2104 | 2056 | |
|---|
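The open-coded valid_assoclen() helper deleted near the top of this patch (it accepted 16 or 20 bytes) is replaced by crypto_ipsec_check_assoclen() from crypto/gcm.h, and the implicit-IV bytes are now subtracted from assoclen in the request entry points instead of inside cc_proc_rfc4309_ccm()/cc_proc_rfc4_gcm(). A sketch of the combined check and adjustment, using the driver names from the hunks but as an illustration only:

```c
#include <crypto/aead.h>
#include <crypto/gcm.h>

/*
 * aead_req_ctx and CCM_BLOCK_IV_SIZE are the driver definitions used in
 * the hunks. crypto_ipsec_check_assoclen() accepts the 16- or 20-byte
 * associated data that IPsec ESP produces (SPI + sequence number +
 * 8-byte explicit IV, with or without ESN) and returns -EINVAL otherwise.
 */
static int rfc4309_check_assoclen(struct aead_request *req,
				  struct aead_req_ctx *areq_ctx)
{
	int rc = crypto_ipsec_check_assoclen(req->assoclen);

	if (rc)
		return rc;

	/*
	 * The 8 explicit IV bytes at the end of the assoc region are
	 * consumed as IV material, not authenticated data, hence the
	 * subtraction.
	 */
	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
	return 0;
}
```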
| .. | .. |
|---|
| 2119 | 2071 | /* No generated IV required */ |
|---|
| 2120 | 2072 | areq_ctx->backup_iv = req->iv; |
|---|
| 2121 | 2073 | areq_ctx->assoclen = req->assoclen; |
|---|
| 2122 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2123 | | - areq_ctx->is_gcm4543 = false; |
|---|
| 2124 | | - |
|---|
| 2125 | | - areq_ctx->plaintext_authenticate_only = false; |
|---|
| 2126 | 2074 | |
|---|
| 2127 | 2075 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); |
|---|
| 2128 | 2076 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| .. | .. |
|---|
| 2133 | 2081 | |
|---|
| 2134 | 2082 | static int cc_rfc4309_ccm_decrypt(struct aead_request *req) |
|---|
| 2135 | 2083 | { |
|---|
| 2136 | | - struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
|---|
| 2137 | | - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 2138 | | - struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 2139 | 2084 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2140 | | - int rc = -EINVAL; |
|---|
| 2085 | + int rc; |
|---|
| 2141 | 2086 | |
|---|
| 2142 | | - if (!valid_assoclen(req)) { |
|---|
| 2143 | | - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); |
|---|
| 2087 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2088 | + if (rc) |
|---|
| 2144 | 2089 | goto out; |
|---|
| 2145 | | - } |
|---|
| 2146 | 2090 | |
|---|
| 2147 | 2091 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2148 | 2092 | |
|---|
| 2149 | 2093 | /* No generated IV required */ |
|---|
| 2150 | 2094 | areq_ctx->backup_iv = req->iv; |
|---|
| 2151 | | - areq_ctx->assoclen = req->assoclen; |
|---|
| 2152 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2095 | + areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; |
|---|
| 2153 | 2096 | |
|---|
| 2154 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2155 | 2097 | cc_proc_rfc4309_ccm(req); |
|---|
| 2156 | 2098 | |
|---|
| 2157 | 2099 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); |
|---|
| .. | .. |
|---|
| 2251 | 2193 | |
|---|
| 2252 | 2194 | static int cc_rfc4106_gcm_encrypt(struct aead_request *req) |
|---|
| 2253 | 2195 | { |
|---|
| 2254 | | - /* Very similar to cc_aead_encrypt() above. */ |
|---|
| 2255 | | - |
|---|
| 2256 | | - struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
|---|
| 2257 | | - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 2258 | | - struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 2259 | 2196 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2260 | | - int rc = -EINVAL; |
|---|
| 2197 | + int rc; |
|---|
| 2261 | 2198 | |
|---|
| 2262 | | - if (!valid_assoclen(req)) { |
|---|
| 2263 | | - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); |
|---|
| 2199 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2200 | + if (rc) |
|---|
| 2264 | 2201 | goto out; |
|---|
| 2265 | | - } |
|---|
| 2266 | 2202 | |
|---|
| 2267 | 2203 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2268 | 2204 | |
|---|
| 2269 | 2205 | /* No generated IV required */ |
|---|
| 2270 | 2206 | areq_ctx->backup_iv = req->iv; |
|---|
| 2271 | | - areq_ctx->assoclen = req->assoclen; |
|---|
| 2272 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2273 | | - |
|---|
| 2274 | | - areq_ctx->plaintext_authenticate_only = false; |
|---|
| 2207 | + areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; |
|---|
| 2275 | 2208 | |
|---|
| 2276 | 2209 | cc_proc_rfc4_gcm(req); |
|---|
| 2277 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2278 | 2210 | |
|---|
| 2279 | 2211 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); |
|---|
| 2280 | 2212 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| .. | .. |
|---|
| 2285 | 2217 | |
|---|
| 2286 | 2218 | static int cc_rfc4543_gcm_encrypt(struct aead_request *req) |
|---|
| 2287 | 2219 | { |
|---|
| 2288 | | - /* Very similar to cc_aead_encrypt() above. */ |
|---|
| 2289 | | - |
|---|
| 2290 | 2220 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2291 | 2221 | int rc; |
|---|
| 2222 | + |
|---|
| 2223 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2224 | + if (rc) |
|---|
| 2225 | + goto out; |
|---|
| 2292 | 2226 | |
|---|
| 2293 | 2227 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2294 | 2228 | |
|---|
| .. | .. |
|---|
| 2298 | 2232 | /* No generated IV required */ |
|---|
| 2299 | 2233 | areq_ctx->backup_iv = req->iv; |
|---|
| 2300 | 2234 | areq_ctx->assoclen = req->assoclen; |
|---|
| 2301 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2302 | 2235 | |
|---|
| 2303 | 2236 | cc_proc_rfc4_gcm(req); |
|---|
| 2304 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2305 | 2237 | |
|---|
| 2306 | 2238 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); |
|---|
| 2307 | 2239 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| 2308 | 2240 | req->iv = areq_ctx->backup_iv; |
|---|
| 2309 | | - |
|---|
| 2241 | +out: |
|---|
| 2310 | 2242 | return rc; |
|---|
| 2311 | 2243 | } |
|---|
| 2312 | 2244 | |
|---|
| 2313 | 2245 | static int cc_rfc4106_gcm_decrypt(struct aead_request *req) |
|---|
| 2314 | 2246 | { |
|---|
| 2315 | | - /* Very similar to cc_aead_decrypt() above. */ |
|---|
| 2316 | | - |
|---|
| 2317 | | - struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
|---|
| 2318 | | - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
|---|
| 2319 | | - struct device *dev = drvdata_to_dev(ctx->drvdata); |
|---|
| 2320 | 2247 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2321 | | - int rc = -EINVAL; |
|---|
| 2248 | + int rc; |
|---|
| 2322 | 2249 | |
|---|
| 2323 | | - if (!valid_assoclen(req)) { |
|---|
| 2324 | | - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); |
|---|
| 2250 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2251 | + if (rc) |
|---|
| 2325 | 2252 | goto out; |
|---|
| 2326 | | - } |
|---|
| 2327 | 2253 | |
|---|
| 2328 | 2254 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2329 | 2255 | |
|---|
| 2330 | 2256 | /* No generated IV required */ |
|---|
| 2331 | 2257 | areq_ctx->backup_iv = req->iv; |
|---|
| 2332 | | - areq_ctx->assoclen = req->assoclen; |
|---|
| 2333 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2334 | | - |
|---|
| 2335 | | - areq_ctx->plaintext_authenticate_only = false; |
|---|
| 2258 | + areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; |
|---|
| 2336 | 2259 | |
|---|
| 2337 | 2260 | cc_proc_rfc4_gcm(req); |
|---|
| 2338 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2339 | 2261 | |
|---|
| 2340 | 2262 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); |
|---|
| 2341 | 2263 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| .. | .. |
|---|
| 2346 | 2268 | |
|---|
| 2347 | 2269 | static int cc_rfc4543_gcm_decrypt(struct aead_request *req) |
|---|
| 2348 | 2270 | { |
|---|
| 2349 | | - /* Very similar to cc_aead_decrypt() above. */ |
|---|
| 2350 | | - |
|---|
| 2351 | 2271 | struct aead_req_ctx *areq_ctx = aead_request_ctx(req); |
|---|
| 2352 | 2272 | int rc; |
|---|
| 2273 | + |
|---|
| 2274 | + rc = crypto_ipsec_check_assoclen(req->assoclen); |
|---|
| 2275 | + if (rc) |
|---|
| 2276 | + goto out; |
|---|
| 2353 | 2277 | |
|---|
| 2354 | 2278 | memset(areq_ctx, 0, sizeof(*areq_ctx)); |
|---|
| 2355 | 2279 | |
|---|
| .. | .. |
|---|
| 2359 | 2283 | /* No generated IV required */ |
|---|
| 2360 | 2284 | areq_ctx->backup_iv = req->iv; |
|---|
| 2361 | 2285 | areq_ctx->assoclen = req->assoclen; |
|---|
| 2362 | | - areq_ctx->backup_giv = NULL; |
|---|
| 2363 | 2286 | |
|---|
| 2364 | 2287 | cc_proc_rfc4_gcm(req); |
|---|
| 2365 | | - areq_ctx->is_gcm4543 = true; |
|---|
| 2366 | 2288 | |
|---|
| 2367 | 2289 | rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); |
|---|
| 2368 | 2290 | if (rc != -EINPROGRESS && rc != -EBUSY) |
|---|
| 2369 | 2291 | req->iv = areq_ctx->backup_iv; |
|---|
| 2370 | | - |
|---|
| 2292 | +out: |
|---|
| 2371 | 2293 | return rc; |
|---|
| 2372 | 2294 | } |
|---|
| 2373 | 2295 | |
|---|
| .. | .. |
|---|
| 2391 | 2313 | .flow_mode = S_DIN_to_AES, |
|---|
| 2392 | 2314 | .auth_mode = DRV_HASH_SHA1, |
|---|
| 2393 | 2315 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2316 | + .std_body = CC_STD_NIST, |
|---|
| 2394 | 2317 | }, |
|---|
| 2395 | 2318 | { |
|---|
| 2396 | 2319 | .name = "authenc(hmac(sha1),cbc(des3_ede))", |
|---|
| 2397 | 2320 | .driver_name = "authenc-hmac-sha1-cbc-des3-ccree", |
|---|
| 2398 | 2321 | .blocksize = DES3_EDE_BLOCK_SIZE, |
|---|
| 2399 | 2322 | .template_aead = { |
|---|
| 2400 | | - .setkey = cc_aead_setkey, |
|---|
| 2323 | + .setkey = cc_des3_aead_setkey, |
|---|
| 2401 | 2324 | .setauthsize = cc_aead_setauthsize, |
|---|
| 2402 | 2325 | .encrypt = cc_aead_encrypt, |
|---|
| 2403 | 2326 | .decrypt = cc_aead_decrypt, |
|---|
| .. | .. |
|---|
| 2410 | 2333 | .flow_mode = S_DIN_to_DES, |
|---|
| 2411 | 2334 | .auth_mode = DRV_HASH_SHA1, |
|---|
| 2412 | 2335 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2336 | + .std_body = CC_STD_NIST, |
|---|
| 2413 | 2337 | }, |
|---|
| 2414 | 2338 | { |
|---|
| 2415 | 2339 | .name = "authenc(hmac(sha256),cbc(aes))", |
|---|
| .. | .. |
|---|
| 2429 | 2353 | .flow_mode = S_DIN_to_AES, |
|---|
| 2430 | 2354 | .auth_mode = DRV_HASH_SHA256, |
|---|
| 2431 | 2355 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2356 | + .std_body = CC_STD_NIST, |
|---|
| 2432 | 2357 | }, |
|---|
| 2433 | 2358 | { |
|---|
| 2434 | 2359 | .name = "authenc(hmac(sha256),cbc(des3_ede))", |
|---|
| 2435 | 2360 | .driver_name = "authenc-hmac-sha256-cbc-des3-ccree", |
|---|
| 2436 | 2361 | .blocksize = DES3_EDE_BLOCK_SIZE, |
|---|
| 2437 | 2362 | .template_aead = { |
|---|
| 2438 | | - .setkey = cc_aead_setkey, |
|---|
| 2363 | + .setkey = cc_des3_aead_setkey, |
|---|
| 2439 | 2364 | .setauthsize = cc_aead_setauthsize, |
|---|
| 2440 | 2365 | .encrypt = cc_aead_encrypt, |
|---|
| 2441 | 2366 | .decrypt = cc_aead_decrypt, |
|---|
| .. | .. |
|---|
| 2448 | 2373 | .flow_mode = S_DIN_to_DES, |
|---|
| 2449 | 2374 | .auth_mode = DRV_HASH_SHA256, |
|---|
| 2450 | 2375 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2376 | + .std_body = CC_STD_NIST, |
|---|
| 2451 | 2377 | }, |
|---|
| 2452 | 2378 | { |
|---|
| 2453 | 2379 | .name = "authenc(xcbc(aes),cbc(aes))", |
|---|
| .. | .. |
|---|
| 2467 | 2393 | .flow_mode = S_DIN_to_AES, |
|---|
| 2468 | 2394 | .auth_mode = DRV_HASH_XCBC_MAC, |
|---|
| 2469 | 2395 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2396 | + .std_body = CC_STD_NIST, |
|---|
| 2470 | 2397 | }, |
|---|
| 2471 | 2398 | { |
|---|
| 2472 | 2399 | .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", |
|---|
| .. | .. |
|---|
| 2486 | 2413 | .flow_mode = S_DIN_to_AES, |
|---|
| 2487 | 2414 | .auth_mode = DRV_HASH_SHA1, |
|---|
| 2488 | 2415 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2416 | + .std_body = CC_STD_NIST, |
|---|
| 2489 | 2417 | }, |
|---|
| 2490 | 2418 | { |
|---|
| 2491 | 2419 | .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", |
|---|
| .. | .. |
|---|
| 2505 | 2433 | .flow_mode = S_DIN_to_AES, |
|---|
| 2506 | 2434 | .auth_mode = DRV_HASH_SHA256, |
|---|
| 2507 | 2435 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2436 | + .std_body = CC_STD_NIST, |
|---|
| 2508 | 2437 | }, |
|---|
| 2509 | 2438 | { |
|---|
| 2510 | 2439 | .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))", |
|---|
| .. | .. |
|---|
| 2524 | 2453 | .flow_mode = S_DIN_to_AES, |
|---|
| 2525 | 2454 | .auth_mode = DRV_HASH_XCBC_MAC, |
|---|
| 2526 | 2455 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2456 | + .std_body = CC_STD_NIST, |
|---|
| 2527 | 2457 | }, |
|---|
| 2528 | 2458 | { |
|---|
| 2529 | 2459 | .name = "ccm(aes)", |
|---|
| .. | .. |
|---|
| 2543 | 2473 | .flow_mode = S_DIN_to_AES, |
|---|
| 2544 | 2474 | .auth_mode = DRV_HASH_NULL, |
|---|
| 2545 | 2475 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2476 | + .std_body = CC_STD_NIST, |
|---|
| 2546 | 2477 | }, |
|---|
| 2547 | 2478 | { |
|---|
| 2548 | 2479 | .name = "rfc4309(ccm(aes))", |
|---|
| .. | .. |
|---|
| 2562 | 2493 | .flow_mode = S_DIN_to_AES, |
|---|
| 2563 | 2494 | .auth_mode = DRV_HASH_NULL, |
|---|
| 2564 | 2495 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2496 | + .std_body = CC_STD_NIST, |
|---|
| 2565 | 2497 | }, |
|---|
| 2566 | 2498 | { |
|---|
| 2567 | 2499 | .name = "gcm(aes)", |
|---|
| .. | .. |
|---|
| 2581 | 2513 | .flow_mode = S_DIN_to_AES, |
|---|
| 2582 | 2514 | .auth_mode = DRV_HASH_NULL, |
|---|
| 2583 | 2515 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2516 | + .std_body = CC_STD_NIST, |
|---|
| 2584 | 2517 | }, |
|---|
| 2585 | 2518 | { |
|---|
| 2586 | 2519 | .name = "rfc4106(gcm(aes))", |
|---|
| .. | .. |
|---|
| 2600 | 2533 | .flow_mode = S_DIN_to_AES, |
|---|
| 2601 | 2534 | .auth_mode = DRV_HASH_NULL, |
|---|
| 2602 | 2535 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2536 | + .std_body = CC_STD_NIST, |
|---|
| 2603 | 2537 | }, |
|---|
| 2604 | 2538 | { |
|---|
| 2605 | 2539 | .name = "rfc4543(gcm(aes))", |
|---|
| .. | .. |
|---|
| 2619 | 2553 | .flow_mode = S_DIN_to_AES, |
|---|
| 2620 | 2554 | .auth_mode = DRV_HASH_NULL, |
|---|
| 2621 | 2555 | .min_hw_rev = CC_HW_REV_630, |
|---|
| 2556 | + .std_body = CC_STD_NIST, |
|---|
| 2622 | 2557 | }, |
|---|
| 2623 | 2558 | }; |
|---|
| 2624 | 2559 | |
|---|
| .. | .. |
|---|
| 2628 | 2563 | struct cc_crypto_alg *t_alg; |
|---|
| 2629 | 2564 | struct aead_alg *alg; |
|---|
| 2630 | 2565 | |
|---|
| 2631 | | - t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); |
|---|
| 2566 | + t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL); |
|---|
| 2632 | 2567 | if (!t_alg) |
|---|
| 2633 | 2568 | return ERR_PTR(-ENOMEM); |
|---|
| 2634 | 2569 | |
|---|
| .. | .. |
|---|
| 2642 | 2577 | |
|---|
| 2643 | 2578 | alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx); |
|---|
| 2644 | 2579 | alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; |
|---|
| 2580 | + alg->base.cra_blocksize = tmpl->blocksize; |
|---|
| 2645 | 2581 | alg->init = cc_aead_init; |
|---|
| 2646 | 2582 | alg->exit = cc_aead_exit; |
|---|
| 2647 | 2583 | |
|---|
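cc_create_aead_alg() switches from kzalloc() to devm_kzalloc() (and cc_aead_alloc() below to devm_kmalloc()), tying the allocations to the device's lifetime; that is what lets the later hunks drop the explicit kfree() calls and the fail2: label from the error and teardown paths. A minimal sketch of the device-managed pattern, with a made-up structure name:

```c
#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Made-up structure name; only the allocation pattern matters here. */
struct example_handle {
	struct list_head alg_list;
};

static struct example_handle *example_alloc(struct device *dev)
{
	/*
	 * devm_kzalloc() ties the buffer to @dev, so it is released
	 * automatically when the driver detaches; no kfree() is needed
	 * on the error or removal paths.
	 */
	struct example_handle *h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;

	INIT_LIST_HEAD(&h->alg_list);
	return h;
}
```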
| .. | .. |
|---|
| 2657 | 2593 | int cc_aead_free(struct cc_drvdata *drvdata) |
|---|
| 2658 | 2594 | { |
|---|
| 2659 | 2595 | struct cc_crypto_alg *t_alg, *n; |
|---|
| 2660 | | - struct cc_aead_handle *aead_handle = |
|---|
| 2661 | | - (struct cc_aead_handle *)drvdata->aead_handle; |
|---|
| 2596 | + struct cc_aead_handle *aead_handle = drvdata->aead_handle; |
|---|
| 2662 | 2597 | |
|---|
| 2663 | | - if (aead_handle) { |
|---|
| 2664 | | - /* Remove registered algs */ |
|---|
| 2665 | | - list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, |
|---|
| 2666 | | - entry) { |
|---|
| 2667 | | - crypto_unregister_aead(&t_alg->aead_alg); |
|---|
| 2668 | | - list_del(&t_alg->entry); |
|---|
| 2669 | | - kfree(t_alg); |
|---|
| 2670 | | - } |
|---|
| 2671 | | - kfree(aead_handle); |
|---|
| 2672 | | - drvdata->aead_handle = NULL; |
|---|
| 2598 | + /* Remove registered algs */ |
|---|
| 2599 | + list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) { |
|---|
| 2600 | + crypto_unregister_aead(&t_alg->aead_alg); |
|---|
| 2601 | + list_del(&t_alg->entry); |
|---|
| 2673 | 2602 | } |
|---|
| 2674 | 2603 | |
|---|
| 2675 | 2604 | return 0; |
|---|
| .. | .. |
|---|
| 2683 | 2612 | int alg; |
|---|
| 2684 | 2613 | struct device *dev = drvdata_to_dev(drvdata); |
|---|
| 2685 | 2614 | |
|---|
| 2686 | | - aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL); |
|---|
| 2615 | + aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL); |
|---|
| 2687 | 2616 | if (!aead_handle) { |
|---|
| 2688 | 2617 | rc = -ENOMEM; |
|---|
| 2689 | 2618 | goto fail0; |
|---|
| .. | .. |
|---|
| 2696 | 2625 | MAX_HMAC_DIGEST_SIZE); |
|---|
| 2697 | 2626 | |
|---|
| 2698 | 2627 | if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) { |
|---|
| 2699 | | - dev_err(dev, "SRAM pool exhausted\n"); |
|---|
| 2700 | 2628 | rc = -ENOMEM; |
|---|
| 2701 | 2629 | goto fail1; |
|---|
| 2702 | 2630 | } |
|---|
| 2703 | 2631 | |
|---|
| 2704 | 2632 | /* Linux crypto */ |
|---|
| 2705 | 2633 | for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) { |
|---|
| 2706 | | - if (aead_algs[alg].min_hw_rev > drvdata->hw_rev) |
|---|
| 2634 | + if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) || |
|---|
| 2635 | + !(drvdata->std_bodies & aead_algs[alg].std_body)) |
|---|
| 2707 | 2636 | continue; |
|---|
| 2708 | 2637 | |
|---|
| 2709 | 2638 | t_alg = cc_create_aead_alg(&aead_algs[alg], dev); |
|---|
| .. | .. |
|---|
| 2718 | 2647 | if (rc) { |
|---|
| 2719 | 2648 | dev_err(dev, "%s alg registration failed\n", |
|---|
| 2720 | 2649 | t_alg->aead_alg.base.cra_driver_name); |
|---|
| 2721 | | - goto fail2; |
|---|
| 2722 | | - } else { |
|---|
| 2723 | | - list_add_tail(&t_alg->entry, &aead_handle->aead_list); |
|---|
| 2724 | | - dev_dbg(dev, "Registered %s\n", |
|---|
| 2725 | | - t_alg->aead_alg.base.cra_driver_name); |
|---|
| 2650 | + goto fail1; |
|---|
| 2726 | 2651 | } |
|---|
| 2652 | + |
|---|
| 2653 | + list_add_tail(&t_alg->entry, &aead_handle->aead_list); |
|---|
| 2654 | + dev_dbg(dev, "Registered %s\n", |
|---|
| 2655 | + t_alg->aead_alg.base.cra_driver_name); |
|---|
| 2727 | 2656 | } |
|---|
| 2728 | 2657 | |
|---|
| 2729 | 2658 | return 0; |
|---|
| 2730 | 2659 | |
|---|
| 2731 | | -fail2: |
|---|
| 2732 | | - kfree(t_alg); |
|---|
| 2733 | 2660 | fail1: |
|---|
| 2734 | 2661 | cc_aead_free(drvdata); |
|---|
| 2735 | 2662 | fail0: |
|---|