2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/chelsio/chcr_algo.c
....@@ -44,7 +44,6 @@
4444 #include <linux/kernel.h>
4545 #include <linux/module.h>
4646 #include <linux/crypto.h>
47
-#include <linux/cryptohash.h>
4847 #include <linux/skbuff.h>
4948 #include <linux/rtnetlink.h>
5049 #include <linux/highmem.h>
....@@ -93,7 +92,7 @@
9392 0x1B000000, 0x36000000, 0x6C000000
9493 };
9594
96
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
95
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
9796 unsigned char *input, int err);
9897
9998 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
....@@ -123,7 +122,7 @@
123122
124123 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125124 {
126
- return ctx->dev->u_ctx;
125
+ return container_of(ctx->dev, struct uld_ctx, dev);
127126 }
128127
129128 static inline int is_ofld_imm(const struct sk_buff *skb)
....@@ -198,18 +197,36 @@
198197 *err = 0;
199198 }
200199
201
-static inline void chcr_handle_aead_resp(struct aead_request *req,
200
+static int chcr_inc_wrcount(struct chcr_dev *dev)
201
+{
202
+ if (dev->state == CHCR_DETACH)
203
+ return 1;
204
+ atomic_inc(&dev->inflight);
205
+ return 0;
206
+}
207
+
208
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
209
+{
210
+ atomic_dec(&dev->inflight);
211
+}
212
+
213
+static inline int chcr_handle_aead_resp(struct aead_request *req,
202214 unsigned char *input,
203215 int err)
204216 {
205217 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
218
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
219
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
206220
207221 chcr_aead_common_exit(req);
208222 if (reqctx->verify == VERIFY_SW) {
209223 chcr_verify_tag(req, input, &err);
210224 reqctx->verify = VERIFY_HW;
211225 }
226
+ chcr_dec_wrcount(dev);
212227 req->base.complete(&req->base, err);
228
+
229
+ return err;
213230 }
214231
215232 static void get_aes_decrypt_key(unsigned char *dec_key,
....@@ -238,7 +255,7 @@
238255 return;
239256 }
240257 for (i = 0; i < nk; i++)
241
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
258
+ w_ring[i] = get_unaligned_be32(&key[i * 4]);
242259
243260 i = 0;
244261 temp = w_ring[nk - 1];
....@@ -257,7 +274,7 @@
257274 }
258275 i--;
259276 for (k = 0, j = i % nk; k < nk; k++) {
260
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
277
+ put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
261278 j--;
262279 if (j < 0)
263280 j += nk;
....@@ -391,7 +408,7 @@
391408
392409 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
393410 size_t size,
394
- dma_addr_t *addr)
411
+ dma_addr_t addr)
395412 {
396413 int j;
397414
....@@ -399,7 +416,7 @@
399416 return;
400417 j = walk->nents;
401418 walk->to->len[j % 8] = htons(size);
402
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
419
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
403420 j++;
404421 if ((j % 8) == 0)
405422 walk->to++;
....@@ -473,16 +490,16 @@
473490
474491 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
475492 size_t size,
476
- dma_addr_t *addr)
493
+ dma_addr_t addr)
477494 {
478495 if (!size)
479496 return;
480497
481498 if (walk->nents == 0) {
482499 walk->sgl->len0 = cpu_to_be32(size);
483
- walk->sgl->addr0 = cpu_to_be64(*addr);
500
+ walk->sgl->addr0 = cpu_to_be64(addr);
484501 } else {
485
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
502
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
486503 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
487504 walk->pair_idx = !walk->pair_idx;
488505 if (!walk->pair_idx)
....@@ -550,11 +567,11 @@
550567 }
551568 }
552569
553
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
570
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
554571 {
555
- struct crypto_alg *alg = tfm->__crt_alg;
572
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
556573 struct chcr_alg_template *chcr_crypto_alg =
557
- container_of(alg, struct chcr_alg_template, alg.crypto);
574
+ container_of(alg, struct chcr_alg_template, alg.skcipher);
558575
559576 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
560577 }
....@@ -674,29 +691,71 @@
674691 }
675692
676693 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
677
- u32 flags,
678
- struct scatterlist *src,
679
- struct scatterlist *dst,
680
- unsigned int nbytes,
694
+ struct skcipher_request *req,
681695 u8 *iv,
682696 unsigned short op_type)
683697 {
698
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
684699 int err;
685700
686
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
701
+ skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
702
+ skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
703
+ req->base.complete, req->base.data);
704
+ skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
705
+ req->cryptlen, iv);
687706
688
- skcipher_request_set_tfm(subreq, cipher);
689
- skcipher_request_set_callback(subreq, flags, NULL, NULL);
690
- skcipher_request_set_crypt(subreq, src, dst,
691
- nbytes, iv);
692
-
693
- err = op_type ? crypto_skcipher_decrypt(subreq) :
694
- crypto_skcipher_encrypt(subreq);
695
- skcipher_request_zero(subreq);
707
+ err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
708
+ crypto_skcipher_encrypt(&reqctx->fallback_req);
696709
697710 return err;
698711
699712 }
713
+
714
+static inline int get_qidxs(struct crypto_async_request *req,
715
+ unsigned int *txqidx, unsigned int *rxqidx)
716
+{
717
+ struct crypto_tfm *tfm = req->tfm;
718
+ int ret = 0;
719
+
720
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
721
+ case CRYPTO_ALG_TYPE_AEAD:
722
+ {
723
+ struct aead_request *aead_req =
724
+ container_of(req, struct aead_request, base);
725
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
726
+ *txqidx = reqctx->txqidx;
727
+ *rxqidx = reqctx->rxqidx;
728
+ break;
729
+ }
730
+ case CRYPTO_ALG_TYPE_SKCIPHER:
731
+ {
732
+ struct skcipher_request *sk_req =
733
+ container_of(req, struct skcipher_request, base);
734
+ struct chcr_skcipher_req_ctx *reqctx =
735
+ skcipher_request_ctx(sk_req);
736
+ *txqidx = reqctx->txqidx;
737
+ *rxqidx = reqctx->rxqidx;
738
+ break;
739
+ }
740
+ case CRYPTO_ALG_TYPE_AHASH:
741
+ {
742
+ struct ahash_request *ahash_req =
743
+ container_of(req, struct ahash_request, base);
744
+ struct chcr_ahash_req_ctx *reqctx =
745
+ ahash_request_ctx(ahash_req);
746
+ *txqidx = reqctx->txqidx;
747
+ *rxqidx = reqctx->rxqidx;
748
+ break;
749
+ }
750
+ default:
751
+ ret = -EINVAL;
752
+ /* should never get here */
753
+ BUG();
754
+ break;
755
+ }
756
+ return ret;
757
+}
758
+
700759 static inline void create_wreq(struct chcr_context *ctx,
701760 struct chcr_wr *chcr_req,
702761 struct crypto_async_request *req,
....@@ -707,7 +766,16 @@
707766 unsigned int lcb)
708767 {
709768 struct uld_ctx *u_ctx = ULD_CTX(ctx);
710
- int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
769
+ unsigned int tx_channel_id, rx_channel_id;
770
+ unsigned int txqidx = 0, rxqidx = 0;
771
+ unsigned int qid, fid, portno;
772
+
773
+ get_qidxs(req, &txqidx, &rxqidx);
774
+ qid = u_ctx->lldi.rxq_ids[rxqidx];
775
+ fid = u_ctx->lldi.rxq_ids[0];
776
+ portno = rxqidx / ctx->rxq_perchan;
777
+ tx_channel_id = txqidx / ctx->txq_perchan;
778
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
711779
712780
713781 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
....@@ -716,15 +784,12 @@
716784 chcr_req->wreq.len16_pkd =
717785 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
718786 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
719
- chcr_req->wreq.rx_chid_to_rx_q_id =
720
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
721
- !!lcb, ctx->tx_qidx);
787
+ chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
788
+ !!lcb, txqidx);
722789
723
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
724
- qid);
790
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
725791 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
726
- ((sizeof(chcr_req->wreq)) >> 4)));
727
-
792
+ ((sizeof(chcr_req->wreq)) >> 4)));
728793 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
729794 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
730795 sizeof(chcr_req->key_ctx) + sc_len);
....@@ -739,22 +804,26 @@
739804 */
740805 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
741806 {
742
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
743
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
807
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
808
+ struct chcr_context *ctx = c_ctx(tfm);
809
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
810
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
744811 struct sk_buff *skb = NULL;
745812 struct chcr_wr *chcr_req;
746813 struct cpl_rx_phys_dsgl *phys_cpl;
747814 struct ulptx_sgl *ulptx;
748
- struct chcr_blkcipher_req_ctx *reqctx =
749
- ablkcipher_request_ctx(wrparam->req);
815
+ struct chcr_skcipher_req_ctx *reqctx =
816
+ skcipher_request_ctx(wrparam->req);
750817 unsigned int temp = 0, transhdr_len, dst_size;
751818 int error;
752819 int nents;
753820 unsigned int kctx_len;
754821 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
755822 GFP_KERNEL : GFP_ATOMIC;
756
- struct adapter *adap = padap(c_ctx(tfm)->dev);
823
+ struct adapter *adap = padap(ctx->dev);
824
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
757825
826
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
758827 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
759828 reqctx->dst_ofst);
760829 dst_size = get_space_for_phys_dsgl(nents);
....@@ -773,7 +842,7 @@
773842 }
774843 chcr_req = __skb_put_zero(skb, transhdr_len);
775844 chcr_req->sec_cpl.op_ivinsrtofst =
776
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
845
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
777846
778847 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
779848 chcr_req->sec_cpl.aadstart_cipherstop_hi =
....@@ -789,9 +858,9 @@
789858
790859 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
791860 if ((reqctx->op == CHCR_DECRYPT_OP) &&
792
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
861
+ (!(get_cryptoalg_subtype(tfm) ==
793862 CRYPTO_ALG_SUB_TYPE_CTR)) &&
794
- (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
863
+ (!(get_cryptoalg_subtype(tfm) ==
795864 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
796865 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
797866 } else {
....@@ -825,7 +894,7 @@
825894 if (reqctx->op && (ablkctx->ciph_mode ==
826895 CHCR_SCMD_CIPHER_MODE_AES_CBC))
827896 sg_pcopy_to_buffer(wrparam->req->src,
828
- sg_nents(wrparam->req->src), wrparam->req->info, 16,
897
+ sg_nents(wrparam->req->src), wrparam->req->iv, 16,
829898 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
830899
831900 return skb;
....@@ -848,26 +917,20 @@
848917
849918 return ck_size;
850919 }
851
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
920
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
852921 const u8 *key,
853922 unsigned int keylen)
854923 {
855
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
856924 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
857
- int err = 0;
858925
859
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
860
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
861
- CRYPTO_TFM_REQ_MASK);
862
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
863
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
864
- tfm->crt_flags |=
865
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
866
- CRYPTO_TFM_RES_MASK;
867
- return err;
926
+ crypto_skcipher_clear_flags(ablkctx->sw_cipher,
927
+ CRYPTO_TFM_REQ_MASK);
928
+ crypto_skcipher_set_flags(ablkctx->sw_cipher,
929
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
930
+ return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
868931 }
869932
870
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
933
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
871934 const u8 *key,
872935 unsigned int keylen)
873936 {
....@@ -893,13 +956,12 @@
893956 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
894957 return 0;
895958 badkey_err:
896
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
897959 ablkctx->enckey_len = 0;
898960
899961 return err;
900962 }
901963
902
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
964
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
903965 const u8 *key,
904966 unsigned int keylen)
905967 {
....@@ -924,13 +986,12 @@
924986
925987 return 0;
926988 badkey_err:
927
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
928989 ablkctx->enckey_len = 0;
929990
930991 return err;
931992 }
932993
933
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
994
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
934995 const u8 *key,
935996 unsigned int keylen)
936997 {
....@@ -962,7 +1023,6 @@
9621023
9631024 return 0;
9641025 badkey_err:
965
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
9661026 ablkctx->enckey_len = 0;
9671027
9681028 return err;
....@@ -992,34 +1052,40 @@
9921052 u32 temp = be32_to_cpu(*--b);
9931053
9941054 temp = ~temp;
995
- c = (u64)temp + 1; // No of block can processed withou overflow
996
- if ((bytes / AES_BLOCK_SIZE) > c)
1055
+ c = (u64)temp + 1; // No of block can processed without overflow
1056
+ if ((bytes / AES_BLOCK_SIZE) >= c)
9971057 bytes = c * AES_BLOCK_SIZE;
9981058 return bytes;
9991059 }
10001060
1001
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1061
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
10021062 u32 isfinal)
10031063 {
1004
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1064
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
10051065 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1006
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1007
- struct crypto_cipher *cipher;
1066
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1067
+ struct crypto_aes_ctx aes;
10081068 int ret, i;
10091069 u8 *key;
10101070 unsigned int keylen;
10111071 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
10121072 int round8 = round / 8;
10131073
1014
- cipher = ablkctx->aes_generic;
10151074 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
10161075
10171076 keylen = ablkctx->enckey_len / 2;
10181077 key = ablkctx->key + keylen;
1019
- ret = crypto_cipher_setkey(cipher, key, keylen);
1078
+ /* For a 192 bit key remove the padded zeroes which was
1079
+ * added in chcr_xts_setkey
1080
+ */
1081
+ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1082
+ == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1083
+ ret = aes_expandkey(&aes, key, keylen - 8);
1084
+ else
1085
+ ret = aes_expandkey(&aes, key, keylen);
10201086 if (ret)
1021
- goto out;
1022
- crypto_cipher_encrypt_one(cipher, iv, iv);
1087
+ return ret;
1088
+ aes_encrypt(&aes, iv, iv);
10231089 for (i = 0; i < round8; i++)
10241090 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
10251091
....@@ -1027,21 +1093,22 @@
10271093 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
10281094
10291095 if (!isfinal)
1030
- crypto_cipher_decrypt_one(cipher, iv, iv);
1031
-out:
1032
- return ret;
1096
+ aes_decrypt(&aes, iv, iv);
1097
+
1098
+ memzero_explicit(&aes, sizeof(aes));
1099
+ return 0;
10331100 }
10341101
1035
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1102
+static int chcr_update_cipher_iv(struct skcipher_request *req,
10361103 struct cpl_fw6_pld *fw6_pld, u8 *iv)
10371104 {
1038
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1039
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1040
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1105
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1106
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1107
+ int subtype = get_cryptoalg_subtype(tfm);
10411108 int ret = 0;
10421109
10431110 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1044
- ctr_add_iv(iv, req->info, (reqctx->processed /
1111
+ ctr_add_iv(iv, req->iv, (reqctx->processed /
10451112 AES_BLOCK_SIZE));
10461113 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
10471114 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
....@@ -1052,7 +1119,7 @@
10521119 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
10531120 if (reqctx->op)
10541121 /*Updated before sending last WR*/
1055
- memcpy(iv, req->info, AES_BLOCK_SIZE);
1122
+ memcpy(iv, req->iv, AES_BLOCK_SIZE);
10561123 else
10571124 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
10581125 }
....@@ -1066,19 +1133,23 @@
10661133 * for subsequent update requests
10671134 */
10681135
1069
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1136
+static int chcr_final_cipher_iv(struct skcipher_request *req,
10701137 struct cpl_fw6_pld *fw6_pld, u8 *iv)
10711138 {
1072
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1073
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1074
- int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1139
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1140
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1141
+ int subtype = get_cryptoalg_subtype(tfm);
10751142 int ret = 0;
10761143
10771144 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1078
- ctr_add_iv(iv, req->info, (reqctx->processed /
1079
- AES_BLOCK_SIZE));
1080
- else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1081
- ret = chcr_update_tweak(req, iv, 1);
1145
+ ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1146
+ AES_BLOCK_SIZE));
1147
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1148
+ if (!reqctx->partial_req)
1149
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1150
+ else
1151
+ ret = chcr_update_tweak(req, iv, 1);
1152
+ }
10821153 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
10831154 /*Already updated for Decrypt*/
10841155 if (!reqctx->op)
....@@ -1089,24 +1160,27 @@
10891160
10901161 }
10911162
1092
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1163
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
10931164 unsigned char *input, int err)
10941165 {
1095
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1096
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1097
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1098
- struct sk_buff *skb;
1166
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1167
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
10991168 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1100
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1101
- struct cipher_wr_param wrparam;
1169
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1170
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1171
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
1172
+ struct chcr_context *ctx = c_ctx(tfm);
1173
+ struct adapter *adap = padap(ctx->dev);
1174
+ struct cipher_wr_param wrparam;
1175
+ struct sk_buff *skb;
11021176 int bytes;
11031177
11041178 if (err)
11051179 goto unmap;
1106
- if (req->nbytes == reqctx->processed) {
1180
+ if (req->cryptlen == reqctx->processed) {
11071181 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
11081182 req);
1109
- err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1183
+ err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
11101184 goto complete;
11111185 }
11121186
....@@ -1114,13 +1188,13 @@
11141188 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
11151189 CIP_SPACE_LEFT(ablkctx->enckey_len),
11161190 reqctx->src_ofst, reqctx->dst_ofst);
1117
- if ((bytes + reqctx->processed) >= req->nbytes)
1118
- bytes = req->nbytes - reqctx->processed;
1191
+ if ((bytes + reqctx->processed) >= req->cryptlen)
1192
+ bytes = req->cryptlen - reqctx->processed;
11191193 else
11201194 bytes = rounddown(bytes, 16);
11211195 } else {
11221196 /*CTR mode counter overfloa*/
1123
- bytes = req->nbytes - reqctx->processed;
1197
+ bytes = req->cryptlen - reqctx->processed;
11241198 }
11251199 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
11261200 if (err)
....@@ -1129,65 +1203,85 @@
11291203 if (unlikely(bytes == 0)) {
11301204 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
11311205 req);
1132
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
1133
- req->base.flags,
1134
- req->src,
1135
- req->dst,
1136
- req->nbytes,
1137
- req->info,
1138
- reqctx->op);
1206
+ memcpy(req->iv, reqctx->init_iv, IV);
1207
+ atomic_inc(&adap->chcr_stats.fallback);
1208
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1209
+ reqctx->op);
11391210 goto complete;
11401211 }
11411212
1142
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1213
+ if (get_cryptoalg_subtype(tfm) ==
11431214 CRYPTO_ALG_SUB_TYPE_CTR)
11441215 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1145
- wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1216
+ wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
11461217 wrparam.req = req;
11471218 wrparam.bytes = bytes;
11481219 skb = create_cipher_wr(&wrparam);
11491220 if (IS_ERR(skb)) {
1150
- pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1221
+ pr_err("%s : Failed to form WR. No memory\n", __func__);
11511222 err = PTR_ERR(skb);
11521223 goto unmap;
11531224 }
11541225 skb->dev = u_ctx->lldi.ports[0];
1155
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1226
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
11561227 chcr_send_wr(skb);
11571228 reqctx->last_req_len = bytes;
11581229 reqctx->processed += bytes;
1230
+ if (get_cryptoalg_subtype(tfm) ==
1231
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1232
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
1233
+ complete(&ctx->cbc_aes_aio_done);
1234
+ }
11591235 return 0;
11601236 unmap:
11611237 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
11621238 complete:
1239
+ if (get_cryptoalg_subtype(tfm) ==
1240
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1241
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
1242
+ complete(&ctx->cbc_aes_aio_done);
1243
+ }
1244
+ chcr_dec_wrcount(dev);
11631245 req->base.complete(&req->base, err);
11641246 return err;
11651247 }
11661248
1167
-static int process_cipher(struct ablkcipher_request *req,
1249
+static int process_cipher(struct skcipher_request *req,
11681250 unsigned short qid,
11691251 struct sk_buff **skb,
11701252 unsigned short op_type)
11711253 {
1172
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1173
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1174
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1254
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1255
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1256
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
11751257 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1258
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
11761259 struct cipher_wr_param wrparam;
11771260 int bytes, err = -EINVAL;
1261
+ int subtype;
11781262
11791263 reqctx->processed = 0;
1180
- if (!req->info)
1264
+ reqctx->partial_req = 0;
1265
+ if (!req->iv)
11811266 goto error;
1267
+ subtype = get_cryptoalg_subtype(tfm);
11821268 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1183
- (req->nbytes == 0) ||
1184
- (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1269
+ (req->cryptlen == 0) ||
1270
+ (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1271
+ if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1272
+ goto fallback;
1273
+ else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1274
+ subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1275
+ goto fallback;
11851276 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1186
- ablkctx->enckey_len, req->nbytes, ivsize);
1277
+ ablkctx->enckey_len, req->cryptlen, ivsize);
11871278 goto error;
11881279 }
1189
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1190
- if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1280
+
1281
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1282
+ if (err)
1283
+ goto error;
1284
+ if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
11911285 AES_MIN_KEY_SIZE +
11921286 sizeof(struct cpl_rx_phys_dsgl) +
11931287 /*Min dsgl size*/
....@@ -1195,14 +1289,14 @@
11951289 /* Can be sent as Imm*/
11961290 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
11971291
1198
- dnents = sg_nents_xlen(req->dst, req->nbytes,
1292
+ dnents = sg_nents_xlen(req->dst, req->cryptlen,
11991293 CHCR_DST_SG_SIZE, 0);
12001294 phys_dsgl = get_space_for_phys_dsgl(dnents);
12011295 kctx_len = roundup(ablkctx->enckey_len, 16);
12021296 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1203
- reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1297
+ reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
12041298 SGE_MAX_WR_LEN;
1205
- bytes = IV + req->nbytes;
1299
+ bytes = IV + req->cryptlen;
12061300
12071301 } else {
12081302 reqctx->imm = 0;
....@@ -1212,40 +1306,39 @@
12121306 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
12131307 CIP_SPACE_LEFT(ablkctx->enckey_len),
12141308 0, 0);
1215
- if ((bytes + reqctx->processed) >= req->nbytes)
1216
- bytes = req->nbytes - reqctx->processed;
1309
+ if ((bytes + reqctx->processed) >= req->cryptlen)
1310
+ bytes = req->cryptlen - reqctx->processed;
12171311 else
12181312 bytes = rounddown(bytes, 16);
12191313 } else {
1220
- bytes = req->nbytes;
1314
+ bytes = req->cryptlen;
12211315 }
1222
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1223
- CRYPTO_ALG_SUB_TYPE_CTR) {
1224
- bytes = adjust_ctr_overflow(req->info, bytes);
1316
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1317
+ bytes = adjust_ctr_overflow(req->iv, bytes);
12251318 }
1226
- if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1227
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1319
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
12281320 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1229
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1321
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
12301322 CTR_RFC3686_IV_SIZE);
12311323
12321324 /* initialize counter portion of counter block */
12331325 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
12341326 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1327
+ memcpy(reqctx->init_iv, reqctx->iv, IV);
12351328
12361329 } else {
12371330
1238
- memcpy(reqctx->iv, req->info, IV);
1331
+ memcpy(reqctx->iv, req->iv, IV);
1332
+ memcpy(reqctx->init_iv, req->iv, IV);
12391333 }
12401334 if (unlikely(bytes == 0)) {
12411335 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
12421336 req);
1243
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
1244
- req->base.flags,
1245
- req->src,
1246
- req->dst,
1247
- req->nbytes,
1248
- reqctx->iv,
1337
+fallback: atomic_inc(&adap->chcr_stats.fallback);
1338
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1339
+ subtype ==
1340
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1341
+ reqctx->iv : req->iv,
12491342 op_type);
12501343 goto error;
12511344 }
....@@ -1264,6 +1357,7 @@
12641357 }
12651358 reqctx->processed = bytes;
12661359 reqctx->last_req_len = bytes;
1360
+ reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
12671361
12681362 return 0;
12691363 unmap:
....@@ -1272,152 +1366,156 @@
12721366 return err;
12731367 }
12741368
1275
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
1369
+static int chcr_aes_encrypt(struct skcipher_request *req)
12761370 {
1277
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1371
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1372
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1373
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
12781374 struct sk_buff *skb = NULL;
1279
- int err, isfull = 0;
1375
+ int err;
12801376 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1377
+ struct chcr_context *ctx = c_ctx(tfm);
1378
+ unsigned int cpu;
12811379
1380
+ cpu = get_cpu();
1381
+ reqctx->txqidx = cpu % ctx->ntxq;
1382
+ reqctx->rxqidx = cpu % ctx->nrxq;
1383
+ put_cpu();
1384
+
1385
+ err = chcr_inc_wrcount(dev);
1386
+ if (err)
1387
+ return -ENXIO;
12821388 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1283
- c_ctx(tfm)->tx_qidx))) {
1284
- isfull = 1;
1285
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1286
- return -ENOSPC;
1389
+ reqctx->txqidx) &&
1390
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1391
+ err = -ENOSPC;
1392
+ goto error;
12871393 }
12881394
1289
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1395
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
12901396 &skb, CHCR_ENCRYPT_OP);
12911397 if (err || !skb)
12921398 return err;
12931399 skb->dev = u_ctx->lldi.ports[0];
1294
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1400
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
12951401 chcr_send_wr(skb);
1296
- return isfull ? -EBUSY : -EINPROGRESS;
1402
+ if (get_cryptoalg_subtype(tfm) ==
1403
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1404
+ CRYPTO_TFM_REQ_MAY_SLEEP ) {
1405
+ reqctx->partial_req = 1;
1406
+ wait_for_completion(&ctx->cbc_aes_aio_done);
1407
+ }
1408
+ return -EINPROGRESS;
1409
+error:
1410
+ chcr_dec_wrcount(dev);
1411
+ return err;
12971412 }
12981413
1299
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
1414
+static int chcr_aes_decrypt(struct skcipher_request *req)
13001415 {
1301
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1416
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1417
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
13021418 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1419
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
13031420 struct sk_buff *skb = NULL;
1304
- int err, isfull = 0;
1421
+ int err;
1422
+ struct chcr_context *ctx = c_ctx(tfm);
1423
+ unsigned int cpu;
1424
+
1425
+ cpu = get_cpu();
1426
+ reqctx->txqidx = cpu % ctx->ntxq;
1427
+ reqctx->rxqidx = cpu % ctx->nrxq;
1428
+ put_cpu();
1429
+
1430
+ err = chcr_inc_wrcount(dev);
1431
+ if (err)
1432
+ return -ENXIO;
13051433
13061434 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1307
- c_ctx(tfm)->tx_qidx))) {
1308
- isfull = 1;
1309
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1435
+ reqctx->txqidx) &&
1436
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
13101437 return -ENOSPC;
1311
- }
1312
-
1313
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1314
- &skb, CHCR_DECRYPT_OP);
1438
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1439
+ &skb, CHCR_DECRYPT_OP);
13151440 if (err || !skb)
13161441 return err;
13171442 skb->dev = u_ctx->lldi.ports[0];
1318
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1443
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
13191444 chcr_send_wr(skb);
1320
- return isfull ? -EBUSY : -EINPROGRESS;
1445
+ return -EINPROGRESS;
13211446 }
1322
-
13231447 static int chcr_device_init(struct chcr_context *ctx)
13241448 {
13251449 struct uld_ctx *u_ctx = NULL;
1326
- struct adapter *adap;
1327
- unsigned int id;
1328
- int txq_perchan, txq_idx, ntxq;
1329
- int err = 0, rxq_perchan, rxq_idx;
1450
+ int txq_perchan, ntxq;
1451
+ int err = 0, rxq_perchan;
13301452
1331
- id = smp_processor_id();
13321453 if (!ctx->dev) {
13331454 u_ctx = assign_chcr_device();
13341455 if (!u_ctx) {
1456
+ err = -ENXIO;
13351457 pr_err("chcr device assignment fails\n");
13361458 goto out;
13371459 }
1338
- ctx->dev = u_ctx->dev;
1339
- adap = padap(ctx->dev);
1340
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1341
- adap->vres.ncrypto_fc);
1460
+ ctx->dev = &u_ctx->dev;
1461
+ ntxq = u_ctx->lldi.ntxq;
13421462 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
13431463 txq_perchan = ntxq / u_ctx->lldi.nchan;
1344
- spin_lock(&ctx->dev->lock_chcr_dev);
1345
- ctx->tx_chan_id = ctx->dev->tx_channel_id;
1346
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1347
- ctx->dev->rx_channel_id = 0;
1348
- spin_unlock(&ctx->dev->lock_chcr_dev);
1349
- rxq_idx = ctx->tx_chan_id * rxq_perchan;
1350
- rxq_idx += id % rxq_perchan;
1351
- txq_idx = ctx->tx_chan_id * txq_perchan;
1352
- txq_idx += id % txq_perchan;
1353
- ctx->rx_qidx = rxq_idx;
1354
- ctx->tx_qidx = txq_idx;
1355
- /* Channel Id used by SGE to forward packet to Host.
1356
- * Same value should be used in cpl_fw6_pld RSS_CH field
1357
- * by FW. Driver programs PCI channel ID to be used in fw
1358
- * at the time of queue allocation with value "pi->tx_chan"
1359
- */
1360
- ctx->pci_chan_id = txq_idx / txq_perchan;
1464
+ ctx->ntxq = ntxq;
1465
+ ctx->nrxq = u_ctx->lldi.nrxq;
1466
+ ctx->rxq_perchan = rxq_perchan;
1467
+ ctx->txq_perchan = txq_perchan;
13611468 }
13621469 out:
13631470 return err;
13641471 }
13651472
1366
-static int chcr_cra_init(struct crypto_tfm *tfm)
1473
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
13671474 {
1368
- struct crypto_alg *alg = tfm->__crt_alg;
1369
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1475
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1476
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
13701477 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
13711478
1372
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1373
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1479
+ ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1480
+ CRYPTO_ALG_NEED_FALLBACK);
13741481 if (IS_ERR(ablkctx->sw_cipher)) {
1375
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1482
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
13761483 return PTR_ERR(ablkctx->sw_cipher);
13771484 }
1485
+ init_completion(&ctx->cbc_aes_aio_done);
1486
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1487
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
13781488
1379
- if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1380
- /* To update tweak*/
1381
- ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1382
- if (IS_ERR(ablkctx->aes_generic)) {
1383
- pr_err("failed to allocate aes cipher for tweak\n");
1384
- return PTR_ERR(ablkctx->aes_generic);
1385
- }
1386
- } else
1387
- ablkctx->aes_generic = NULL;
1388
-
1389
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1390
- return chcr_device_init(crypto_tfm_ctx(tfm));
1489
+ return chcr_device_init(ctx);
13911490 }
13921491
1393
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1492
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
13941493 {
1395
- struct crypto_alg *alg = tfm->__crt_alg;
1396
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1494
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1495
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
13971496 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
13981497
13991498 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
14001499 * cannot be used as fallback in chcr_handle_cipher_response
14011500 */
14021501 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1403
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1502
+ CRYPTO_ALG_NEED_FALLBACK);
14041503 if (IS_ERR(ablkctx->sw_cipher)) {
1405
- pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1504
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
14061505 return PTR_ERR(ablkctx->sw_cipher);
14071506 }
1408
- tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1409
- return chcr_device_init(crypto_tfm_ctx(tfm));
1507
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1508
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
1509
+ return chcr_device_init(ctx);
14101510 }
14111511
14121512
1413
-static void chcr_cra_exit(struct crypto_tfm *tfm)
1513
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
14141514 {
1415
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1515
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
14161516 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
14171517
14181518 crypto_free_skcipher(ablkctx->sw_cipher);
1419
- if (ablkctx->aes_generic)
1420
- crypto_free_cipher(ablkctx->aes_generic);
14211519 }
14221520
14231521 static int get_alg_config(struct algo_param *params,
....@@ -1450,7 +1548,7 @@
14501548 params->result_size = SHA512_DIGEST_SIZE;
14511549 break;
14521550 default:
1453
- pr_err("chcr : ERROR, unsupported digest size\n");
1551
+ pr_err("ERROR, unsupported digest size\n");
14541552 return -EINVAL;
14551553 }
14561554 return 0;
....@@ -1470,9 +1568,10 @@
14701568 {
14711569 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
14721570 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1473
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1571
+ struct chcr_context *ctx = h_ctx(tfm);
1572
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
14741573 struct sk_buff *skb = NULL;
1475
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1574
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
14761575 struct chcr_wr *chcr_req;
14771576 struct ulptx_sgl *ulptx;
14781577 unsigned int nents = 0, transhdr_len;
....@@ -1481,7 +1580,9 @@
14811580 GFP_ATOMIC;
14821581 struct adapter *adap = padap(h_ctx(tfm)->dev);
14831582 int error = 0;
1583
+ unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
14841584
1585
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
14851586 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
14861587 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
14871588 param->sg_len) <= SGE_MAX_WR_LEN;
....@@ -1498,7 +1599,8 @@
14981599 chcr_req = __skb_put_zero(skb, transhdr_len);
14991600
15001601 chcr_req->sec_cpl.op_ivinsrtofst =
1501
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1602
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1603
+
15021604 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
15031605
15041606 chcr_req->sec_cpl.aadstart_cipherstop_hi =
....@@ -1561,21 +1663,22 @@
15611663 {
15621664 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
15631665 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1564
- struct uld_ctx *u_ctx = NULL;
1666
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1667
+ struct chcr_context *ctx = h_ctx(rtfm);
1668
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
15651669 struct sk_buff *skb;
15661670 u8 remainder = 0, bs;
15671671 unsigned int nbytes = req->nbytes;
15681672 struct hash_wr_param params;
1569
- int error, isfull = 0;
1673
+ int error;
1674
+ unsigned int cpu;
1675
+
1676
+ cpu = get_cpu();
1677
+ req_ctx->txqidx = cpu % ctx->ntxq;
1678
+ req_ctx->rxqidx = cpu % ctx->nrxq;
1679
+ put_cpu();
15701680
15711681 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1572
- u_ctx = ULD_CTX(h_ctx(rtfm));
1573
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1574
- h_ctx(rtfm)->tx_qidx))) {
1575
- isfull = 1;
1576
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1577
- return -ENOSPC;
1578
- }
15791682
15801683 if (nbytes + req_ctx->reqlen >= bs) {
15811684 remainder = (nbytes + req_ctx->reqlen) % bs;
....@@ -1586,10 +1689,25 @@
15861689 req_ctx->reqlen += nbytes;
15871690 return 0;
15881691 }
1692
+ error = chcr_inc_wrcount(dev);
1693
+ if (error)
1694
+ return -ENXIO;
1695
+ /* Detach state for CHCR means lldi or padap is freed. Increasing
1696
+ * inflight count for dev guarantees that lldi and padap is valid
1697
+ */
1698
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1699
+ req_ctx->txqidx) &&
1700
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1701
+ error = -ENOSPC;
1702
+ goto err;
1703
+ }
1704
+
15891705 chcr_init_hctx_per_wr(req_ctx);
15901706 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1591
- if (error)
1592
- return -ENOMEM;
1707
+ if (error) {
1708
+ error = -ENOMEM;
1709
+ goto err;
1710
+ }
15931711 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
15941712 params.kctx_len = roundup(params.alg_prm.result_size, 16);
15951713 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
....@@ -1623,12 +1741,13 @@
16231741 }
16241742 req_ctx->reqlen = remainder;
16251743 skb->dev = u_ctx->lldi.ports[0];
1626
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1744
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
16271745 chcr_send_wr(skb);
1628
-
1629
- return isfull ? -EBUSY : -EINPROGRESS;
1746
+ return -EINPROGRESS;
16301747 unmap:
16311748 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1749
+err:
1750
+ chcr_dec_wrcount(dev);
16321751 return error;
16331752 }
16341753
....@@ -1646,13 +1765,25 @@
16461765 {
16471766 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
16481767 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1768
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
16491769 struct hash_wr_param params;
16501770 struct sk_buff *skb;
1651
- struct uld_ctx *u_ctx = NULL;
1771
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1772
+ struct chcr_context *ctx = h_ctx(rtfm);
16521773 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1774
+ int error;
1775
+ unsigned int cpu;
1776
+
1777
+ cpu = get_cpu();
1778
+ req_ctx->txqidx = cpu % ctx->ntxq;
1779
+ req_ctx->rxqidx = cpu % ctx->nrxq;
1780
+ put_cpu();
1781
+
1782
+ error = chcr_inc_wrcount(dev);
1783
+ if (error)
1784
+ return -ENXIO;
16531785
16541786 chcr_init_hctx_per_wr(req_ctx);
1655
- u_ctx = ULD_CTX(h_ctx(rtfm));
16561787 if (is_hmac(crypto_ahash_tfm(rtfm)))
16571788 params.opad_needed = 1;
16581789 else
....@@ -1686,38 +1817,55 @@
16861817 }
16871818 params.hash_size = crypto_ahash_digestsize(rtfm);
16881819 skb = create_hash_wr(req, &params);
1689
- if (IS_ERR(skb))
1690
- return PTR_ERR(skb);
1820
+ if (IS_ERR(skb)) {
1821
+ error = PTR_ERR(skb);
1822
+ goto err;
1823
+ }
16911824 req_ctx->reqlen = 0;
16921825 skb->dev = u_ctx->lldi.ports[0];
1693
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1826
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
16941827 chcr_send_wr(skb);
16951828 return -EINPROGRESS;
1829
+err:
1830
+ chcr_dec_wrcount(dev);
1831
+ return error;
16961832 }
16971833
16981834 static int chcr_ahash_finup(struct ahash_request *req)
16991835 {
17001836 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
17011837 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1702
- struct uld_ctx *u_ctx = NULL;
1838
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
1839
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1840
+ struct chcr_context *ctx = h_ctx(rtfm);
17031841 struct sk_buff *skb;
17041842 struct hash_wr_param params;
17051843 u8 bs;
1706
- int error, isfull = 0;
1844
+ int error;
1845
+ unsigned int cpu;
1846
+
1847
+ cpu = get_cpu();
1848
+ req_ctx->txqidx = cpu % ctx->ntxq;
1849
+ req_ctx->rxqidx = cpu % ctx->nrxq;
1850
+ put_cpu();
17071851
17081852 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1709
- u_ctx = ULD_CTX(h_ctx(rtfm));
1853
+ error = chcr_inc_wrcount(dev);
1854
+ if (error)
1855
+ return -ENXIO;
17101856
17111857 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1712
- h_ctx(rtfm)->tx_qidx))) {
1713
- isfull = 1;
1714
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1715
- return -ENOSPC;
1858
+ req_ctx->txqidx) &&
1859
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1860
+ error = -ENOSPC;
1861
+ goto err;
17161862 }
17171863 chcr_init_hctx_per_wr(req_ctx);
17181864 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1719
- if (error)
1720
- return -ENOMEM;
1865
+ if (error) {
1866
+ error = -ENOMEM;
1867
+ goto err;
1868
+ }
17211869
17221870 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
17231871 params.kctx_len = roundup(params.alg_prm.result_size, 16);
....@@ -1768,12 +1916,13 @@
17681916 req_ctx->reqlen = 0;
17691917 req_ctx->hctx_wr.processed += params.sg_len;
17701918 skb->dev = u_ctx->lldi.ports[0];
1771
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1919
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
17721920 chcr_send_wr(skb);
1773
-
1774
- return isfull ? -EBUSY : -EINPROGRESS;
1921
+ return -EINPROGRESS;
17751922 unmap:
17761923 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1924
+err:
1925
+ chcr_dec_wrcount(dev);
17771926 return error;
17781927 }
17791928
....@@ -1781,27 +1930,39 @@
17811930 {
17821931 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
17831932 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1784
- struct uld_ctx *u_ctx = NULL;
1933
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
1934
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1935
+ struct chcr_context *ctx = h_ctx(rtfm);
17851936 struct sk_buff *skb;
17861937 struct hash_wr_param params;
17871938 u8 bs;
1788
- int error, isfull = 0;
1939
+ int error;
1940
+ unsigned int cpu;
1941
+
1942
+ cpu = get_cpu();
1943
+ req_ctx->txqidx = cpu % ctx->ntxq;
1944
+ req_ctx->rxqidx = cpu % ctx->nrxq;
1945
+ put_cpu();
17891946
17901947 rtfm->init(req);
17911948 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1949
+ error = chcr_inc_wrcount(dev);
1950
+ if (error)
1951
+ return -ENXIO;
17921952
1793
- u_ctx = ULD_CTX(h_ctx(rtfm));
17941953 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1795
- h_ctx(rtfm)->tx_qidx))) {
1796
- isfull = 1;
1797
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1798
- return -ENOSPC;
1954
+ req_ctx->txqidx) &&
1955
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1956
+ error = -ENOSPC;
1957
+ goto err;
17991958 }
18001959
18011960 chcr_init_hctx_per_wr(req_ctx);
18021961 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1803
- if (error)
1804
- return -ENOMEM;
1962
+ if (error) {
1963
+ error = -ENOMEM;
1964
+ goto err;
1965
+ }
18051966
18061967 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
18071968 params.kctx_len = roundup(params.alg_prm.result_size, 16);
....@@ -1837,7 +1998,7 @@
18371998 req_ctx->data_len += params.bfr_len + params.sg_len;
18381999
18392000 if (req->nbytes == 0) {
1840
- create_last_hash_block(req_ctx->reqbfr, bs, 0);
2001
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
18412002 params.more = 1;
18422003 params.bfr_len = bs;
18432004 }
....@@ -1849,11 +2010,13 @@
18492010 }
18502011 req_ctx->hctx_wr.processed += params.sg_len;
18512012 skb->dev = u_ctx->lldi.ports[0];
1852
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
2013
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
18532014 chcr_send_wr(skb);
1854
- return isfull ? -EBUSY : -EINPROGRESS;
2015
+ return -EINPROGRESS;
18552016 unmap:
18562017 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2018
+err:
2019
+ chcr_dec_wrcount(dev);
18572020 return error;
18582021 }
18592022
....@@ -1862,14 +2025,20 @@
18622025 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
18632026 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
18642027 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1865
- struct uld_ctx *u_ctx = NULL;
2028
+ struct chcr_context *ctx = h_ctx(rtfm);
2029
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
18662030 struct sk_buff *skb;
18672031 struct hash_wr_param params;
18682032 u8 bs;
18692033 int error;
2034
+ unsigned int cpu;
2035
+
2036
+ cpu = get_cpu();
2037
+ reqctx->txqidx = cpu % ctx->ntxq;
2038
+ reqctx->rxqidx = cpu % ctx->nrxq;
2039
+ put_cpu();
18702040
18712041 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1872
- u_ctx = ULD_CTX(h_ctx(rtfm));
18732042 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
18742043 params.kctx_len = roundup(params.alg_prm.result_size, 16);
18752044 if (is_hmac(crypto_ahash_tfm(rtfm))) {
....@@ -1909,7 +2078,7 @@
19092078 }
19102079 hctx_wr->processed += params.sg_len;
19112080 skb->dev = u_ctx->lldi.ports[0];
1912
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
2081
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
19132082 chcr_send_wr(skb);
19142083 return 0;
19152084 err:
....@@ -1925,6 +2094,7 @@
19252094 int digestsize, updated_digestsize;
19262095 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
19272096 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2097
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
19282098
19292099 if (input == NULL)
19302100 goto out;
....@@ -1967,6 +2137,7 @@
19672137
19682138
19692139 out:
2140
+ chcr_dec_wrcount(dev);
19702141 req->base.complete(&req->base, err);
19712142 }
19722143
....@@ -1983,14 +2154,13 @@
19832154
19842155 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
19852156 case CRYPTO_ALG_TYPE_AEAD:
1986
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
2157
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
19872158 break;
19882159
1989
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
1990
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2160
+ case CRYPTO_ALG_TYPE_SKCIPHER:
2161
+ chcr_handle_cipher_resp(skcipher_request_cast(req),
19912162 input, err);
19922163 break;
1993
-
19942164 case CRYPTO_ALG_TYPE_AHASH:
19952165 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
19962166 }
....@@ -2008,7 +2178,7 @@
20082178 memcpy(state->partial_hash, req_ctx->partial_hash,
20092179 CHCR_HASH_MAX_DIGEST_SIZE);
20102180 chcr_init_hctx_per_wr(state);
2011
- return 0;
2181
+ return 0;
20122182 }
20132183
20142184 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
....@@ -2042,7 +2212,6 @@
20422212 * ipad in hmacctx->ipad and opad in hmacctx->opad location
20432213 */
20442214 shash->tfm = hmacctx->base_hash;
2045
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
20462215 if (keylen > bs) {
20472216 err = crypto_shash_digest(shash, key, keylen,
20482217 hmacctx->ipad);
....@@ -2080,7 +2249,7 @@
20802249 return err;
20812250 }
20822251
2083
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2252
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
20842253 unsigned int key_len)
20852254 {
20862255 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
....@@ -2095,16 +2264,31 @@
20952264 ablkctx->enckey_len = key_len;
20962265 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
20972266 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2098
- ablkctx->key_ctx_hdr =
2267
+ /* Both keys for xts must be aligned to 16 byte boundary
2268
+ * by padding with zeros. So for 24 byte keys padding 8 zeroes.
2269
+ */
2270
+ if (key_len == 48) {
2271
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2272
+ + 16) >> 4;
2273
+ memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2274
+ memset(ablkctx->key + 24, 0, 8);
2275
+ memset(ablkctx->key + 56, 0, 8);
2276
+ ablkctx->enckey_len = 64;
2277
+ ablkctx->key_ctx_hdr =
2278
+ FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2279
+ CHCR_KEYCTX_NO_KEY, 1,
2280
+ 0, context_size);
2281
+ } else {
2282
+ ablkctx->key_ctx_hdr =
20992283 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
21002284 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
21012285 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
21022286 CHCR_KEYCTX_NO_KEY, 1,
21032287 0, context_size);
2288
+ }
21042289 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
21052290 return 0;
21062291 badkey_err:
2107
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
21082292 ablkctx->enckey_len = 0;
21092293
21102294 return err;
....@@ -2215,10 +2399,7 @@
22152399 error = -ENOMEM;
22162400 goto err;
22172401 }
2218
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2219
- CHCR_SRC_SG_SIZE, 0);
2220
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2221
- CHCR_SRC_SG_SIZE, req->assoclen);
2402
+
22222403 return 0;
22232404 err:
22242405 return error;
....@@ -2249,7 +2430,7 @@
22492430 req->base.complete, req->base.data);
22502431 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
22512432 req->iv);
2252
- aead_request_set_ad(subreq, req->assoclen);
2433
+ aead_request_set_ad(subreq, req->assoclen);
22532434 return op_type ? crypto_aead_decrypt(subreq) :
22542435 crypto_aead_encrypt(subreq);
22552436 }
....@@ -2259,7 +2440,9 @@
22592440 int size)
22602441 {
22612442 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2262
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2443
+ struct chcr_context *ctx = a_ctx(tfm);
2444
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
2445
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
22632446 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
22642447 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
22652448 struct sk_buff *skb = NULL;
....@@ -2268,15 +2451,17 @@
22682451 struct ulptx_sgl *ulptx;
22692452 unsigned int transhdr_len;
22702453 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2271
- unsigned int kctx_len = 0, dnents;
2272
- unsigned int assoclen = req->assoclen;
2454
+ unsigned int kctx_len = 0, dnents, snents;
22732455 unsigned int authsize = crypto_aead_authsize(tfm);
22742456 int error = -EINVAL;
2457
+ u8 *ivptr;
22752458 int null = 0;
22762459 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
22772460 GFP_ATOMIC;
2278
- struct adapter *adap = padap(a_ctx(tfm)->dev);
2461
+ struct adapter *adap = padap(ctx->dev);
2462
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
22792463
2464
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
22802465 if (req->cryptlen == 0)
22812466 return NULL;
22822467
....@@ -2288,24 +2473,20 @@
22882473 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
22892474 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
22902475 null = 1;
2291
- assoclen = 0;
2292
- reqctx->aad_nents = 0;
22932476 }
2294
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2295
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
2296
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
2297
- req->assoclen);
2477
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2478
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
22982479 dnents += MIN_AUTH_SG; // For IV
2299
-
2480
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2481
+ CHCR_SRC_SG_SIZE, 0);
23002482 dst_size = get_space_for_phys_dsgl(dnents);
2301
- kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2483
+ kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
23022484 - sizeof(chcr_req->key_ctx);
23032485 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2304
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2486
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
23052487 SGE_MAX_WR_LEN;
2306
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2307
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2308
- + MIN_GCM_SG) * 8);
2488
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2489
+ : (sgl_len(snents) * 8);
23092490 transhdr_len += temp;
23102491 transhdr_len = roundup(transhdr_len, 16);
23112492
....@@ -2315,7 +2496,7 @@
23152496 chcr_aead_common_exit(req);
23162497 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
23172498 }
2318
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2499
+ skb = alloc_skb(transhdr_len, flags);
23192500 if (!skb) {
23202501 error = -ENOMEM;
23212502 goto err;
....@@ -2331,16 +2512,16 @@
23312512 * to the hardware spec
23322513 */
23332514 chcr_req->sec_cpl.op_ivinsrtofst =
2334
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2335
- assoclen + 1);
2336
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2515
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2516
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
23372517 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2338
- assoclen ? 1 : 0, assoclen,
2339
- assoclen + IV + 1,
2518
+ null ? 0 : 1 + IV,
2519
+ null ? 0 : IV + req->assoclen,
2520
+ req->assoclen + IV + 1,
23402521 (temp & 0x1F0) >> 4);
23412522 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
23422523 temp & 0xF,
2343
- null ? 0 : assoclen + IV + 1,
2524
+ null ? 0 : req->assoclen + IV + 1,
23442525 temp, temp);
23452526 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
23462527 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
....@@ -2367,23 +2548,24 @@
23672548
23682549 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
23692550 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2551
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2552
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2553
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
23702554 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
23712555 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2372
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2373
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2556
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2557
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
23742558 CTR_RFC3686_IV_SIZE);
2375
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2559
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
23762560 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
23772561 } else {
2378
- memcpy(reqctx->iv, req->iv, IV);
2562
+ memcpy(ivptr, req->iv, IV);
23792563 }
2380
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2381
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2382
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2383
- chcr_add_aead_src_ent(req, ulptx, assoclen);
2564
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
2565
+ chcr_add_aead_src_ent(req, ulptx);
23842566 atomic_inc(&adap->chcr_stats.cipher_rqst);
2385
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2386
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2567
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2568
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
23872569 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
23882570 transhdr_len, temp, 0);
23892571 reqctx->skb = skb;
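To keep the pointer arithmetic above readable, here is the work-request buffer as this hunk lays it out (names from this hunk; field widths not to scale):

	/*
	 * __skb_put_zero(skb, transhdr_len) yields:
	 *
	 *   | chcr_wr | key ctx (kctx_len) | dsgl (dst_size) | IV | ulptx sgl / inline |
	 *
	 * phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	 * ivptr    = (u8 *)(phys_cpl + 1) + dst_size;
	 * ulptx    = (struct ulptx_sgl *)(ivptr + IV);
	 *
	 * i.e. the IV now lives in the WR itself, which is why both the
	 * "temp" size above and pldlen account for IV bytes.
	 */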
....@@ -2403,11 +2585,22 @@
24032585 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24042586 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24052587 unsigned int authsize = crypto_aead_authsize(tfm);
2406
- int dst_size;
2588
+ int src_len, dst_len;
24072589
2408
- dst_size = req->assoclen + req->cryptlen + (op_type ?
2409
- -authsize : authsize);
2410
- if (!req->cryptlen || !dst_size)
2590
+ /* calculate and handle src and dst sg length separately
2591
+ * for in-place and out-of-place operations
2592
+ */
2593
+ if (req->src == req->dst) {
2594
+ src_len = req->assoclen + req->cryptlen + (op_type ?
2595
+ 0 : authsize);
2596
+ dst_len = src_len;
2597
+ } else {
2598
+ src_len = req->assoclen + req->cryptlen;
2599
+ dst_len = req->assoclen + req->cryptlen + (op_type ?
2600
+ -authsize : authsize);
2601
+ }
2602
+
2603
+ if (!req->cryptlen || !src_len || !dst_len)
24112604 return 0;
24122605 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
24132606 DMA_BIDIRECTIONAL);
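The same in-place/out-of-place length rule recurs in the unmap path below; as a standalone sketch (a hypothetical helper, not code from the driver):

	static void aead_map_lens(struct aead_request *req, bool decrypt,
				  unsigned int authsize,
				  int *src_len, int *dst_len)
	{
		if (req->src == req->dst) {
			/* One SGL serves both directions, so on encrypt it
			 * must also cover the tag that is written back.
			 */
			*src_len = req->assoclen + req->cryptlen +
				   (decrypt ? 0 : authsize);
			*dst_len = *src_len;
		} else {
			/* Input never carries the output tag; the destination
			 * grows by authsize on encrypt, shrinks on decrypt.
			 */
			*src_len = req->assoclen + req->cryptlen;
			*dst_len = req->assoclen + req->cryptlen +
				   (decrypt ? -authsize : authsize);
		}
	}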
....@@ -2419,20 +2612,23 @@
24192612 reqctx->b0_dma = 0;
24202613 if (req->src == req->dst) {
24212614 error = dma_map_sg(dev, req->src,
2422
- sg_nents_for_len(req->src, dst_size),
2615
+ sg_nents_for_len(req->src, src_len),
24232616 DMA_BIDIRECTIONAL);
24242617 if (!error)
24252618 goto err;
24262619 } else {
2427
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
2620
+ error = dma_map_sg(dev, req->src,
2621
+ sg_nents_for_len(req->src, src_len),
24282622 DMA_TO_DEVICE);
24292623 if (!error)
24302624 goto err;
2431
- error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2625
+ error = dma_map_sg(dev, req->dst,
2626
+ sg_nents_for_len(req->dst, dst_len),
24322627 DMA_FROM_DEVICE);
24332628 if (!error) {
2434
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
2435
- DMA_TO_DEVICE);
2629
+ dma_unmap_sg(dev, req->src,
2630
+ sg_nents_for_len(req->src, src_len),
2631
+ DMA_TO_DEVICE);
24362632 goto err;
24372633 }
24382634 }
....@@ -2450,29 +2646,42 @@
24502646 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24512647 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24522648 unsigned int authsize = crypto_aead_authsize(tfm);
2453
- int dst_size;
2649
+ int src_len, dst_len;
24542650
2455
- dst_size = req->assoclen + req->cryptlen + (op_type ?
2456
- -authsize : authsize);
2457
- if (!req->cryptlen || !dst_size)
2651
+ /* calculate and handle src and dst sg length separately
2652
+ * for in-place and out-of-place operations
2653
+ */
2654
+ if (req->src == req->dst) {
2655
+ src_len = req->assoclen + req->cryptlen + (op_type ?
2656
+ 0 : authsize);
2657
+ dst_len = src_len;
2658
+ } else {
2659
+ src_len = req->assoclen + req->cryptlen;
2660
+ dst_len = req->assoclen + req->cryptlen + (op_type ?
2661
+ -authsize : authsize);
2662
+ }
2663
+
2664
+ if (!req->cryptlen || !src_len || !dst_len)
24582665 return;
24592666
24602667 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
24612668 DMA_BIDIRECTIONAL);
24622669 if (req->src == req->dst) {
2463
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
2464
- DMA_BIDIRECTIONAL);
2670
+ dma_unmap_sg(dev, req->src,
2671
+ sg_nents_for_len(req->src, src_len),
2672
+ DMA_BIDIRECTIONAL);
24652673 } else {
2466
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
2467
- DMA_TO_DEVICE);
2468
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2469
- DMA_FROM_DEVICE);
2674
+ dma_unmap_sg(dev, req->src,
2675
+ sg_nents_for_len(req->src, src_len),
2676
+ DMA_TO_DEVICE);
2677
+ dma_unmap_sg(dev, req->dst,
2678
+ sg_nents_for_len(req->dst, dst_len),
2679
+ DMA_FROM_DEVICE);
24702680 }
24712681 }
24722682
24732683 void chcr_add_aead_src_ent(struct aead_request *req,
2474
- struct ulptx_sgl *ulptx,
2475
- unsigned int assoclen)
2684
+ struct ulptx_sgl *ulptx)
24762685 {
24772686 struct ulptx_walk ulp_walk;
24782687 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
....@@ -2485,28 +2694,20 @@
24852694 buf += reqctx->b0_len;
24862695 }
24872696 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2488
- buf, assoclen, 0);
2489
- buf += assoclen;
2490
- memcpy(buf, reqctx->iv, IV);
2491
- buf += IV;
2492
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2493
- buf, req->cryptlen, req->assoclen);
2697
+ buf, req->cryptlen + req->assoclen, 0);
24942698 } else {
24952699 ulptx_walk_init(&ulp_walk, ulptx);
24962700 if (reqctx->b0_len)
24972701 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2498
- &reqctx->b0_dma);
2499
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2500
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2501
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2502
- req->assoclen);
2702
+ reqctx->b0_dma);
2703
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2704
+ req->assoclen, 0);
25032705 ulptx_walk_end(&ulp_walk);
25042706 }
25052707 }
25062708
25072709 void chcr_add_aead_dst_ent(struct aead_request *req,
25082710 struct cpl_rx_phys_dsgl *phys_cpl,
2509
- unsigned int assoclen,
25102711 unsigned short qid)
25112712 {
25122713 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
....@@ -2514,24 +2715,25 @@
25142715 struct dsgl_walk dsgl_walk;
25152716 unsigned int authsize = crypto_aead_authsize(tfm);
25162717 struct chcr_context *ctx = a_ctx(tfm);
2718
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
25172719 u32 temp;
2720
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25182721
2722
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25192723 dsgl_walk_init(&dsgl_walk, phys_cpl);
2520
- if (reqctx->b0_len)
2521
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2522
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2523
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2524
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
2525
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2526
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2724
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2725
+ temp = req->assoclen + req->cryptlen +
2726
+ (reqctx->op ? -authsize : authsize);
2727
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2728
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
25272729 }
25282730
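The rx_channel_id computation above repeats in several hunks of this patch; in isolation (ULD_CTX() and cxgb4_port_e2cchan() as used in this file):

	/* Per-request rx queue index -> owning port -> HW channel. */
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);

This replaces the static ctx->pci_chan_id / dev->rx_channel_id fields, so the channel now follows whichever rx queue the submitting CPU picked for the request.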
2529
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2731
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
25302732 void *ulptx,
25312733 struct cipher_wr_param *wrparam)
25322734 {
25332735 struct ulptx_walk ulp_walk;
2534
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2736
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
25352737 u8 *buf = ulptx;
25362738
25372739 memcpy(buf, reqctx->iv, IV);
....@@ -2549,23 +2751,25 @@
25492751 }
25502752 }
25512753
2552
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2754
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
25532755 struct cpl_rx_phys_dsgl *phys_cpl,
25542756 struct cipher_wr_param *wrparam,
25552757 unsigned short qid)
25562758 {
2557
- struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2558
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2759
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2760
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
25592761 struct chcr_context *ctx = c_ctx(tfm);
2762
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
25602763 struct dsgl_walk dsgl_walk;
2764
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25612765
2766
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25622767 dsgl_walk_init(&dsgl_walk, phys_cpl);
25632768 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
25642769 reqctx->dst_ofst);
25652770 reqctx->dstsg = dsgl_walk.last_sg;
25662771 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2567
-
2568
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2772
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
25692773 }
25702774
25712775 void chcr_add_hash_src_ent(struct ahash_request *req,
....@@ -2590,7 +2794,7 @@
25902794 ulptx_walk_init(&ulp_walk, ulptx);
25912795 if (param->bfr_len)
25922796 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2593
- &reqctx->hctx_wr.dma_addr);
2797
+ reqctx->hctx_wr.dma_addr);
25942798 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
25952799 param->sg_len, reqctx->hctx_wr.src_ofst);
25962800 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
....@@ -2630,7 +2834,7 @@
26302834 }
26312835
26322836 int chcr_cipher_dma_map(struct device *dev,
2633
- struct ablkcipher_request *req)
2837
+ struct skcipher_request *req)
26342838 {
26352839 int error;
26362840
....@@ -2659,7 +2863,7 @@
26592863 }
26602864
26612865 void chcr_cipher_dma_unmap(struct device *dev,
2662
- struct ablkcipher_request *req)
2866
+ struct skcipher_request *req)
26632867 {
26642868 if (req->src == req->dst) {
26652869 dma_unmap_sg(dev, req->src, sg_nents(req->src),
....@@ -2690,8 +2894,7 @@
26902894 return 0;
26912895 }
26922896
2693
-static void generate_b0(struct aead_request *req,
2694
- struct chcr_aead_ctx *aeadctx,
2897
+static int generate_b0(struct aead_request *req, u8 *ivptr,
26952898 unsigned short op_type)
26962899 {
26972900 unsigned int l, lp, m;
....@@ -2702,7 +2905,7 @@
27022905
27032906 m = crypto_aead_authsize(aead);
27042907
2705
- memcpy(b0, reqctx->iv, 16);
2908
+ memcpy(b0, ivptr, 16);
27062909
27072910 lp = b0[0];
27082911 l = lp + 1;
....@@ -2716,6 +2919,8 @@
27162919 rc = set_msg_len(b0 + 16 - l,
27172920 (op_type == CHCR_DECRYPT_OP) ?
27182921 req->cryptlen - m : req->cryptlen, l);
2922
+
2923
+ return rc;
27192924 }
27202925
27212926 static inline int crypto_ccm_check_iv(const u8 *iv)
....@@ -2728,29 +2933,30 @@
27282933 }
27292934
27302935 static int ccm_format_packet(struct aead_request *req,
2731
- struct chcr_aead_ctx *aeadctx,
2936
+ u8 *ivptr,
27322937 unsigned int sub_type,
27332938 unsigned short op_type,
27342939 unsigned int assoclen)
27352940 {
27362941 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2942
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2943
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
27372944 int rc = 0;
27382945
27392946 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2740
- reqctx->iv[0] = 3;
2741
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2742
- memcpy(reqctx->iv + 4, req->iv, 8);
2743
- memset(reqctx->iv + 12, 0, 4);
2947
+ ivptr[0] = 3;
2948
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2949
+ memcpy(ivptr + 4, req->iv, 8);
2950
+ memset(ivptr + 12, 0, 4);
27442951 } else {
2745
- memcpy(reqctx->iv, req->iv, 16);
2952
+ memcpy(ivptr, req->iv, 16);
27462953 }
27472954 if (assoclen)
2748
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
2749
- htons(assoclen);
2955
+ put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
27502956
2751
- generate_b0(req, aeadctx, op_type);
2957
+ rc = generate_b0(req, ivptr, op_type);
27522958 /* zero the ctr value */
2753
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2959
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
27542960 return rc;
27552961 }
27562962
....@@ -2760,13 +2966,18 @@
27602966 unsigned short op_type)
27612967 {
27622968 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2763
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2969
+ struct chcr_context *ctx = a_ctx(tfm);
2970
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
2971
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2972
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
27642973 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
27652974 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2766
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2975
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
27672976 unsigned int ccm_xtra;
27682977 unsigned int tag_offset = 0, auth_offset = 0;
27692978 unsigned int assoclen;
2979
+
2980
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
27702981
27712982 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
27722983 assoclen = req->assoclen - 8;
....@@ -2776,7 +2987,7 @@
27762987 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
27772988
27782989 auth_offset = req->cryptlen ?
2779
- (assoclen + IV + 1 + ccm_xtra) : 0;
2990
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
27802991 if (op_type == CHCR_DECRYPT_OP) {
27812992 if (crypto_aead_authsize(tfm) != req->cryptlen)
27822993 tag_offset = crypto_aead_authsize(tfm);
....@@ -2784,15 +2995,13 @@
27842995 auth_offset = 0;
27852996 }
27862997
2787
-
2788
- sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2789
- 2, assoclen + 1 + ccm_xtra);
2998
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
27902999 sec_cpl->pldlen =
2791
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
3000
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
27923001 /* For CCM there will always be a B0. So AAD start will always be 1 */
27933002 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2794
- 1, assoclen + ccm_xtra, assoclen
2795
- + IV + 1 + ccm_xtra, 0);
3003
+ 1 + IV, IV + assoclen + ccm_xtra,
3004
+ req->assoclen + IV + 1 + ccm_xtra, 0);
27963005
27973006 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
27983007 auth_offset, tag_offset,
....@@ -2839,10 +3048,11 @@
28393048 struct cpl_rx_phys_dsgl *phys_cpl;
28403049 struct ulptx_sgl *ulptx;
28413050 unsigned int transhdr_len;
2842
- unsigned int dst_size = 0, kctx_len, dnents, temp;
3051
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
28433052 unsigned int sub_type, assoclen = req->assoclen;
28443053 unsigned int authsize = crypto_aead_authsize(tfm);
28453054 int error = -EINVAL;
3055
+ u8 *ivptr;
28463056 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
28473057 GFP_ATOMIC;
28483058 struct adapter *adap = padap(a_ctx(tfm)->dev);
....@@ -2858,37 +3068,38 @@
28583068 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
28593069 if (error)
28603070 goto err;
2861
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2862
- dnents += sg_nents_xlen(req->dst, req->cryptlen
3071
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
28633072 + (reqctx->op ? -authsize : authsize),
2864
- CHCR_DST_SG_SIZE, req->assoclen);
3073
+ CHCR_DST_SG_SIZE, 0);
28653074 dnents += MIN_CCM_SG; // For IV and B0
28663075 dst_size = get_space_for_phys_dsgl(dnents);
3076
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3077
+ CHCR_SRC_SG_SIZE, 0);
3078
+ snents += MIN_CCM_SG; // For B0
28673079 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
28683080 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2869
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
3081
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
28703082 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2871
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
3083
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
28723084 reqctx->b0_len, 16) :
2873
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2874
- MIN_CCM_SG) * 8);
3085
+ (sgl_len(snents) * 8);
28753086 transhdr_len += temp;
28763087 transhdr_len = roundup(transhdr_len, 16);
28773088
28783089 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2879
- reqctx->b0_len, transhdr_len, reqctx->op)) {
3090
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
28803091 atomic_inc(&adap->chcr_stats.fallback);
28813092 chcr_aead_common_exit(req);
28823093 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
28833094 }
2884
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
3095
+ skb = alloc_skb(transhdr_len, flags);
28853096
28863097 if (!skb) {
28873098 error = -ENOMEM;
28883099 goto err;
28893100 }
28903101
2891
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
3102
+ chcr_req = __skb_put_zero(skb, transhdr_len);
28923103
28933104 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
28943105
....@@ -2898,16 +3109,17 @@
28983109 aeadctx->key, aeadctx->enckey_len);
28993110
29003111 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2901
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2902
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
3112
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3113
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
3114
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
29033115 if (error)
29043116 goto dstmap_fail;
2905
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2906
- chcr_add_aead_src_ent(req, ulptx, assoclen);
3117
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
3118
+ chcr_add_aead_src_ent(req, ulptx);
29073119
29083120 atomic_inc(&adap->chcr_stats.aead_rqst);
2909
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2910
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
3121
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3122
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
29113123 reqctx->b0_len) : 0);
29123124 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
29133125 transhdr_len, temp, 0);
....@@ -2926,20 +3138,25 @@
29263138 int size)
29273139 {
29283140 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2929
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3141
+ struct chcr_context *ctx = a_ctx(tfm);
3142
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
3143
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
29303144 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
29313145 struct sk_buff *skb = NULL;
29323146 struct chcr_wr *chcr_req;
29333147 struct cpl_rx_phys_dsgl *phys_cpl;
29343148 struct ulptx_sgl *ulptx;
2935
- unsigned int transhdr_len, dnents = 0;
3149
+ unsigned int transhdr_len, dnents = 0, snents;
29363150 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
29373151 unsigned int authsize = crypto_aead_authsize(tfm);
29383152 int error = -EINVAL;
3153
+ u8 *ivptr;
29393154 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
29403155 GFP_ATOMIC;
2941
- struct adapter *adap = padap(a_ctx(tfm)->dev);
3156
+ struct adapter *adap = padap(ctx->dev);
3157
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
29423158
3159
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
29433160 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
29443161 assoclen = req->assoclen - 8;
29453162
....@@ -2947,19 +3164,19 @@
29473164 error = chcr_aead_common_init(req);
29483165 if (error)
29493166 return ERR_PTR(error);
2950
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2951
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
3167
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
29523168 (reqctx->op ? -authsize : authsize),
2953
- CHCR_DST_SG_SIZE, req->assoclen);
3169
+ CHCR_DST_SG_SIZE, 0);
3170
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3171
+ CHCR_SRC_SG_SIZE, 0);
29543172 dnents += MIN_GCM_SG; // For IV
29553173 dst_size = get_space_for_phys_dsgl(dnents);
29563174 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
29573175 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2958
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
3176
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
29593177 SGE_MAX_WR_LEN;
2960
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2961
- (sgl_len(reqctx->src_nents +
2962
- reqctx->aad_nents + MIN_GCM_SG) * 8);
3178
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3179
+ (sgl_len(snents) * 8);
29633180 transhdr_len += temp;
29643181 transhdr_len = roundup(transhdr_len, 16);
29653182 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
....@@ -2969,7 +3186,7 @@
29693186 chcr_aead_common_exit(req);
29703187 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
29713188 }
2972
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
3189
+ skb = alloc_skb(transhdr_len, flags);
29733190 if (!skb) {
29743191 error = -ENOMEM;
29753192 goto err;
....@@ -2980,15 +3197,15 @@
29803197 // Offset of tag from end
29813198 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
29823199 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2983
- a_ctx(tfm)->dev->rx_channel_id, 2,
2984
- (assoclen + 1));
3200
+ rx_channel_id, 2, 1);
29853201 chcr_req->sec_cpl.pldlen =
2986
- htonl(assoclen + IV + req->cryptlen);
3202
+ htonl(req->assoclen + IV + req->cryptlen);
29873203 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2988
- assoclen ? 1 : 0, assoclen,
2989
- assoclen + IV + 1, 0);
3204
+ assoclen ? 1 + IV : 0,
3205
+ assoclen ? IV + assoclen : 0,
3206
+ req->assoclen + IV + 1, 0);
29903207 chcr_req->sec_cpl.cipherstop_lo_authinsert =
2991
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
3208
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
29923209 temp, temp);
29933210 chcr_req->sec_cpl.seqno_numivs =
29943211 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
....@@ -3003,25 +3220,25 @@
30033220 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
30043221 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
30053222
3223
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3224
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
30063225 /* prepare a 16-byte IV */
30073226 /* S A L T | IV | 0x00000001 */
30083227 if (get_aead_subtype(tfm) ==
30093228 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3010
- memcpy(reqctx->iv, aeadctx->salt, 4);
3011
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
3229
+ memcpy(ivptr, aeadctx->salt, 4);
3230
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
30123231 } else {
3013
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3232
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
30143233 }
3015
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3234
+ put_unaligned_be32(0x01, &ivptr[12]);
3235
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
30163236
3017
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3018
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3019
-
3020
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
3021
- chcr_add_aead_src_ent(req, ulptx, assoclen);
3237
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
3238
+ chcr_add_aead_src_ent(req, ulptx);
30223239 atomic_inc(&adap->chcr_stats.aead_rqst);
3023
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3024
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3240
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3241
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
30253242 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
30263243 transhdr_len, temp, reqctx->verify);
30273244 reqctx->skb = skb;
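For reference, the 16-byte counter block assembled in place above (RFC 4106 shown; plain GCM copies the 12-byte req->iv where the salt+IV pair sits):

	/*
	 *   bytes 0..3    salt (from setkey)
	 *   bytes 4..11   req->iv (GCM_RFC4106_IV_SIZE)
	 *   bytes 12..15  0x00000001, big endian
	 *
	 * put_unaligned_be32(0x01, &ivptr[12]) seeds the counter with 1,
	 * the J0 value GCM defines for 96-bit IVs.
	 */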
....@@ -3119,12 +3336,12 @@
31193336 aeadctx->mayverify = VERIFY_HW;
31203337 break;
31213338 case ICV_12:
3122
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3123
- aeadctx->mayverify = VERIFY_HW;
3339
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3340
+ aeadctx->mayverify = VERIFY_HW;
31243341 break;
31253342 case ICV_14:
3126
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3127
- aeadctx->mayverify = VERIFY_HW;
3343
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3344
+ aeadctx->mayverify = VERIFY_HW;
31283345 break;
31293346 case ICV_16:
31303347 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
....@@ -3224,7 +3441,6 @@
32243441 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
32253442 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
32263443 } else {
3227
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
32283444 aeadctx->enckey_len = 0;
32293445 return -EINVAL;
32303446 }
....@@ -3247,9 +3463,6 @@
32473463 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
32483464 CRYPTO_TFM_REQ_MASK);
32493465 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3250
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3251
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3252
- CRYPTO_TFM_RES_MASK);
32533466 if (error)
32543467 return error;
32553468 return chcr_ccm_common_setkey(aead, key, keylen);
....@@ -3262,7 +3475,6 @@
32623475 int error;
32633476
32643477 if (keylen < 3) {
3265
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
32663478 aeadctx->enckey_len = 0;
32673479 return -EINVAL;
32683480 }
....@@ -3270,9 +3482,6 @@
32703482 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
32713483 CRYPTO_TFM_REQ_MASK);
32723484 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3273
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3274
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3275
- CRYPTO_TFM_RES_MASK);
32763485 if (error)
32773486 return error;
32783487 keylen -= 3;
....@@ -3285,18 +3494,15 @@
32853494 {
32863495 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
32873496 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3288
- struct crypto_cipher *cipher;
32893497 unsigned int ck_size;
32903498 int ret = 0, key_ctx_size = 0;
3499
+ struct crypto_aes_ctx aes;
32913500
32923501 aeadctx->enckey_len = 0;
32933502 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
32943503 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
32953504 & CRYPTO_TFM_REQ_MASK);
32963505 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3297
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3298
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3299
- CRYPTO_TFM_RES_MASK);
33003506 if (ret)
33013507 goto out;
33023508
....@@ -3312,7 +3518,6 @@
33123518 } else if (keylen == AES_KEYSIZE_256) {
33133519 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
33143520 } else {
3315
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
33163521 pr_err("GCM: Invalid key length %d\n", keylen);
33173522 ret = -EINVAL;
33183523 goto out;
....@@ -3329,23 +3534,15 @@
33293534 /* Calculate the H = CIPH(K, 0 repeated 16 times).
33303535 * It will go in key context
33313536 */
3332
- cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3333
- if (IS_ERR(cipher)) {
3334
- aeadctx->enckey_len = 0;
3335
- ret = -ENOMEM;
3336
- goto out;
3337
- }
3338
-
3339
- ret = crypto_cipher_setkey(cipher, key, keylen);
3537
+ ret = aes_expandkey(&aes, key, keylen);
33403538 if (ret) {
33413539 aeadctx->enckey_len = 0;
3342
- goto out1;
3540
+ goto out;
33433541 }
33443542 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3345
- crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3543
+ aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3544
+ memzero_explicit(&aes, sizeof(aes));
33463545
3347
-out1:
3348
- crypto_free_cipher(cipher);
33493546 out:
33503547 return ret;
33513548 }
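Condensed, the hash-subkey derivation this hunk switches to (H = E_K(0^128), computed with the synchronous AES library instead of an allocated crypto_cipher):

	struct crypto_aes_ctx aes;

	ret = aes_expandkey(&aes, key, keylen);	/* also validates keylen */
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);	/* H = E_K(0) */
	memzero_explicit(&aes, sizeof(aes));	/* scrub the round keys */

Nothing here can fail after key expansion, which is what lets the old out1 unwind label disappear.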
....@@ -3371,19 +3568,14 @@
33713568 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
33723569 & CRYPTO_TFM_REQ_MASK);
33733570 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3374
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3375
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3376
- & CRYPTO_TFM_RES_MASK);
33773571 if (err)
33783572 goto out;
33793573
3380
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3381
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3574
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
33823575 goto out;
3383
- }
33843576
33853577 if (get_alg_config(&param, max_authsize)) {
3386
- pr_err("chcr : Unsupported digest size\n");
3578
+ pr_err("Unsupported digest size\n");
33873579 goto out;
33883580 }
33893581 subtype = get_aead_subtype(authenc);
....@@ -3402,7 +3594,7 @@
34023594 } else if (keys.enckeylen == AES_KEYSIZE_256) {
34033595 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
34043596 } else {
3405
- pr_err("chcr : Unsupported cipher key\n");
3597
+ pr_err("Unsupported cipher key\n");
34063598 goto out;
34073599 }
34083600
....@@ -3420,16 +3612,13 @@
34203612 }
34213613 base_hash = chcr_alloc_shash(max_authsize);
34223614 if (IS_ERR(base_hash)) {
3423
- pr_err("chcr : Base driver cannot be loaded\n");
3424
- aeadctx->enckey_len = 0;
3425
- memzero_explicit(&keys, sizeof(keys));
3426
- return -EINVAL;
3615
+ pr_err("Base driver cannot be loaded\n");
3616
+ goto out;
34273617 }
34283618 {
34293619 SHASH_DESC_ON_STACK(shash, base_hash);
34303620
34313621 shash->tfm = base_hash;
3432
- shash->flags = crypto_shash_get_flags(base_hash);
34333622 bs = crypto_shash_blocksize(base_hash);
34343623 align = KEYCTX_ALIGN_PAD(max_authsize);
34353624 o_ptr = actx->h_iopad + param.result_size + align;
....@@ -3439,7 +3628,7 @@
34393628 keys.authkeylen,
34403629 o_ptr);
34413630 if (err) {
3442
- pr_err("chcr : Base driver cannot be loaded\n");
3631
+ pr_err("Base driver cannot be loaded\n");
34433632 goto out;
34443633 }
34453634 keys.authkeylen = max_authsize;
....@@ -3502,16 +3691,12 @@
35023691 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
35033692 & CRYPTO_TFM_REQ_MASK);
35043693 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3505
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3506
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3507
- & CRYPTO_TFM_RES_MASK);
35083694 if (err)
35093695 goto out;
35103696
3511
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3512
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3697
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
35133698 goto out;
3514
- }
3699
+
35153700 subtype = get_aead_subtype(authenc);
35163701 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
35173702 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
....@@ -3528,7 +3713,7 @@
35283713 } else if (keys.enckeylen == AES_KEYSIZE_256) {
35293714 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
35303715 } else {
3531
- pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3716
+ pr_err("Unsupported cipher key %d\n", keys.enckeylen);
35323717 goto out;
35333718 }
35343719 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
....@@ -3556,38 +3741,64 @@
35563741 create_wr_t create_wr_fn)
35573742 {
35583743 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3559
- struct uld_ctx *u_ctx;
3744
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3745
+ struct chcr_context *ctx = a_ctx(tfm);
3746
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
35603747 struct sk_buff *skb;
3561
- int isfull = 0;
3748
+ struct chcr_dev *cdev;
35623749
3563
- if (!a_ctx(tfm)->dev) {
3564
- pr_err("chcr : %s : No crypto device.\n", __func__);
3750
+ cdev = a_ctx(tfm)->dev;
3751
+ if (!cdev) {
3752
+ pr_err("%s : No crypto device.\n", __func__);
35653753 return -ENXIO;
35663754 }
3567
- u_ctx = ULD_CTX(a_ctx(tfm));
3755
+
3756
+ if (chcr_inc_wrcount(cdev)) {
3757
+ /* Detach state for CHCR means lldi or padap is freed.
3758
+ * We cannot increment the fallback counter here.
3759
+ */
3760
+ return chcr_aead_fallback(req, reqctx->op);
3761
+ }
3762
+
35683763 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3569
- a_ctx(tfm)->tx_qidx)) {
3570
- isfull = 1;
3571
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3764
+ reqctx->txqidx) &&
3765
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3766
+ chcr_dec_wrcount(cdev);
35723767 return -ENOSPC;
35733768 }
35743769
3575
- /* Form a WR from req */
3576
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3770
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3771
+ crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3772
+ pr_err("RFC4106: Invalid value of assoclen %d\n",
3773
+ req->assoclen);
3774
+ return -EINVAL;
3775
+ }
35773776
3578
- if (IS_ERR(skb) || !skb)
3579
- return PTR_ERR(skb);
3777
+ /* Form a WR from req */
3778
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3779
+
3780
+ if (IS_ERR_OR_NULL(skb)) {
3781
+ chcr_dec_wrcount(cdev);
3782
+ return PTR_ERR_OR_ZERO(skb);
3783
+ }
35803784
35813785 skb->dev = u_ctx->lldi.ports[0];
3582
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3786
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
35833787 chcr_send_wr(skb);
3584
- return isfull ? -EBUSY : -EINPROGRESS;
3788
+ return -EINPROGRESS;
35853789 }
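In sketch form, the submission guard chcr_aead_op() now follows (error handling abridged; names as above): every accepted request holds one inflight reference, and a detaching device diverts new work to the software fallback:

	if (chcr_inc_wrcount(cdev))	/* CHCR_DETACH: lldi going away */
		return chcr_aead_fallback(req, reqctx->op);

	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);	/* undo before failing */
		return PTR_ERR_OR_ZERO(skb);
	}
	/* the completion handler drops the reference again */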
35863790
35873791 static int chcr_aead_encrypt(struct aead_request *req)
35883792 {
35893793 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
35903794 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3795
+ struct chcr_context *ctx = a_ctx(tfm);
3796
+ unsigned int cpu;
3797
+
3798
+ cpu = get_cpu();
3799
+ reqctx->txqidx = cpu % ctx->ntxq;
3800
+ reqctx->rxqidx = cpu % ctx->nrxq;
3801
+ put_cpu();
35913802
35923803 reqctx->verify = VERIFY_HW;
35933804 reqctx->op = CHCR_ENCRYPT_OP;
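Both entry points now derive their queue pair from the submitting CPU; in isolation (ntxq/nrxq being the per-context queue counts):

	unsigned int cpu = get_cpu();	/* disables preemption, so the id
					 * stays valid while it is used */
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

The indices live in the request context rather than the tfm context, so concurrent requests on one tfm can spread across queues instead of serializing onto a single pair.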
....@@ -3609,9 +3820,16 @@
36093820 static int chcr_aead_decrypt(struct aead_request *req)
36103821 {
36113822 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3612
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3823
+ struct chcr_context *ctx = a_ctx(tfm);
3824
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
36133825 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
36143826 int size;
3827
+ unsigned int cpu;
3828
+
3829
+ cpu = get_cpu();
3830
+ reqctx->txqidx = cpu % ctx->ntxq;
3831
+ reqctx->rxqidx = cpu % ctx->nrxq;
3832
+ put_cpu();
36153833
36163834 if (aeadctx->mayverify == VERIFY_SW) {
36173835 size = crypto_aead_maxauthsize(tfm);
....@@ -3638,83 +3856,76 @@
36383856 static struct chcr_alg_template driver_algs[] = {
36393857 /* AES-CBC */
36403858 {
3641
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3859
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
36423860 .is_registered = 0,
3643
- .alg.crypto = {
3644
- .cra_name = "cbc(aes)",
3645
- .cra_driver_name = "cbc-aes-chcr",
3646
- .cra_blocksize = AES_BLOCK_SIZE,
3647
- .cra_init = chcr_cra_init,
3648
- .cra_exit = chcr_cra_exit,
3649
- .cra_u.ablkcipher = {
3650
- .min_keysize = AES_MIN_KEY_SIZE,
3651
- .max_keysize = AES_MAX_KEY_SIZE,
3652
- .ivsize = AES_BLOCK_SIZE,
3653
- .setkey = chcr_aes_cbc_setkey,
3654
- .encrypt = chcr_aes_encrypt,
3655
- .decrypt = chcr_aes_decrypt,
3861
+ .alg.skcipher = {
3862
+ .base.cra_name = "cbc(aes)",
3863
+ .base.cra_driver_name = "cbc-aes-chcr",
3864
+ .base.cra_blocksize = AES_BLOCK_SIZE,
3865
+
3866
+ .init = chcr_init_tfm,
3867
+ .exit = chcr_exit_tfm,
3868
+ .min_keysize = AES_MIN_KEY_SIZE,
3869
+ .max_keysize = AES_MAX_KEY_SIZE,
3870
+ .ivsize = AES_BLOCK_SIZE,
3871
+ .setkey = chcr_aes_cbc_setkey,
3872
+ .encrypt = chcr_aes_encrypt,
3873
+ .decrypt = chcr_aes_decrypt,
36563874 }
3875
+ },
3876
+ {
3877
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3878
+ .is_registered = 0,
3879
+ .alg.skcipher = {
3880
+ .base.cra_name = "xts(aes)",
3881
+ .base.cra_driver_name = "xts-aes-chcr",
3882
+ .base.cra_blocksize = AES_BLOCK_SIZE,
3883
+
3884
+ .init = chcr_init_tfm,
3885
+ .exit = chcr_exit_tfm,
3886
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
3887
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
3888
+ .ivsize = AES_BLOCK_SIZE,
3889
+ .setkey = chcr_aes_xts_setkey,
3890
+ .encrypt = chcr_aes_encrypt,
3891
+ .decrypt = chcr_aes_decrypt,
3892
+ }
3893
+ },
3894
+ {
3895
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3896
+ .is_registered = 0,
3897
+ .alg.skcipher = {
3898
+ .base.cra_name = "ctr(aes)",
3899
+ .base.cra_driver_name = "ctr-aes-chcr",
3900
+ .base.cra_blocksize = 1,
3901
+
3902
+ .init = chcr_init_tfm,
3903
+ .exit = chcr_exit_tfm,
3904
+ .min_keysize = AES_MIN_KEY_SIZE,
3905
+ .max_keysize = AES_MAX_KEY_SIZE,
3906
+ .ivsize = AES_BLOCK_SIZE,
3907
+ .setkey = chcr_aes_ctr_setkey,
3908
+ .encrypt = chcr_aes_encrypt,
3909
+ .decrypt = chcr_aes_decrypt,
36573910 }
36583911 },
36593912 {
3660
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3661
- .is_registered = 0,
3662
- .alg.crypto = {
3663
- .cra_name = "xts(aes)",
3664
- .cra_driver_name = "xts-aes-chcr",
3665
- .cra_blocksize = AES_BLOCK_SIZE,
3666
- .cra_init = chcr_cra_init,
3667
- .cra_exit = NULL,
3668
- .cra_u .ablkcipher = {
3669
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
3670
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
3671
- .ivsize = AES_BLOCK_SIZE,
3672
- .setkey = chcr_aes_xts_setkey,
3673
- .encrypt = chcr_aes_encrypt,
3674
- .decrypt = chcr_aes_decrypt,
3675
- }
3676
- }
3677
- },
3678
- {
3679
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3680
- .is_registered = 0,
3681
- .alg.crypto = {
3682
- .cra_name = "ctr(aes)",
3683
- .cra_driver_name = "ctr-aes-chcr",
3684
- .cra_blocksize = 1,
3685
- .cra_init = chcr_cra_init,
3686
- .cra_exit = chcr_cra_exit,
3687
- .cra_u.ablkcipher = {
3688
- .min_keysize = AES_MIN_KEY_SIZE,
3689
- .max_keysize = AES_MAX_KEY_SIZE,
3690
- .ivsize = AES_BLOCK_SIZE,
3691
- .setkey = chcr_aes_ctr_setkey,
3692
- .encrypt = chcr_aes_encrypt,
3693
- .decrypt = chcr_aes_decrypt,
3694
- }
3695
- }
3696
- },
3697
- {
3698
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3913
+ .type = CRYPTO_ALG_TYPE_SKCIPHER |
36993914 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
37003915 .is_registered = 0,
3701
- .alg.crypto = {
3702
- .cra_name = "rfc3686(ctr(aes))",
3703
- .cra_driver_name = "rfc3686-ctr-aes-chcr",
3704
- .cra_blocksize = 1,
3705
- .cra_init = chcr_rfc3686_init,
3706
- .cra_exit = chcr_cra_exit,
3707
- .cra_u.ablkcipher = {
3708
- .min_keysize = AES_MIN_KEY_SIZE +
3709
- CTR_RFC3686_NONCE_SIZE,
3710
- .max_keysize = AES_MAX_KEY_SIZE +
3711
- CTR_RFC3686_NONCE_SIZE,
3712
- .ivsize = CTR_RFC3686_IV_SIZE,
3713
- .setkey = chcr_aes_rfc3686_setkey,
3714
- .encrypt = chcr_aes_encrypt,
3715
- .decrypt = chcr_aes_decrypt,
3716
- .geniv = "seqiv",
3717
- }
3916
+ .alg.skcipher = {
3917
+ .base.cra_name = "rfc3686(ctr(aes))",
3918
+ .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
3919
+ .base.cra_blocksize = 1,
3920
+
3921
+ .init = chcr_rfc3686_init,
3922
+ .exit = chcr_exit_tfm,
3923
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3924
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3925
+ .ivsize = CTR_RFC3686_IV_SIZE,
3926
+ .setkey = chcr_aes_rfc3686_setkey,
3927
+ .encrypt = chcr_aes_encrypt,
3928
+ .decrypt = chcr_aes_decrypt,
37183929 }
37193930 },
37203931 /* SHA */
....@@ -4169,7 +4380,6 @@
41694380 .setauthsize = chcr_authenc_null_setauthsize,
41704381 }
41714382 },
4172
-
41734383 };
41744384
41754385 /*
....@@ -4182,23 +4392,33 @@
41824392
41834393 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
41844394 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4185
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
4186
- if (driver_algs[i].is_registered)
4187
- crypto_unregister_alg(
4188
- &driver_algs[i].alg.crypto);
4395
+ case CRYPTO_ALG_TYPE_SKCIPHER:
4396
+ if (driver_algs[i].is_registered && refcount_read(
4397
+ &driver_algs[i].alg.skcipher.base.cra_refcnt)
4398
+ == 1) {
4399
+ crypto_unregister_skcipher(
4400
+ &driver_algs[i].alg.skcipher);
4401
+ driver_algs[i].is_registered = 0;
4402
+ }
41894403 break;
41904404 case CRYPTO_ALG_TYPE_AEAD:
4191
- if (driver_algs[i].is_registered)
4405
+ if (driver_algs[i].is_registered && refcount_read(
4406
+ &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
41924407 crypto_unregister_aead(
41934408 &driver_algs[i].alg.aead);
4409
+ driver_algs[i].is_registered = 0;
4410
+ }
41944411 break;
41954412 case CRYPTO_ALG_TYPE_AHASH:
4196
- if (driver_algs[i].is_registered)
4413
+ if (driver_algs[i].is_registered && refcount_read(
4414
+ &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4415
+ == 1) {
41974416 crypto_unregister_ahash(
41984417 &driver_algs[i].alg.hash);
4418
+ driver_algs[i].is_registered = 0;
4419
+ }
41994420 break;
42004421 }
4201
- driver_algs[i].is_registered = 0;
42024422 }
42034423 return 0;
42044424 }
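The teardown rule above, isolated (the same pattern is applied to the skcipher, aead and ahash cases): an entry is unregistered only when the crypto core's refcount shows registration itself as the last user, so an algorithm still held by a tfm simply stays registered, and is_registered is now cleared per entry inside that branch:

	if (driver_algs[i].is_registered && refcount_read(
	    &driver_algs[i].alg.skcipher.base.cra_refcnt) == 1) {
		crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
		driver_algs[i].is_registered = 0;
	}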
....@@ -4221,25 +4441,26 @@
42214441 if (driver_algs[i].is_registered)
42224442 continue;
42234443 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4224
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
4225
- driver_algs[i].alg.crypto.cra_priority =
4444
+ case CRYPTO_ALG_TYPE_SKCIPHER:
4445
+ driver_algs[i].alg.skcipher.base.cra_priority =
42264446 CHCR_CRA_PRIORITY;
4227
- driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4228
- driver_algs[i].alg.crypto.cra_flags =
4229
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4447
+ driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4448
+ driver_algs[i].alg.skcipher.base.cra_flags =
4449
+ CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4450
+ CRYPTO_ALG_ALLOCATES_MEMORY |
42304451 CRYPTO_ALG_NEED_FALLBACK;
4231
- driver_algs[i].alg.crypto.cra_ctxsize =
4452
+ driver_algs[i].alg.skcipher.base.cra_ctxsize =
42324453 sizeof(struct chcr_context) +
42334454 sizeof(struct ablk_ctx);
4234
- driver_algs[i].alg.crypto.cra_alignmask = 0;
4235
- driver_algs[i].alg.crypto.cra_type =
4236
- &crypto_ablkcipher_type;
4237
- err = crypto_register_alg(&driver_algs[i].alg.crypto);
4238
- name = driver_algs[i].alg.crypto.cra_driver_name;
4455
+ driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4456
+
4457
+ err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4458
+ name = driver_algs[i].alg.skcipher.base.cra_driver_name;
42394459 break;
42404460 case CRYPTO_ALG_TYPE_AEAD:
42414461 driver_algs[i].alg.aead.base.cra_flags =
4242
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4462
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4463
+ CRYPTO_ALG_ALLOCATES_MEMORY;
42434464 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
42444465 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
42454466 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
....@@ -4259,7 +4480,8 @@
42594480 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
42604481 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
42614482 a_hash->halg.base.cra_module = THIS_MODULE;
4262
- a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4483
+ a_hash->halg.base.cra_flags =
4484
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
42634485 a_hash->halg.base.cra_alignmask = 0;
42644486 a_hash->halg.base.cra_exit = NULL;
42654487
....@@ -4280,8 +4502,7 @@
42804502 break;
42814503 }
42824504 if (err) {
4283
- pr_err("chcr : %s : Algorithm registration failed\n",
4284
- name);
4505
+ pr_err("%s : Algorithm registration failed\n", name);
42854506 goto register_err;
42864507 } else {
42874508 driver_algs[i].is_registered = 1;