@@ -44,7 +44,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
-#include <linux/cryptohash.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/highmem.h>
@@ -93,7 +92,7 @@
 	0x1B000000, 0x36000000, 0x6C000000
 };
 
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err);
 
 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -123,7 +122,7 @@
 
 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 {
-	return ctx->dev->u_ctx;
+	return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
 static inline int is_ofld_imm(const struct sk_buff *skb)
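The new ULD_CTX() recovers the enclosing uld_ctx from the embedded chcr_dev instead of chasing a stored back-pointer. A minimal userspace sketch of the container_of pattern this relies on (the struct names here are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dev  { int state; };
    struct uctx { long lldi; struct dev dev; };  /* dev embedded, not pointed to */

    int main(void)
    {
        struct uctx u = { .lldi = 42 };
        struct dev *d = &u.dev;                  /* callers hold only this */
        struct uctx *back = container_of(d, struct uctx, dev);
        printf("%ld\n", back->lldi);             /* prints 42: outer struct recovered */
        return 0;
    }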
@@ -198,18 +197,36 @@
 	*err = 0;
 }
 
-static inline void chcr_handle_aead_resp(struct aead_request *req,
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+	if (dev->state == CHCR_DETACH)
+		return 1;
+	atomic_inc(&dev->inflight);
+	return 0;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+	atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
 					 unsigned char *input,
 					 int err)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_dev *dev = a_ctx(tfm)->dev;
 
 	chcr_aead_common_exit(req);
 	if (reqctx->verify == VERIFY_SW) {
 		chcr_verify_tag(req, input, &err);
 		reqctx->verify = VERIFY_HW;
 	}
+	chcr_dec_wrcount(dev);
 	req->base.complete(&req->base, err);
+
+	return err;
 }
 
 static void get_aes_decrypt_key(unsigned char *dec_key,
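chcr_inc_wrcount()/chcr_dec_wrcount() gate new work requests on device state and keep an inflight count so teardown can wait for outstanding completions. A rough userspace analogue with C11 atomics (names and the bare detached flag are mine; the real driver pairs this with its detach logic):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct dev {
        atomic_int  inflight;
        atomic_bool detached;
    };

    /* Refuse new work once the device is detaching; otherwise count it in. */
    static int inc_wrcount(struct dev *d)
    {
        if (atomic_load(&d->detached))
            return 1;                       /* caller bails out with -ENXIO */
        atomic_fetch_add(&d->inflight, 1);
        return 0;
    }

    static void dec_wrcount(struct dev *d)
    {
        atomic_fetch_sub(&d->inflight, 1);  /* completion path */
    }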
@@ -238,7 +255,7 @@
 		return;
 	}
 	for (i = 0; i < nk; i++)
-		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 
 	i = 0;
 	temp = w_ring[nk - 1];
@@ -257,7 +274,7 @@
 	}
 	i--;
 	for (k = 0, j = i % nk; k < nk; k++) {
-		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 		j--;
 		if (j < 0)
 			j += nk;
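The old `*(u32 *)&key[4 * i]` cast is undefined behaviour on alignment-strict architectures when the key buffer is not 4-byte aligned; get_unaligned_be32()/put_unaligned_be32() sidestep that. A portable sketch of what such helpers boil down to:

    #include <stdint.h>

    /* Read a 32-bit big-endian value from a possibly unaligned pointer. */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* Store a 32-bit value big-endian, one byte at a time. */
    static void store_be32(uint32_t v, uint8_t *p)
    {
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
    }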
@@ -391,7 +408,7 @@
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 				      size_t size,
-				      dma_addr_t *addr)
+				      dma_addr_t addr)
 {
 	int j;
 
@@ -399,7 +416,7 @@
 		return;
 	j = walk->nents;
 	walk->to->len[j % 8] = htons(size);
-	walk->to->addr[j % 8] = cpu_to_be64(*addr);
+	walk->to->addr[j % 8] = cpu_to_be64(addr);
 	j++;
 	if ((j % 8) == 0)
 		walk->to++;
@@ -473,16 +490,16 @@
 
 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 				       size_t size,
-				       dma_addr_t *addr)
+				       dma_addr_t addr)
 {
 	if (!size)
 		return;
 
 	if (walk->nents == 0) {
 		walk->sgl->len0 = cpu_to_be32(size);
-		walk->sgl->addr0 = cpu_to_be64(*addr);
+		walk->sgl->addr0 = cpu_to_be64(addr);
 	} else {
-		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 		walk->pair_idx = !walk->pair_idx;
 		if (!walk->pair_idx)
@@ -550,11 +567,11 @@
 	}
 }
 
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct chcr_alg_template *chcr_crypto_alg =
-		container_of(alg, struct chcr_alg_template, alg.crypto);
+		container_of(alg, struct chcr_alg_template, alg.skcipher);
 
 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
@@ -674,29 +691,71 @@
 }
 
 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
-				u32 flags,
-				struct scatterlist *src,
-				struct scatterlist *dst,
-				unsigned int nbytes,
+				struct skcipher_request *req,
 				u8 *iv,
 				unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	int err;
 
-	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, iv);
 
-	skcipher_request_set_tfm(subreq, cipher);
-	skcipher_request_set_callback(subreq, flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, src, dst,
-				   nbytes, iv);
-
-	err = op_type ? crypto_skcipher_decrypt(subreq) :
-		crypto_skcipher_encrypt(subreq);
-	skcipher_request_zero(subreq);
+	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+		crypto_skcipher_encrypt(&reqctx->fallback_req);
 
 	return err;
 
 }
+
+static inline int get_qidxs(struct crypto_async_request *req,
+			    unsigned int *txqidx, unsigned int *rxqidx)
+{
+	struct crypto_tfm *tfm = req->tfm;
+	int ret = 0;
+
+	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+	{
+		struct aead_request *aead_req =
+			container_of(req, struct aead_request, base);
+		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+		*txqidx = reqctx->txqidx;
+		*rxqidx = reqctx->rxqidx;
+		break;
+	}
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+	{
+		struct skcipher_request *sk_req =
+			container_of(req, struct skcipher_request, base);
+		struct chcr_skcipher_req_ctx *reqctx =
+			skcipher_request_ctx(sk_req);
+		*txqidx = reqctx->txqidx;
+		*rxqidx = reqctx->rxqidx;
+		break;
+	}
+	case CRYPTO_ALG_TYPE_AHASH:
+	{
+		struct ahash_request *ahash_req =
+			container_of(req, struct ahash_request, base);
+		struct chcr_ahash_req_ctx *reqctx =
+			ahash_request_ctx(ahash_req);
+		*txqidx = reqctx->txqidx;
+		*rxqidx = reqctx->rxqidx;
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		/* should never get here */
+		BUG();
+		break;
+	}
+	return ret;
+}
+
 static inline void create_wreq(struct chcr_context *ctx,
 			       struct chcr_wr *chcr_req,
 			       struct crypto_async_request *req,
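The fallback no longer uses SKCIPHER_REQUEST_ON_STACK; the sub-request now lives at the tail of the driver's request context (see the crypto_skcipher_set_reqsize() change in chcr_init_tfm() further down), so the pattern is: mirror the original request's parameters onto the preallocated sub-request and hand it to the software tfm. A hedged kernel-style sketch of that pattern, assuming a reqctx whose last member is `struct skcipher_request fallback_req` — not the driver's exact code:

    static int do_fallback(struct crypto_skcipher *sw_tfm,
                           struct skcipher_request *req, u8 *iv, bool decrypt)
    {
        struct my_reqctx *rctx = skcipher_request_ctx(req);

        /* Point the embedded sub-request at the software implementation. */
        skcipher_request_set_tfm(&rctx->fallback_req, sw_tfm);
        /* Reuse the caller's flags and completion so async behaviour is kept. */
        skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
                                   req->cryptlen, iv);
        return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req)
                       : crypto_skcipher_encrypt(&rctx->fallback_req);
    }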
@@ -707,7 +766,16 @@
 			       unsigned int lcb)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+	unsigned int tx_channel_id, rx_channel_id;
+	unsigned int txqidx = 0, rxqidx = 0;
+	unsigned int qid, fid, portno;
+
+	get_qidxs(req, &txqidx, &rxqidx);
+	qid = u_ctx->lldi.rxq_ids[rxqidx];
+	fid = u_ctx->lldi.rxq_ids[0];
+	portno = rxqidx / ctx->rxq_perchan;
+	tx_channel_id = txqidx / ctx->txq_perchan;
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 
 
 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -716,15 +784,12 @@
 	chcr_req->wreq.len16_pkd =
 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
-	chcr_req->wreq.rx_chid_to_rx_q_id =
-		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
-				!!lcb, ctx->tx_qidx);
+	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
+							    !!lcb, txqidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
-						       qid);
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
-				((sizeof(chcr_req->wreq)) >> 4)));
-
+				     ((sizeof(chcr_req->wreq)) >> 4)));
 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 					   sizeof(chcr_req->key_ctx) + sc_len);
@@ -739,22 +804,26 @@
  */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
-	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct ulptx_sgl *ulptx;
-	struct chcr_blkcipher_req_ctx *reqctx =
-		ablkcipher_request_ctx(wrparam->req);
+	struct chcr_skcipher_req_ctx *reqctx =
+		skcipher_request_ctx(wrparam->req);
 	unsigned int temp = 0, transhdr_len, dst_size;
 	int error;
 	int nents;
 	unsigned int kctx_len;
 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 			GFP_KERNEL : GFP_ATOMIC;
-	struct adapter *adap = padap(c_ctx(tfm)->dev);
+	struct adapter *adap = padap(ctx->dev);
+	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
 			      reqctx->dst_ofst);
 	dst_size = get_space_for_phys_dsgl(nents);
@@ -773,7 +842,7 @@
 	}
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 	chcr_req->sec_cpl.op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
 
 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -789,9 +858,9 @@
 
 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
@@ -825,7 +894,7 @@
 	if (reqctx->op && (ablkctx->ciph_mode ==
 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 		sg_pcopy_to_buffer(wrparam->req->src,
-			sg_nents(wrparam->req->src), wrparam->req->info, 16,
+			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 
 	return skb;
@@ -848,26 +917,20 @@
 
 	return ck_size;
 }
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 				       const u8 *key,
 				       unsigned int keylen)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
-	int err = 0;
 
-	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
-				  CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
-	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->crt_flags |=
-		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
-		CRYPTO_TFM_RES_MASK;
-	return err;
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
+				    CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ablkctx->sw_cipher,
+				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 }
 
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -893,13 +956,12 @@
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
 }
 
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -924,13 +986,12 @@
 
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
 }
 
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key,
 				   unsigned int keylen)
 {
@@ -962,7 +1023,6 @@
 
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
@@ -992,34 +1052,40 @@
 	u32 temp = be32_to_cpu(*--b);
 
 	temp = ~temp;
-	c = (u64)temp + 1; // No of block can processed withou overflow
-	if ((bytes / AES_BLOCK_SIZE) > c)
+	c = (u64)temp + 1; // No of block can processed without overflow
+	if ((bytes / AES_BLOCK_SIZE) >= c)
 		bytes = c * AES_BLOCK_SIZE;
 	return bytes;
 }
 
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
 			     u32 isfinal)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct crypto_cipher *cipher;
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_aes_ctx aes;
 	int ret, i;
 	u8 *key;
 	unsigned int keylen;
 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
 	int round8 = round / 8;
 
-	cipher = ablkctx->aes_generic;
 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
 
 	keylen = ablkctx->enckey_len / 2;
 	key = ablkctx->key + keylen;
-	ret = crypto_cipher_setkey(cipher, key, keylen);
+	/* For a 192 bit key remove the padded zeroes which was
+	 * added in chcr_xts_setkey
+	 */
+	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+		ret = aes_expandkey(&aes, key, keylen - 8);
+	else
+		ret = aes_expandkey(&aes, key, keylen);
 	if (ret)
-		goto out;
-	crypto_cipher_encrypt_one(cipher, iv, iv);
+		return ret;
+	aes_encrypt(&aes, iv, iv);
 	for (i = 0; i < round8; i++)
 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
 
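The `>` to `>=` change in adjust_ctr_overflow() matters for requests whose block count exactly consumes the remaining 32-bit counter headroom plus a partial trailing block: the cap now fires so the tail goes out in a follow-up request after the IV fix-up, instead of wrapping the counter mid-request. A small runnable illustration of the arithmetic (my own framing of the condition, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ctr_lo = 0xfffffffe;           /* low word of the counter block */
        uint64_t c = (uint64_t)(~ctr_lo) + 1;   /* = 2 blocks until the word wraps */
        uint64_t bytes = 40;                    /* 2 full blocks + 8 trailing bytes */

        /* old test: (bytes / 16) > c  -> 2 > 2 is false, all 40 bytes go out
         * and the counter wraps inside the request;
         * new test: (bytes / 16) >= c -> cap at c * 16 = 32, the tail is sent
         * in a follow-up request once the IV has been updated. */
        if ((bytes / 16) >= c)
            bytes = c * 16;
        printf("%llu\n", (unsigned long long)bytes);    /* prints 32 */
        return 0;
    }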
@@ -1027,21 +1093,22 @@
 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
 
 	if (!isfinal)
-		crypto_cipher_decrypt_one(cipher, iv, iv);
-out:
-	return ret;
+		aes_decrypt(&aes, iv, iv);
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
 }
 
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
 				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;
 
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, (reqctx->processed /
+		ctr_add_iv(iv, req->iv, (reqctx->processed /
 			   AES_BLOCK_SIZE));
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
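chcr_update_tweak() recomputes the XTS tweak for the next chunk by encrypting the IV with the second key half and then multiplying by x in GF(2^128) once per processed block (gf128mul_x_ble, batched eight at a time by gf128mul_x8_ble). A self-contained sketch of that per-block doubling in the little-endian "ble" convention XTS uses — assuming the standard reduction polynomial, not the kernel's le128 internals:

    #include <stdint.h>

    /* Multiply a 128-bit XTS tweak by x in GF(2^128), little-endian layout:
     * shift the whole value left one bit; if the top bit fell off, xor the
     * reduction constant 0x87 into the low byte. */
    static void gf128_mul_x_ble(uint8_t t[16])
    {
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
            uint8_t next = t[i] >> 7;
            t[i] = (uint8_t)(t[i] << 1) | carry;
            carry = next;
        }
        if (carry)
            t[0] ^= 0x87;
    }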
@@ -1052,7 +1119,7 @@
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
 		if (reqctx->op)
 			/*Updated before sending last WR*/
-			memcpy(iv, req->info, AES_BLOCK_SIZE);
+			memcpy(iv, req->iv, AES_BLOCK_SIZE);
 		else
 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
 	}
@@ -1066,19 +1133,23 @@
  * for subsequent update requests
  */
 
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
 				struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;
 
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, (reqctx->processed /
-			   AES_BLOCK_SIZE));
-	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
-		ret = chcr_update_tweak(req, iv, 1);
+		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
+						     AES_BLOCK_SIZE));
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
+		if (!reqctx->partial_req)
+			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+		else
+			ret = chcr_update_tweak(req, iv, 1);
+	}
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
 		/*Already updated for Decrypt*/
 		if (!reqctx->op)
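The final-IV computation for CTR now uses DIV_ROUND_UP(processed, AES_BLOCK_SIZE) instead of plain division, so a partial trailing block still advances the counter by one. The macro is ordinary ceiling division:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* processed = 40 bytes, block = 16:
     *   40 / 16             -> 2  (old: partial block not counted)
     *   DIV_ROUND_UP(40,16) -> 3  (new: counter advanced past the partial block)
     */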
@@ -1089,24 +1160,27 @@
 
 }
 
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
-	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct sk_buff *skb;
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct cipher_wr_param wrparam;
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+	struct cipher_wr_param wrparam;
+	struct sk_buff *skb;
 	int bytes;
 
 	if (err)
 		goto unmap;
-	if (req->nbytes == reqctx->processed) {
+	if (req->cryptlen == reqctx->processed) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
-		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
 		goto complete;
 	}
 
@@ -1114,13 +1188,13 @@
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
 		/*CTR mode counter overfloa*/
-		bytes = req->nbytes - reqctx->processed;
+		bytes = req->cryptlen - reqctx->processed;
 	}
 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
 	if (err)
@@ -1129,65 +1203,85 @@
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->nbytes,
-					   req->info,
-					   reqctx->op);
+		memcpy(req->iv, reqctx->init_iv, IV);
+		atomic_inc(&adap->chcr_stats.fallback);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+					   reqctx->op);
 		goto complete;
 	}
 
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)
 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
-	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
+	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
 	wrparam.req = req;
 	wrparam.bytes = bytes;
 	skb = create_cipher_wr(&wrparam);
 	if (IS_ERR(skb)) {
-		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+		pr_err("%s : Failed to form WR. No memory\n", __func__);
 		err = PTR_ERR(skb);
 		goto unmap;
 	}
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
 	chcr_send_wr(skb);
 	reqctx->last_req_len = bytes;
 	reqctx->processed += bytes;
+	if (get_cryptoalg_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP ) {
+		complete(&ctx->cbc_aes_aio_done);
+	}
 	return 0;
 unmap:
 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 complete:
+	if (get_cryptoalg_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP ) {
+		complete(&ctx->cbc_aes_aio_done);
+	}
+	chcr_dec_wrcount(dev);
 	req->base.complete(&req->base, err);
 	return err;
 }
 
-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
 			  unsigned short qid,
 			  struct sk_buff **skb,
 			  unsigned short op_type)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;
+	int subtype;
 
 	reqctx->processed = 0;
-	if (!req->info)
+	reqctx->partial_req = 0;
+	if (!req->iv)
 		goto error;
+	subtype = get_cryptoalg_subtype(tfm);
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes == 0) ||
-	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+	    (req->cryptlen == 0) ||
+	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
+		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
-		       ablkctx->enckey_len, req->nbytes, ivsize);
+		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
 	}
-	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
-	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+
+	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+	if (err)
+		goto error;
+	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
 					    AES_MIN_KEY_SIZE +
 					    sizeof(struct cpl_rx_phys_dsgl) +
 					/*Min dsgl size*/
@@ -1195,14 +1289,14 @@
 					    /* Can be sent as Imm*/
 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
 
-		dnents = sg_nents_xlen(req->dst, req->nbytes,
+		dnents = sg_nents_xlen(req->dst, req->cryptlen,
 				       CHCR_DST_SG_SIZE, 0);
 		phys_dsgl = get_space_for_phys_dsgl(dnents);
 		kctx_len = roundup(ablkctx->enckey_len, 16);
 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
 			SGE_MAX_WR_LEN;
-		bytes = IV + req->nbytes;
+		bytes = IV + req->cryptlen;
 
 	} else {
 		reqctx->imm = 0;
@@ -1212,40 +1306,39 @@
 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
-		bytes = req->nbytes;
+		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR) {
-		bytes = adjust_ctr_overflow(req->info, bytes);
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
+		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
-		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 		       CTR_RFC3686_IV_SIZE);
 
 		/* initialize counter portion of counter block */
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+		memcpy(reqctx->init_iv, reqctx->iv, IV);
 
 	} else {
 
-		memcpy(reqctx->iv, req->info, IV);
+		memcpy(reqctx->iv, req->iv, IV);
+		memcpy(reqctx->init_iv, req->iv, IV);
 	}
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->nbytes,
-					   reqctx->iv,
+fallback:	atomic_inc(&adap->chcr_stats.fallback);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
+					   subtype ==
+					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+					   reqctx->iv : req->iv,
					   op_type);
 		goto error;
 	}
@@ -1264,6 +1357,7 @@
 	}
 	reqctx->processed = bytes;
 	reqctx->last_req_len = bytes;
+	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
 
 	return 0;
 unmap:
@@ -1272,152 +1366,156 @@
 	return err;
 }
 
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
-	int err, isfull = 0;
+	int err;
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct chcr_context *ctx = c_ctx(tfm);
+	unsigned int cpu;
 
+	cpu = get_cpu();
+	reqctx->txqidx = cpu % ctx->ntxq;
+	reqctx->rxqidx = cpu % ctx->nrxq;
+	put_cpu();
+
+	err = chcr_inc_wrcount(dev);
+	if (err)
+		return -ENXIO;
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    c_ctx(tfm)->tx_qidx))) {
-		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
+						reqctx->txqidx) &&
+		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+			err = -ENOSPC;
+			goto error;
 	}
 
-	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
 			     &skb, CHCR_ENCRYPT_OP);
 	if (err || !skb)
 		return err;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
 	chcr_send_wr(skb);
-	return isfull ? -EBUSY : -EINPROGRESS;
+	if (get_cryptoalg_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP ) {
+		reqctx->partial_req = 1;
+		wait_for_completion(&ctx->cbc_aes_aio_done);
+	}
+	return -EINPROGRESS;
+error:
+	chcr_dec_wrcount(dev);
+	return err;
 }
 
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
-	int err, isfull = 0;
+	int err;
+	struct chcr_context *ctx = c_ctx(tfm);
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	reqctx->txqidx = cpu % ctx->ntxq;
+	reqctx->rxqidx = cpu % ctx->nrxq;
+	put_cpu();
+
+	err = chcr_inc_wrcount(dev);
+	if (err)
+		return -ENXIO;
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    c_ctx(tfm)->tx_qidx))) {
-		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+						reqctx->txqidx) &&
+		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
 			return -ENOSPC;
-	}
-
-	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
-			     &skb, CHCR_DECRYPT_OP);
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
+			     &skb, CHCR_DECRYPT_OP);
 	if (err || !skb)
 		return err;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
 	chcr_send_wr(skb);
-	return isfull ? -EBUSY : -EINPROGRESS;
+	return -EINPROGRESS;
 }
-
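With the per-tfm tx_qidx/rx_qidx gone, each request now derives its queues from the CPU that submits it, spreading load across the ntxq/nrxq rings; get_cpu()/put_cpu() pin the CPU id (by disabling preemption) while it is read. A hedged sketch of the selection step as it appears throughout the patch:

    /* Kernel-style sketch: derive per-request queue indices from the CPU. */
    unsigned int cpu, txqidx, rxqidx;

    cpu = get_cpu();                /* disables preemption, returns this CPU id */
    txqidx = cpu % ctx->ntxq;       /* the same CPU always maps to the same rings */
    rxqidx = cpu % ctx->nrxq;
    put_cpu();                      /* re-enables preemption */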
 static int chcr_device_init(struct chcr_context *ctx)
 {
 	struct uld_ctx *u_ctx = NULL;
-	struct adapter *adap;
-	unsigned int id;
-	int txq_perchan, txq_idx, ntxq;
-	int err = 0, rxq_perchan, rxq_idx;
+	int txq_perchan, ntxq;
+	int err = 0, rxq_perchan;
 
-	id = smp_processor_id();
 	if (!ctx->dev) {
 		u_ctx = assign_chcr_device();
 		if (!u_ctx) {
+			err = -ENXIO;
 			pr_err("chcr device assignment fails\n");
 			goto out;
 		}
-		ctx->dev = u_ctx->dev;
-		adap = padap(ctx->dev);
-		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
-				    adap->vres.ncrypto_fc);
+		ctx->dev = &u_ctx->dev;
+		ntxq = u_ctx->lldi.ntxq;
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->tx_chan_id = ctx->dev->tx_channel_id;
-		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
-		ctx->dev->rx_channel_id = 0;
-		spin_unlock(&ctx->dev->lock_chcr_dev);
-		rxq_idx = ctx->tx_chan_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->tx_chan_id * txq_perchan;
-		txq_idx += id % txq_perchan;
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
-		/* Channel Id used by SGE to forward packet to Host.
-		 * Same value should be used in cpl_fw6_pld RSS_CH field
-		 * by FW. Driver programs PCI channel ID to be used in fw
-		 * at the time of queue allocation with value "pi->tx_chan"
-		 */
-		ctx->pci_chan_id = txq_idx / txq_perchan;
+		ctx->ntxq = ntxq;
+		ctx->nrxq = u_ctx->lldi.nrxq;
+		ctx->rxq_perchan = rxq_perchan;
+		ctx->txq_perchan = txq_perchan;
 	}
 out:
 	return err;
 }
 
-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
-				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
+				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
-		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
+	init_completion(&ctx->cbc_aes_aio_done);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
 
-	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
-		/* To update tweak*/
-		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
-		if (IS_ERR(ablkctx->aes_generic)) {
-			pr_err("failed to allocate aes cipher for tweak\n");
-			return PTR_ERR(ablkctx->aes_generic);
-		}
-	} else
-		ablkctx->aes_generic = NULL;
-
-	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
-	return chcr_device_init(crypto_tfm_ctx(tfm));
+	return chcr_device_init(ctx);
 }
 
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
 	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
 	 * cannot be used as fallback in chcr_handle_cipher_response
 	 */
 	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
-				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
-		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
-	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
-	return chcr_device_init(crypto_tfm_ctx(tfm));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
+	return chcr_device_init(ctx);
 }
 
 
-static void chcr_cra_exit(struct crypto_tfm *tfm)
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
 	crypto_free_skcipher(ablkctx->sw_cipher);
-	if (ablkctx->aes_generic)
-		crypto_free_cipher(ablkctx->aes_generic);
 }
 
 static int get_alg_config(struct algo_param *params,
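chcr_init_tfm() now reserves crypto_skcipher_reqsize(sw_cipher) bytes beyond the driver's own request context, which is what lets chcr_cipher_fallback() keep fallback_req inline instead of on the stack. An illustrative sketch of the layout this buys — assuming the context ends with the embedded sub-request, which is not necessarily the driver's exact definition:

    /* Illustrative layout, not the driver's exact definitions. */
    struct my_reqctx {
        /* ... driver per-request state ... */
        struct skcipher_request fallback_req;   /* kept last: the fallback
                                                 * tfm's own reqsize bytes
                                                 * follow immediately after */
    };

    /* in init_tfm():
     *     crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
     *                                 crypto_skcipher_reqsize(sw_cipher));
     */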
@@ -1450,7 +1548,7 @@
 		params->result_size = SHA512_DIGEST_SIZE;
 		break;
 	default:
-		pr_err("chcr : ERROR, unsupported digest size\n");
+		pr_err("ERROR, unsupported digest size\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -1470,9 +1568,10 @@
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+	struct chcr_context *ctx = h_ctx(tfm);
+	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
 	struct sk_buff *skb = NULL;
-	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_wr *chcr_req;
 	struct ulptx_sgl *ulptx;
 	unsigned int nents = 0, transhdr_len;
@@ -1481,7 +1580,9 @@
 			GFP_ATOMIC;
 	struct adapter *adap = padap(h_ctx(tfm)->dev);
 	int error = 0;
+	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
 				param->sg_len) <= SGE_MAX_WR_LEN;
@@ -1498,7 +1599,8 @@
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
 	chcr_req->sec_cpl.op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
+
 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
 
 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1561,21 +1663,22 @@
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct uld_ctx *u_ctx = NULL;
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+	struct chcr_context *ctx = h_ctx(rtfm);
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct sk_buff *skb;
 	u8 remainder = 0, bs;
 	unsigned int nbytes = req->nbytes;
 	struct hash_wr_param params;
-	int error, isfull = 0;
+	int error;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	req_ctx->txqidx = cpu % ctx->ntxq;
+	req_ctx->rxqidx = cpu % ctx->nrxq;
+	put_cpu();
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-	u_ctx = ULD_CTX(h_ctx(rtfm));
-	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    h_ctx(rtfm)->tx_qidx))) {
-		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
-	}
 
 	if (nbytes + req_ctx->reqlen >= bs) {
 		remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1689,25 @@
 		req_ctx->reqlen += nbytes;
 		return 0;
 	}
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
+	/* Detach state for CHCR means lldi or padap is freed. Increasing
+	 * inflight count for dev guarantees that lldi and padap is valid
+	 */
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+						req_ctx->txqidx) &&
+		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+			error = -ENOSPC;
+			goto err;
+	}
+
 	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1623,12 +1741,13 @@
 	}
 	req_ctx->reqlen = remainder;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
 	chcr_send_wr(skb);
-
-	return isfull ? -EBUSY : -EINPROGRESS;
+	return -EINPROGRESS;
 unmap:
 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+	chcr_dec_wrcount(dev);
 	return error;
 }
 
@@ -1646,13 +1765,25 @@
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct hash_wr_param params;
 	struct sk_buff *skb;
-	struct uld_ctx *u_ctx = NULL;
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+	struct chcr_context *ctx = h_ctx(rtfm);
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	int error;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	req_ctx->txqidx = cpu % ctx->ntxq;
+	req_ctx->rxqidx = cpu % ctx->nrxq;
+	put_cpu();
+
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
 
 	chcr_init_hctx_per_wr(req_ctx);
-	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (is_hmac(crypto_ahash_tfm(rtfm)))
 		params.opad_needed = 1;
 	else
@@ -1686,38 +1817,55 @@
 	}
 	params.hash_size = crypto_ahash_digestsize(rtfm);
 	skb = create_hash_wr(req, &params);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
 	req_ctx->reqlen = 0;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
+err:
+	chcr_dec_wrcount(dev);
+	return error;
 }
 
 static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct uld_ctx *u_ctx = NULL;
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+	struct chcr_context *ctx = h_ctx(rtfm);
 	struct sk_buff *skb;
 	struct hash_wr_param params;
 	u8 bs;
-	int error, isfull = 0;
+	int error;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	req_ctx->txqidx = cpu % ctx->ntxq;
+	req_ctx->rxqidx = cpu % ctx->nrxq;
+	put_cpu();
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-	u_ctx = ULD_CTX(h_ctx(rtfm));
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    h_ctx(rtfm)->tx_qidx))) {
-		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
+						req_ctx->txqidx) &&
+		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+			error = -ENOSPC;
+			goto err;
 	}
 	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
 
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
.. | .. |
---|
1768 | 1916 | req_ctx->reqlen = 0; |
---|
1769 | 1917 | req_ctx->hctx_wr.processed += params.sg_len; |
---|
1770 | 1918 | skb->dev = u_ctx->lldi.ports[0]; |
---|
1771 | | - set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
---|
| 1919 | + set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
---|
1772 | 1920 | chcr_send_wr(skb); |
---|
1773 | | - |
---|
1774 | | - return isfull ? -EBUSY : -EINPROGRESS; |
---|
| 1921 | + return -EINPROGRESS; |
---|
1775 | 1922 | unmap: |
---|
1776 | 1923 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); |
---|
| 1924 | +err: |
---|
| 1925 | + chcr_dec_wrcount(dev); |
---|
1777 | 1926 | return error; |
---|
1778 | 1927 | } |
---|
1779 | 1928 | |
---|
.. | .. |
---|
1781 | 1930 | { |
---|
1782 | 1931 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
---|
1783 | 1932 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
---|
1784 | | - struct uld_ctx *u_ctx = NULL; |
---|
| 1933 | + struct chcr_dev *dev = h_ctx(rtfm)->dev; |
---|
| 1934 | + struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); |
---|
| 1935 | + struct chcr_context *ctx = h_ctx(rtfm); |
---|
1785 | 1936 | struct sk_buff *skb; |
---|
1786 | 1937 | struct hash_wr_param params; |
---|
1787 | 1938 | u8 bs; |
---|
1788 | | - int error, isfull = 0; |
---|
| 1939 | + int error; |
---|
| 1940 | + unsigned int cpu; |
---|
| 1941 | + |
---|
| 1942 | + cpu = get_cpu(); |
---|
| 1943 | + req_ctx->txqidx = cpu % ctx->ntxq; |
---|
| 1944 | + req_ctx->rxqidx = cpu % ctx->nrxq; |
---|
| 1945 | + put_cpu(); |
---|
1789 | 1946 | |
---|
1790 | 1947 | rtfm->init(req); |
---|
1791 | 1948 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
---|
| 1949 | + error = chcr_inc_wrcount(dev); |
---|
| 1950 | + if (error) |
---|
| 1951 | + return -ENXIO; |
---|
1792 | 1952 | |
---|
1793 | | - u_ctx = ULD_CTX(h_ctx(rtfm)); |
---|
1794 | 1953 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
---|
1795 | | - h_ctx(rtfm)->tx_qidx))) { |
---|
1796 | | - isfull = 1; |
---|
1797 | | - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
---|
1798 | | - return -ENOSPC; |
---|
| 1954 | + req_ctx->txqidx) && |
---|
| 1955 | + (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { |
---|
| 1956 | + error = -ENOSPC; |
---|
| 1957 | + goto err; |
---|
1799 | 1958 | } |
---|
1800 | 1959 | |
---|
1801 | 1960 | chcr_init_hctx_per_wr(req_ctx); |
---|
1802 | 1961 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
---|
1803 | | - if (error) |
---|
1804 | | - return -ENOMEM; |
---|
| 1962 | + if (error) { |
---|
| 1963 | + error = -ENOMEM; |
---|
| 1964 | + goto err; |
---|
| 1965 | + } |
---|
1805 | 1966 | |
---|
1806 | 1967 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
---|
1807 | 1968 | params.kctx_len = roundup(params.alg_prm.result_size, 16); |
---|
.. | .. |
---|
1837 | 1998 | req_ctx->data_len += params.bfr_len + params.sg_len; |
---|
1838 | 1999 | |
---|
1839 | 2000 | if (req->nbytes == 0) { |
---|
1840 | | - create_last_hash_block(req_ctx->reqbfr, bs, 0); |
---|
| 2001 | + create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); |
---|
1841 | 2002 | params.more = 1; |
---|
1842 | 2003 | params.bfr_len = bs; |
---|
1843 | 2004 | } |
---|
.. | .. |
---|
1849 | 2010 | } |
---|
1850 | 2011 | req_ctx->hctx_wr.processed += params.sg_len; |
---|
1851 | 2012 | skb->dev = u_ctx->lldi.ports[0]; |
---|
1852 | | - set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
---|
| 2013 | + set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
---|
1853 | 2014 | chcr_send_wr(skb); |
---|
1854 | | - return isfull ? -EBUSY : -EINPROGRESS; |
---|
| 2015 | + return -EINPROGRESS; |
---|
1855 | 2016 | unmap: |
---|
1856 | 2017 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); |
---|
| 2018 | +err: |
---|
| 2019 | + chcr_dec_wrcount(dev); |
---|
1857 | 2020 | return error; |
---|
1858 | 2021 | } |
---|
1859 | 2022 | |
---|
.. | .. |
---|
1862 | 2025 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); |
---|
1863 | 2026 | struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; |
---|
1864 | 2027 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
---|
1865 | | - struct uld_ctx *u_ctx = NULL; |
---|
| 2028 | + struct chcr_context *ctx = h_ctx(rtfm); |
---|
| 2029 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
1866 | 2030 | struct sk_buff *skb; |
---|
1867 | 2031 | struct hash_wr_param params; |
---|
1868 | 2032 | u8 bs; |
---|
1869 | 2033 | int error; |
---|
| 2034 | + unsigned int cpu; |
---|
| 2035 | + |
---|
| 2036 | + cpu = get_cpu(); |
---|
| 2037 | + reqctx->txqidx = cpu % ctx->ntxq; |
---|
| 2038 | + reqctx->rxqidx = cpu % ctx->nrxq; |
---|
| 2039 | + put_cpu(); |
---|
1870 | 2040 | |
---|
1871 | 2041 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); |
---|
1872 | | - u_ctx = ULD_CTX(h_ctx(rtfm)); |
---|
1873 | 2042 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
---|
1874 | 2043 | params.kctx_len = roundup(params.alg_prm.result_size, 16); |
---|
1875 | 2044 | if (is_hmac(crypto_ahash_tfm(rtfm))) { |
---|
.. | .. |
---|
1909 | 2078 | } |
---|
1910 | 2079 | hctx_wr->processed += params.sg_len; |
---|
1911 | 2080 | skb->dev = u_ctx->lldi.ports[0]; |
---|
1912 | | - set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
---|
| 2081 | + set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
---|
1913 | 2082 | chcr_send_wr(skb); |
---|
1914 | 2083 | return 0; |
---|
1915 | 2084 | err: |
---|
.. | .. |
---|
1925 | 2094 | int digestsize, updated_digestsize; |
---|
1926 | 2095 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
---|
1927 | 2096 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); |
---|
| 2097 | + struct chcr_dev *dev = h_ctx(tfm)->dev; |
---|
1928 | 2098 | |
---|
1929 | 2099 | if (input == NULL) |
---|
1930 | 2100 | goto out; |
---|
.. | .. |
---|
1967 | 2137 | |
---|
1968 | 2138 | |
---|
1969 | 2139 | out: |
---|
| 2140 | + chcr_dec_wrcount(dev); |
---|
1970 | 2141 | req->base.complete(&req->base, err); |
---|
1971 | 2142 | } |
---|
1972 | 2143 | |
---|
.. | .. |
---|
1983 | 2154 | |
---|
1984 | 2155 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
---|
1985 | 2156 | case CRYPTO_ALG_TYPE_AEAD: |
---|
1986 | | - chcr_handle_aead_resp(aead_request_cast(req), input, err); |
---|
| 2157 | + err = chcr_handle_aead_resp(aead_request_cast(req), input, err); |
---|
1987 | 2158 | break; |
---|
1988 | 2159 | |
---|
1989 | | - case CRYPTO_ALG_TYPE_ABLKCIPHER: |
---|
1990 | | - err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), |
---|
| 2160 | + case CRYPTO_ALG_TYPE_SKCIPHER: |
---|
| 2161 | + chcr_handle_cipher_resp(skcipher_request_cast(req), |
---|
1991 | 2162 | input, err); |
---|
1992 | 2163 | break; |
---|
1993 | | - |
---|
1994 | 2164 | case CRYPTO_ALG_TYPE_AHASH: |
---|
1995 | 2165 | chcr_handle_ahash_resp(ahash_request_cast(req), input, err); |
---|
1996 | 2166 | } |
---|
.. | .. |
---|
2008 | 2178 | memcpy(state->partial_hash, req_ctx->partial_hash, |
---|
2009 | 2179 | CHCR_HASH_MAX_DIGEST_SIZE); |
---|
2010 | 2180 | chcr_init_hctx_per_wr(state); |
---|
2011 | | - return 0; |
---|
| 2181 | + return 0; |
---|
2012 | 2182 | } |
---|
2013 | 2183 | |
---|
2014 | 2184 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) |
---|
.. | .. |
---|
2042 | 2212 | * ipad in hmacctx->ipad and opad in hmacctx->opad location |
---|
2043 | 2213 | */ |
---|
2044 | 2214 | shash->tfm = hmacctx->base_hash; |
---|
2045 | | - shash->flags = crypto_shash_get_flags(hmacctx->base_hash); |
---|
2046 | 2215 | if (keylen > bs) { |
---|
2047 | 2216 | err = crypto_shash_digest(shash, key, keylen, |
---|
2048 | 2217 | hmacctx->ipad); |
---|
.. | .. |
---|
2080 | 2249 | return err; |
---|
2081 | 2250 | } |
---|
2082 | 2251 | |
---|
2083 | | -static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
---|
| 2252 | +static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, |
---|
2084 | 2253 | unsigned int key_len) |
---|
2085 | 2254 | { |
---|
2086 | 2255 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
---|
.. | .. |
---|
2095 | 2264 | ablkctx->enckey_len = key_len; |
---|
2096 | 2265 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); |
---|
2097 | 2266 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; |
---|
2098 | | - ablkctx->key_ctx_hdr = |
---|
| 2267 | + /* Both xts keys must be aligned to a 16 byte boundary |
---|
| 2268 | + * by padding with zeros, so each 24 byte key is padded with 8 zero bytes. |
---|
| 2269 | + */ |
---|
| 2270 | + if (key_len == 48) { |
---|
| 2271 | + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len |
---|
| 2272 | + + 16) >> 4; |
---|
| 2273 | + memmove(ablkctx->key + 32, ablkctx->key + 24, 24); |
---|
| 2274 | + memset(ablkctx->key + 24, 0, 8); |
---|
| 2275 | + memset(ablkctx->key + 56, 0, 8); |
---|
| 2276 | + ablkctx->enckey_len = 64; |
---|
| 2277 | + ablkctx->key_ctx_hdr = |
---|
| 2278 | + FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192, |
---|
| 2279 | + CHCR_KEYCTX_NO_KEY, 1, |
---|
| 2280 | + 0, context_size); |
---|
| 2281 | + } else { |
---|
| 2282 | + ablkctx->key_ctx_hdr = |
---|
2099 | 2283 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? |
---|
2100 | 2284 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : |
---|
2101 | 2285 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, |
---|
2102 | 2286 | CHCR_KEYCTX_NO_KEY, 1, |
---|
2103 | 2287 | 0, context_size); |
---|
| 2288 | + } |
---|
2104 | 2289 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; |
---|
2105 | 2290 | return 0; |
---|
2106 | 2291 | badkey_err: |
---|
2107 | | - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
2108 | 2292 | ablkctx->enckey_len = 0; |
---|
2109 | 2293 | |
---|
2110 | 2294 | return err; |
---|
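The new 48-byte branch in chcr_aes_xts_setkey() repacks an AES-192 XTS key pair so each half starts on a 16-byte boundary, as the comment in the hunk requires. A sketch of just that repacking step, operating on the same buffer layout (24-byte key1 followed by 24-byte key2; the helper name is illustrative):

    #include <linux/string.h>
    #include <linux/types.h>

    /* Repack a 48-byte XTS key (key1 || key2, 24 bytes each) in place
     * so each key occupies a zero-padded 32-byte slot, mirroring the
     * memmove/memset sequence above. Resulting layout:
     *   bytes  0..23  key1,  bytes 24..31  zero pad
     *   bytes 32..55  key2,  bytes 56..63  zero pad
     */
    static void xts192_pad_keys(u8 *key /* buffer of at least 64 bytes */)
    {
        memmove(key + 32, key + 24, 24); /* slide key2 up to offset 32 */
        memset(key + 24, 0, 8);          /* pad tail of key1 slot */
        memset(key + 56, 0, 8);          /* pad tail of key2 slot */
    }

The enckey_len of 64 and the CHCR_KEYCTX_CIPHER_KEY_SIZE_192 header written in the hunk describe exactly this padded layout.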
.. | .. |
---|
2215 | 2399 | error = -ENOMEM; |
---|
2216 | 2400 | goto err; |
---|
2217 | 2401 | } |
---|
2218 | | - reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, |
---|
2219 | | - CHCR_SRC_SG_SIZE, 0); |
---|
2220 | | - reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, |
---|
2221 | | - CHCR_SRC_SG_SIZE, req->assoclen); |
---|
| 2402 | + |
---|
2222 | 2403 | return 0; |
---|
2223 | 2404 | err: |
---|
2224 | 2405 | return error; |
---|
.. | .. |
---|
2249 | 2430 | req->base.complete, req->base.data); |
---|
2250 | 2431 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
---|
2251 | 2432 | req->iv); |
---|
2252 | | - aead_request_set_ad(subreq, req->assoclen); |
---|
| 2433 | + aead_request_set_ad(subreq, req->assoclen); |
---|
2253 | 2434 | return op_type ? crypto_aead_decrypt(subreq) : |
---|
2254 | 2435 | crypto_aead_encrypt(subreq); |
---|
2255 | 2436 | } |
---|
.. | .. |
---|
2259 | 2440 | int size) |
---|
2260 | 2441 | { |
---|
2261 | 2442 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
2262 | | - struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
---|
| 2443 | + struct chcr_context *ctx = a_ctx(tfm); |
---|
| 2444 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
| 2445 | + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
---|
2263 | 2446 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
---|
2264 | 2447 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
2265 | 2448 | struct sk_buff *skb = NULL; |
---|
.. | .. |
---|
2268 | 2451 | struct ulptx_sgl *ulptx; |
---|
2269 | 2452 | unsigned int transhdr_len; |
---|
2270 | 2453 | unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); |
---|
2271 | | - unsigned int kctx_len = 0, dnents; |
---|
2272 | | - unsigned int assoclen = req->assoclen; |
---|
| 2454 | + unsigned int kctx_len = 0, dnents, snents; |
---|
2273 | 2455 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2274 | 2456 | int error = -EINVAL; |
---|
| 2457 | + u8 *ivptr; |
---|
2275 | 2458 | int null = 0; |
---|
2276 | 2459 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
---|
2277 | 2460 | GFP_ATOMIC; |
---|
2278 | | - struct adapter *adap = padap(a_ctx(tfm)->dev); |
---|
| 2461 | + struct adapter *adap = padap(ctx->dev); |
---|
| 2462 | + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
---|
2279 | 2463 | |
---|
| 2464 | + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
---|
2280 | 2465 | if (req->cryptlen == 0) |
---|
2281 | 2466 | return NULL; |
---|
2282 | 2467 | |
---|
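A second pattern introduced here and repeated in every WR builder below: the reply channel is computed per request from rxqidx instead of being read from a cached dev->rx_channel_id. A sketch of the two-step derivation (the helper name is illustrative; rxq_perchan and cxgb4_port_e2cchan() are the driver's own):

    /* Map a request's rx queue index to the channel its completion
     * should arrive on: rx queues are grouped per port (rxq_perchan
     * queues each), and cxgb4_port_e2cchan() translates that port to
     * its channel number.
     */
    static unsigned int rx_channel_for(struct uld_ctx *u_ctx,
                                       struct chcr_context *ctx,
                                       unsigned int rxqidx)
    {
        unsigned int port = rxqidx / ctx->rxq_perchan;

        return cxgb4_port_e2cchan(u_ctx->lldi.ports[port]);
    }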
.. | .. |
---|
2288 | 2473 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || |
---|
2289 | 2474 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
---|
2290 | 2475 | null = 1; |
---|
2291 | | - assoclen = 0; |
---|
2292 | | - reqctx->aad_nents = 0; |
---|
2293 | 2476 | } |
---|
2294 | | - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
---|
2295 | | - dnents += sg_nents_xlen(req->dst, req->cryptlen + |
---|
2296 | | - (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, |
---|
2297 | | - req->assoclen); |
---|
| 2477 | + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + |
---|
| 2478 | + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); |
---|
2298 | 2479 | dnents += MIN_AUTH_SG; // For IV |
---|
2299 | | - |
---|
| 2480 | + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, |
---|
| 2481 | + CHCR_SRC_SG_SIZE, 0); |
---|
2300 | 2482 | dst_size = get_space_for_phys_dsgl(dnents); |
---|
2301 | | - kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) |
---|
| 2483 | + kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4) |
---|
2302 | 2484 | - sizeof(chcr_req->key_ctx); |
---|
2303 | 2485 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
---|
2304 | | - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < |
---|
| 2486 | + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) < |
---|
2305 | 2487 | SGE_MAX_WR_LEN; |
---|
2306 | | - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) |
---|
2307 | | - : (sgl_len(reqctx->src_nents + reqctx->aad_nents |
---|
2308 | | - + MIN_GCM_SG) * 8); |
---|
| 2488 | + temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) |
---|
| 2489 | + : (sgl_len(snents) * 8); |
---|
2309 | 2490 | transhdr_len += temp; |
---|
2310 | 2491 | transhdr_len = roundup(transhdr_len, 16); |
---|
2311 | 2492 | |
---|
.. | .. |
---|
2315 | 2496 | chcr_aead_common_exit(req); |
---|
2316 | 2497 | return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); |
---|
2317 | 2498 | } |
---|
2318 | | - skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
---|
| 2499 | + skb = alloc_skb(transhdr_len, flags); |
---|
2319 | 2500 | if (!skb) { |
---|
2320 | 2501 | error = -ENOMEM; |
---|
2321 | 2502 | goto err; |
---|
.. | .. |
---|
2331 | 2512 | * to the hardware spec |
---|
2332 | 2513 | */ |
---|
2333 | 2514 | chcr_req->sec_cpl.op_ivinsrtofst = |
---|
2334 | | - FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, |
---|
2335 | | - assoclen + 1); |
---|
2336 | | - chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); |
---|
| 2515 | + FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); |
---|
| 2516 | + chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); |
---|
2337 | 2517 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
---|
2338 | | - assoclen ? 1 : 0, assoclen, |
---|
2339 | | - assoclen + IV + 1, |
---|
| 2518 | + null ? 0 : 1 + IV, |
---|
| 2519 | + null ? 0 : IV + req->assoclen, |
---|
| 2520 | + req->assoclen + IV + 1, |
---|
2340 | 2521 | (temp & 0x1F0) >> 4); |
---|
2341 | 2522 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( |
---|
2342 | 2523 | temp & 0xF, |
---|
2343 | | - null ? 0 : assoclen + IV + 1, |
---|
| 2524 | + null ? 0 : req->assoclen + IV + 1, |
---|
2344 | 2525 | temp, temp); |
---|
2345 | 2526 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || |
---|
2346 | 2527 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) |
---|
.. | .. |
---|
2367 | 2548 | |
---|
2368 | 2549 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
---|
2369 | 2550 | actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); |
---|
| 2551 | + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
---|
| 2552 | + ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
---|
| 2553 | + ulptx = (struct ulptx_sgl *)(ivptr + IV); |
---|
2370 | 2554 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
---|
2371 | 2555 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
---|
2372 | | - memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); |
---|
2373 | | - memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, |
---|
| 2556 | + memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); |
---|
| 2557 | + memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv, |
---|
2374 | 2558 | CTR_RFC3686_IV_SIZE); |
---|
2375 | | - *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + |
---|
| 2559 | + *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE + |
---|
2376 | 2560 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); |
---|
2377 | 2561 | } else { |
---|
2378 | | - memcpy(reqctx->iv, req->iv, IV); |
---|
| 2562 | + memcpy(ivptr, req->iv, IV); |
---|
2379 | 2563 | } |
---|
2380 | | - phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
---|
2381 | | - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
---|
2382 | | - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); |
---|
2383 | | - chcr_add_aead_src_ent(req, ulptx, assoclen); |
---|
| 2564 | + chcr_add_aead_dst_ent(req, phys_cpl, qid); |
---|
| 2565 | + chcr_add_aead_src_ent(req, ulptx); |
---|
2384 | 2566 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
---|
2385 | | - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
---|
2386 | | - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); |
---|
| 2567 | + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + |
---|
| 2568 | + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); |
---|
2387 | 2569 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, |
---|
2388 | 2570 | transhdr_len, temp, 0); |
---|
2389 | 2571 | reqctx->skb = skb; |
---|
.. | .. |
---|
2403 | 2585 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
2404 | 2586 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
2405 | 2587 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2406 | | - int dst_size; |
---|
| 2588 | + int src_len, dst_len; |
---|
2407 | 2589 | |
---|
2408 | | - dst_size = req->assoclen + req->cryptlen + (op_type ? |
---|
2409 | | - -authsize : authsize); |
---|
2410 | | - if (!req->cryptlen || !dst_size) |
---|
| 2590 | + /* calculate and handle src and dst sg length separately |
---|
| 2591 | + * for inplace and out-of place operations |
---|
| 2592 | + */ |
---|
| 2593 | + if (req->src == req->dst) { |
---|
| 2594 | + src_len = req->assoclen + req->cryptlen + (op_type ? |
---|
| 2595 | + 0 : authsize); |
---|
| 2596 | + dst_len = src_len; |
---|
| 2597 | + } else { |
---|
| 2598 | + src_len = req->assoclen + req->cryptlen; |
---|
| 2599 | + dst_len = req->assoclen + req->cryptlen + (op_type ? |
---|
| 2600 | + -authsize : authsize); |
---|
| 2601 | + } |
---|
| 2602 | + |
---|
| 2603 | + if (!req->cryptlen || !src_len || !dst_len) |
---|
2411 | 2604 | return 0; |
---|
2412 | 2605 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), |
---|
2413 | 2606 | DMA_BIDIRECTIONAL); |
---|
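The length rule added above (and repeated in chcr_aead_dma_unmap() below) can be read as a single function of the operation and buffer aliasing; a sketch, with an illustrative helper name:

    #include <linux/types.h>

    /* src/dst scatterlist lengths for an AEAD transfer. In place,
     * both lists cover the same bytes: AAD + payload, plus the tag
     * only when encrypting (on decrypt the tag is already counted
     * inside cryptlen). Out of place, the source never carries the
     * tag, and the destination gains it on encrypt and sheds it on
     * decrypt.
     */
    static void aead_sg_lens(bool inplace, bool decrypt,
                             unsigned int assoclen, unsigned int cryptlen,
                             unsigned int authsize,
                             int *src_len, int *dst_len)
    {
        if (inplace) {
            *src_len = assoclen + cryptlen + (decrypt ? 0 : authsize);
            *dst_len = *src_len;
        } else {
            *src_len = assoclen + cryptlen;
            *dst_len = assoclen + cryptlen +
                       (decrypt ? -authsize : authsize);
        }
    }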
.. | .. |
---|
2419 | 2612 | reqctx->b0_dma = 0; |
---|
2420 | 2613 | if (req->src == req->dst) { |
---|
2421 | 2614 | error = dma_map_sg(dev, req->src, |
---|
2422 | | - sg_nents_for_len(req->src, dst_size), |
---|
| 2615 | + sg_nents_for_len(req->src, src_len), |
---|
2423 | 2616 | DMA_BIDIRECTIONAL); |
---|
2424 | 2617 | if (!error) |
---|
2425 | 2618 | goto err; |
---|
2426 | 2619 | } else { |
---|
2427 | | - error = dma_map_sg(dev, req->src, sg_nents(req->src), |
---|
| 2620 | + error = dma_map_sg(dev, req->src, |
---|
| 2621 | + sg_nents_for_len(req->src, src_len), |
---|
2428 | 2622 | DMA_TO_DEVICE); |
---|
2429 | 2623 | if (!error) |
---|
2430 | 2624 | goto err; |
---|
2431 | | - error = dma_map_sg(dev, req->dst, sg_nents(req->dst), |
---|
| 2625 | + error = dma_map_sg(dev, req->dst, |
---|
| 2626 | + sg_nents_for_len(req->dst, dst_len), |
---|
2432 | 2627 | DMA_FROM_DEVICE); |
---|
2433 | 2628 | if (!error) { |
---|
2434 | | - dma_unmap_sg(dev, req->src, sg_nents(req->src), |
---|
2435 | | - DMA_TO_DEVICE); |
---|
| 2629 | + dma_unmap_sg(dev, req->src, |
---|
| 2630 | + sg_nents_for_len(req->src, src_len), |
---|
| 2631 | + DMA_TO_DEVICE); |
---|
2436 | 2632 | goto err; |
---|
2437 | 2633 | } |
---|
2438 | 2634 | } |
---|
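With the lengths known, both mapping paths now bound the entry count with sg_nents_for_len() instead of mapping every entry via sg_nents(). A condensed sketch of the out-of-place case, assuming src_len/dst_len were validated as above (the driver's goto-based error handling is flattened here):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Map only the scatterlist entries that cover the bytes this
     * request touches; sg_nents_for_len() returns how many entries
     * span the given length (the caller must ensure the list is long
     * enough, as the validated src_len/dst_len guarantee here).
     */
    static int map_out_of_place(struct device *dev,
                                struct scatterlist *src, int src_len,
                                struct scatterlist *dst, int dst_len)
    {
        if (!dma_map_sg(dev, src, sg_nents_for_len(src, src_len),
                        DMA_TO_DEVICE))
            return -ENOMEM;

        if (!dma_map_sg(dev, dst, sg_nents_for_len(dst, dst_len),
                        DMA_FROM_DEVICE)) {
            dma_unmap_sg(dev, src, sg_nents_for_len(src, src_len),
                         DMA_TO_DEVICE);
            return -ENOMEM;
        }
        return 0;
    }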
.. | .. |
---|
2450 | 2646 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
2451 | 2647 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
2452 | 2648 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2453 | | - int dst_size; |
---|
| 2649 | + int src_len, dst_len; |
---|
2454 | 2650 | |
---|
2455 | | - dst_size = req->assoclen + req->cryptlen + (op_type ? |
---|
2456 | | - -authsize : authsize); |
---|
2457 | | - if (!req->cryptlen || !dst_size) |
---|
| 2651 | + /* calculate and handle src and dst sg length separately |
---|
| 2652 | + * for in-place and out-of-place operations |
---|
| 2653 | + */ |
---|
| 2654 | + if (req->src == req->dst) { |
---|
| 2655 | + src_len = req->assoclen + req->cryptlen + (op_type ? |
---|
| 2656 | + 0 : authsize); |
---|
| 2657 | + dst_len = src_len; |
---|
| 2658 | + } else { |
---|
| 2659 | + src_len = req->assoclen + req->cryptlen; |
---|
| 2660 | + dst_len = req->assoclen + req->cryptlen + (op_type ? |
---|
| 2661 | + -authsize : authsize); |
---|
| 2662 | + } |
---|
| 2663 | + |
---|
| 2664 | + if (!req->cryptlen || !src_len || !dst_len) |
---|
2458 | 2665 | return; |
---|
2459 | 2666 | |
---|
2460 | 2667 | dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), |
---|
2461 | 2668 | DMA_BIDIRECTIONAL); |
---|
2462 | 2669 | if (req->src == req->dst) { |
---|
2463 | | - dma_unmap_sg(dev, req->src, sg_nents(req->src), |
---|
2464 | | - DMA_BIDIRECTIONAL); |
---|
| 2670 | + dma_unmap_sg(dev, req->src, |
---|
| 2671 | + sg_nents_for_len(req->src, src_len), |
---|
| 2672 | + DMA_BIDIRECTIONAL); |
---|
2465 | 2673 | } else { |
---|
2466 | | - dma_unmap_sg(dev, req->src, sg_nents(req->src), |
---|
2467 | | - DMA_TO_DEVICE); |
---|
2468 | | - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), |
---|
2469 | | - DMA_FROM_DEVICE); |
---|
| 2674 | + dma_unmap_sg(dev, req->src, |
---|
| 2675 | + sg_nents_for_len(req->src, src_len), |
---|
| 2676 | + DMA_TO_DEVICE); |
---|
| 2677 | + dma_unmap_sg(dev, req->dst, |
---|
| 2678 | + sg_nents_for_len(req->dst, dst_len), |
---|
| 2679 | + DMA_FROM_DEVICE); |
---|
2470 | 2680 | } |
---|
2471 | 2681 | } |
---|
2472 | 2682 | |
---|
2473 | 2683 | void chcr_add_aead_src_ent(struct aead_request *req, |
---|
2474 | | - struct ulptx_sgl *ulptx, |
---|
2475 | | - unsigned int assoclen) |
---|
| 2684 | + struct ulptx_sgl *ulptx) |
---|
2476 | 2685 | { |
---|
2477 | 2686 | struct ulptx_walk ulp_walk; |
---|
2478 | 2687 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
.. | .. |
---|
2485 | 2694 | buf += reqctx->b0_len; |
---|
2486 | 2695 | } |
---|
2487 | 2696 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
---|
2488 | | - buf, assoclen, 0); |
---|
2489 | | - buf += assoclen; |
---|
2490 | | - memcpy(buf, reqctx->iv, IV); |
---|
2491 | | - buf += IV; |
---|
2492 | | - sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
---|
2493 | | - buf, req->cryptlen, req->assoclen); |
---|
| 2697 | + buf, req->cryptlen + req->assoclen, 0); |
---|
2494 | 2698 | } else { |
---|
2495 | 2699 | ulptx_walk_init(&ulp_walk, ulptx); |
---|
2496 | 2700 | if (reqctx->b0_len) |
---|
2497 | 2701 | ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, |
---|
2498 | | - &reqctx->b0_dma); |
---|
2499 | | - ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); |
---|
2500 | | - ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); |
---|
2501 | | - ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, |
---|
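Note also the reshaped queue-full check above: the old isfull/-EBUSY handshake is gone, and a request is refused with -ENOSPC only when the Tx queue is full and the caller did not allow backlogging. The admission rule in isolation (a sketch):

    #include <linux/crypto.h>

    /* Admission rule after this patch: a full crypto queue rejects a
     * request only if CRYPTO_TFM_REQ_MAY_BACKLOG is clear; otherwise
     * the request is still submitted despite the full queue.
     */
    static bool must_reject(bool q_full, u32 req_flags)
    {
        return q_full && !(req_flags & CRYPTO_TFM_REQ_MAY_BACKLOG);
    }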
2502 | | - req->assoclen); |
---|
| 2702 | + reqctx->b0_dma); |
---|
| 2703 | + ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen + |
---|
| 2704 | + req->assoclen, 0); |
---|
2503 | 2705 | ulptx_walk_end(&ulp_walk); |
---|
2504 | 2706 | } |
---|
2505 | 2707 | } |
---|
2506 | 2708 | |
---|
2507 | 2709 | void chcr_add_aead_dst_ent(struct aead_request *req, |
---|
2508 | 2710 | struct cpl_rx_phys_dsgl *phys_cpl, |
---|
2509 | | - unsigned int assoclen, |
---|
2510 | 2711 | unsigned short qid) |
---|
2511 | 2712 | { |
---|
2512 | 2713 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
.. | .. |
---|
2514 | 2715 | struct dsgl_walk dsgl_walk; |
---|
2515 | 2716 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2516 | 2717 | struct chcr_context *ctx = a_ctx(tfm); |
---|
| 2718 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
2517 | 2719 | u32 temp; |
---|
| 2720 | + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
---|
2518 | 2721 | |
---|
| 2722 | + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
---|
2519 | 2723 | dsgl_walk_init(&dsgl_walk, phys_cpl); |
---|
2520 | | - if (reqctx->b0_len) |
---|
2521 | | - dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); |
---|
2522 | | - dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); |
---|
2523 | | - dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); |
---|
2524 | | - temp = req->cryptlen + (reqctx->op ? -authsize : authsize); |
---|
2525 | | - dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); |
---|
2526 | | - dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id); |
---|
| 2724 | + dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); |
---|
| 2725 | + temp = req->assoclen + req->cryptlen + |
---|
| 2726 | + (reqctx->op ? -authsize : authsize); |
---|
| 2727 | + dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0); |
---|
| 2728 | + dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); |
---|
2527 | 2729 | } |
---|
2528 | 2730 | |
---|
2529 | | -void chcr_add_cipher_src_ent(struct ablkcipher_request *req, |
---|
| 2731 | +void chcr_add_cipher_src_ent(struct skcipher_request *req, |
---|
2530 | 2732 | void *ulptx, |
---|
2531 | 2733 | struct cipher_wr_param *wrparam) |
---|
2532 | 2734 | { |
---|
2533 | 2735 | struct ulptx_walk ulp_walk; |
---|
2534 | | - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
---|
| 2736 | + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
---|
2535 | 2737 | u8 *buf = ulptx; |
---|
2536 | 2738 | |
---|
2537 | 2739 | memcpy(buf, reqctx->iv, IV); |
---|
.. | .. |
---|
2549 | 2751 | } |
---|
2550 | 2752 | } |
---|
2551 | 2753 | |
---|
2552 | | -void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, |
---|
| 2754 | +void chcr_add_cipher_dst_ent(struct skcipher_request *req, |
---|
2553 | 2755 | struct cpl_rx_phys_dsgl *phys_cpl, |
---|
2554 | 2756 | struct cipher_wr_param *wrparam, |
---|
2555 | 2757 | unsigned short qid) |
---|
2556 | 2758 | { |
---|
2557 | | - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
---|
2558 | | - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); |
---|
| 2759 | + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
---|
| 2760 | + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); |
---|
2559 | 2761 | struct chcr_context *ctx = c_ctx(tfm); |
---|
| 2762 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
2560 | 2763 | struct dsgl_walk dsgl_walk; |
---|
| 2764 | + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
---|
2561 | 2765 | |
---|
| 2766 | + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
---|
2562 | 2767 | dsgl_walk_init(&dsgl_walk, phys_cpl); |
---|
2563 | 2768 | dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, |
---|
2564 | 2769 | reqctx->dst_ofst); |
---|
2565 | 2770 | reqctx->dstsg = dsgl_walk.last_sg; |
---|
2566 | 2771 | reqctx->dst_ofst = dsgl_walk.last_sg_len; |
---|
2567 | | - |
---|
2568 | | - dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id); |
---|
| 2772 | + dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); |
---|
2569 | 2773 | } |
---|
2570 | 2774 | |
---|
2571 | 2775 | void chcr_add_hash_src_ent(struct ahash_request *req, |
---|
.. | .. |
---|
2590 | 2794 | ulptx_walk_init(&ulp_walk, ulptx); |
---|
2591 | 2795 | if (param->bfr_len) |
---|
2592 | 2796 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, |
---|
2593 | | - &reqctx->hctx_wr.dma_addr); |
---|
| 2797 | + reqctx->hctx_wr.dma_addr); |
---|
2594 | 2798 | ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, |
---|
2595 | 2799 | param->sg_len, reqctx->hctx_wr.src_ofst); |
---|
2596 | 2800 | reqctx->hctx_wr.srcsg = ulp_walk.last_sg; |
---|
.. | .. |
---|
2630 | 2834 | } |
---|
2631 | 2835 | |
---|
2632 | 2836 | int chcr_cipher_dma_map(struct device *dev, |
---|
2633 | | - struct ablkcipher_request *req) |
---|
| 2837 | + struct skcipher_request *req) |
---|
2634 | 2838 | { |
---|
2635 | 2839 | int error; |
---|
2636 | 2840 | |
---|
.. | .. |
---|
2659 | 2863 | } |
---|
2660 | 2864 | |
---|
2661 | 2865 | void chcr_cipher_dma_unmap(struct device *dev, |
---|
2662 | | - struct ablkcipher_request *req) |
---|
| 2866 | + struct skcipher_request *req) |
---|
2663 | 2867 | { |
---|
2664 | 2868 | if (req->src == req->dst) { |
---|
2665 | 2869 | dma_unmap_sg(dev, req->src, sg_nents(req->src), |
---|
.. | .. |
---|
2690 | 2894 | return 0; |
---|
2691 | 2895 | } |
---|
2692 | 2896 | |
---|
2693 | | -static void generate_b0(struct aead_request *req, |
---|
2694 | | - struct chcr_aead_ctx *aeadctx, |
---|
| 2897 | +static int generate_b0(struct aead_request *req, u8 *ivptr, |
---|
2695 | 2898 | unsigned short op_type) |
---|
2696 | 2899 | { |
---|
2697 | 2900 | unsigned int l, lp, m; |
---|
.. | .. |
---|
2702 | 2905 | |
---|
2703 | 2906 | m = crypto_aead_authsize(aead); |
---|
2704 | 2907 | |
---|
2705 | | - memcpy(b0, reqctx->iv, 16); |
---|
| 2908 | + memcpy(b0, ivptr, 16); |
---|
2706 | 2909 | |
---|
2707 | 2910 | lp = b0[0]; |
---|
2708 | 2911 | l = lp + 1; |
---|
.. | .. |
---|
2716 | 2919 | rc = set_msg_len(b0 + 16 - l, |
---|
2717 | 2920 | (op_type == CHCR_DECRYPT_OP) ? |
---|
2718 | 2921 | req->cryptlen - m : req->cryptlen, l); |
---|
| 2922 | + |
---|
| 2923 | + return rc; |
---|
2719 | 2924 | } |
---|
2720 | 2925 | |
---|
2721 | 2926 | static inline int crypto_ccm_check_iv(const u8 *iv) |
---|
.. | .. |
---|
2728 | 2933 | } |
---|
2729 | 2934 | |
---|
2730 | 2935 | static int ccm_format_packet(struct aead_request *req, |
---|
2731 | | - struct chcr_aead_ctx *aeadctx, |
---|
| 2936 | + u8 *ivptr, |
---|
2732 | 2937 | unsigned int sub_type, |
---|
2733 | 2938 | unsigned short op_type, |
---|
2734 | 2939 | unsigned int assoclen) |
---|
2735 | 2940 | { |
---|
2736 | 2941 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
| 2942 | + struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
| 2943 | + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
---|
2737 | 2944 | int rc = 0; |
---|
2738 | 2945 | |
---|
2739 | 2946 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
---|
2740 | | - reqctx->iv[0] = 3; |
---|
2741 | | - memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); |
---|
2742 | | - memcpy(reqctx->iv + 4, req->iv, 8); |
---|
2743 | | - memset(reqctx->iv + 12, 0, 4); |
---|
| 2947 | + ivptr[0] = 3; |
---|
| 2948 | + memcpy(ivptr + 1, &aeadctx->salt[0], 3); |
---|
| 2949 | + memcpy(ivptr + 4, req->iv, 8); |
---|
| 2950 | + memset(ivptr + 12, 0, 4); |
---|
2744 | 2951 | } else { |
---|
2745 | | - memcpy(reqctx->iv, req->iv, 16); |
---|
| 2952 | + memcpy(ivptr, req->iv, 16); |
---|
2746 | 2953 | } |
---|
2747 | 2954 | if (assoclen) |
---|
2748 | | - *((unsigned short *)(reqctx->scratch_pad + 16)) = |
---|
2749 | | - htons(assoclen); |
---|
| 2955 | + put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]); |
---|
2750 | 2956 | |
---|
2751 | | - generate_b0(req, aeadctx, op_type); |
---|
| 2957 | + rc = generate_b0(req, ivptr, op_type); |
---|
2752 | 2958 | /* zero the ctr value */ |
---|
2753 | | - memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); |
---|
| 2959 | + memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); |
---|
2754 | 2960 | return rc; |
---|
2755 | 2961 | } |
---|
2756 | 2962 | |
---|
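For the RFC 4309 subtype, ccm_format_packet() now assembles the 16-byte counter block directly at ivptr inside the work request. A sketch of that assembly, combining the salt/IV copy with the final counter clear from the hunk (parameter names are illustrative):

    #include <linux/string.h>
    #include <linux/types.h>

    /* RFC 4309 CCM counter block, as laid out above:
     *   byte   0      flags: 3, i.e. a 4-byte length/counter field
     *   bytes  1..3   implicit salt taken from the key
     *   bytes  4..11  explicit per-request IV
     *   bytes 12..15  counter, cleared to start at zero
     */
    static void ccm4309_build_iv(u8 *ivptr, const u8 *salt, const u8 *req_iv)
    {
        ivptr[0] = 3;
        memcpy(ivptr + 1, salt, 3);
        memcpy(ivptr + 4, req_iv, 8);
        memset(ivptr + 12, 0, 4);

        /* "zero the ctr value": the last ivptr[0] + 1 bytes */
        memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
    }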
.. | .. |
---|
2760 | 2966 | unsigned short op_type) |
---|
2761 | 2967 | { |
---|
2762 | 2968 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
2763 | | - struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
---|
| 2969 | + struct chcr_context *ctx = a_ctx(tfm); |
---|
| 2970 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
| 2971 | + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
---|
| 2972 | + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
2764 | 2973 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; |
---|
2765 | 2974 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; |
---|
2766 | | - unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; |
---|
| 2975 | + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
---|
2767 | 2976 | unsigned int ccm_xtra; |
---|
2768 | 2977 | unsigned int tag_offset = 0, auth_offset = 0; |
---|
2769 | 2978 | unsigned int assoclen; |
---|
| 2979 | + |
---|
| 2980 | + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
---|
2770 | 2981 | |
---|
2771 | 2982 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) |
---|
2772 | 2983 | assoclen = req->assoclen - 8; |
---|
.. | .. |
---|
2776 | 2987 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); |
---|
2777 | 2988 | |
---|
2778 | 2989 | auth_offset = req->cryptlen ? |
---|
2779 | | - (assoclen + IV + 1 + ccm_xtra) : 0; |
---|
| 2990 | + (req->assoclen + IV + 1 + ccm_xtra) : 0; |
---|
2780 | 2991 | if (op_type == CHCR_DECRYPT_OP) { |
---|
2781 | 2992 | if (crypto_aead_authsize(tfm) != req->cryptlen) |
---|
2782 | 2993 | tag_offset = crypto_aead_authsize(tfm); |
---|
.. | .. |
---|
2784 | 2995 | auth_offset = 0; |
---|
2785 | 2996 | } |
---|
2786 | 2997 | |
---|
2787 | | - |
---|
2788 | | - sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, |
---|
2789 | | - 2, assoclen + 1 + ccm_xtra); |
---|
| 2998 | + sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); |
---|
2790 | 2999 | sec_cpl->pldlen = |
---|
2791 | | - htonl(assoclen + IV + req->cryptlen + ccm_xtra); |
---|
| 3000 | + htonl(req->assoclen + IV + req->cryptlen + ccm_xtra); |
---|
2792 | 3001 | /* For CCM there will be b0 always. So AAD start will be 1 always */ |
---|
2793 | 3002 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
---|
2794 | | - 1, assoclen + ccm_xtra, assoclen |
---|
2795 | | - + IV + 1 + ccm_xtra, 0); |
---|
| 3003 | + 1 + IV, IV + assoclen + ccm_xtra, |
---|
| 3004 | + req->assoclen + IV + 1 + ccm_xtra, 0); |
---|
2796 | 3005 | |
---|
2797 | 3006 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, |
---|
2798 | 3007 | auth_offset, tag_offset, |
---|
.. | .. |
---|
2839 | 3048 | struct cpl_rx_phys_dsgl *phys_cpl; |
---|
2840 | 3049 | struct ulptx_sgl *ulptx; |
---|
2841 | 3050 | unsigned int transhdr_len; |
---|
2842 | | - unsigned int dst_size = 0, kctx_len, dnents, temp; |
---|
| 3051 | + unsigned int dst_size = 0, kctx_len, dnents, temp, snents; |
---|
2843 | 3052 | unsigned int sub_type, assoclen = req->assoclen; |
---|
2844 | 3053 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2845 | 3054 | int error = -EINVAL; |
---|
| 3055 | + u8 *ivptr; |
---|
2846 | 3056 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
---|
2847 | 3057 | GFP_ATOMIC; |
---|
2848 | 3058 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
---|
.. | .. |
---|
2858 | 3068 | error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); |
---|
2859 | 3069 | if (error) |
---|
2860 | 3070 | goto err; |
---|
2861 | | - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
---|
2862 | | - dnents += sg_nents_xlen(req->dst, req->cryptlen |
---|
| 3071 | + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen |
---|
2863 | 3072 | + (reqctx->op ? -authsize : authsize), |
---|
2864 | | - CHCR_DST_SG_SIZE, req->assoclen); |
---|
| 3073 | + CHCR_DST_SG_SIZE, 0); |
---|
2865 | 3074 | dnents += MIN_CCM_SG; // For IV and B0 |
---|
2866 | 3075 | dst_size = get_space_for_phys_dsgl(dnents); |
---|
| 3076 | + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, |
---|
| 3077 | + CHCR_SRC_SG_SIZE, 0); |
---|
| 3078 | + snents += MIN_CCM_SG; //For B0 |
---|
2867 | 3079 | kctx_len = roundup(aeadctx->enckey_len, 16) * 2; |
---|
2868 | 3080 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
---|
2869 | | - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + |
---|
| 3081 | + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen + |
---|
2870 | 3082 | reqctx->b0_len) <= SGE_MAX_WR_LEN; |
---|
2871 | | - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen + |
---|
| 3083 | + temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen + |
---|
2872 | 3084 | reqctx->b0_len, 16) : |
---|
2873 | | - (sgl_len(reqctx->src_nents + reqctx->aad_nents + |
---|
2874 | | - MIN_CCM_SG) * 8); |
---|
| 3085 | + (sgl_len(snents) * 8); |
---|
2875 | 3086 | transhdr_len += temp; |
---|
2876 | 3087 | transhdr_len = roundup(transhdr_len, 16); |
---|
2877 | 3088 | |
---|
2878 | 3089 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - |
---|
2879 | | - reqctx->b0_len, transhdr_len, reqctx->op)) { |
---|
| 3090 | + reqctx->b0_len, transhdr_len, reqctx->op)) { |
---|
2880 | 3091 | atomic_inc(&adap->chcr_stats.fallback); |
---|
2881 | 3092 | chcr_aead_common_exit(req); |
---|
2882 | 3093 | return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); |
---|
2883 | 3094 | } |
---|
2884 | | - skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
---|
| 3095 | + skb = alloc_skb(transhdr_len, flags); |
---|
2885 | 3096 | |
---|
2886 | 3097 | if (!skb) { |
---|
2887 | 3098 | error = -ENOMEM; |
---|
2888 | 3099 | goto err; |
---|
2889 | 3100 | } |
---|
2890 | 3101 | |
---|
2891 | | - chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); |
---|
| 3102 | + chcr_req = __skb_put_zero(skb, transhdr_len); |
---|
2892 | 3103 | |
---|
2893 | 3104 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); |
---|
2894 | 3105 | |
---|
.. | .. |
---|
2898 | 3109 | aeadctx->key, aeadctx->enckey_len); |
---|
2899 | 3110 | |
---|
2900 | 3111 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
---|
2901 | | - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
---|
2902 | | - error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen); |
---|
| 3112 | + ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
---|
| 3113 | + ulptx = (struct ulptx_sgl *)(ivptr + IV); |
---|
| 3114 | + error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen); |
---|
2903 | 3115 | if (error) |
---|
2904 | 3116 | goto dstmap_fail; |
---|
2905 | | - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); |
---|
2906 | | - chcr_add_aead_src_ent(req, ulptx, assoclen); |
---|
| 3117 | + chcr_add_aead_dst_ent(req, phys_cpl, qid); |
---|
| 3118 | + chcr_add_aead_src_ent(req, ulptx); |
---|
2907 | 3119 | |
---|
2908 | 3120 | atomic_inc(&adap->chcr_stats.aead_rqst); |
---|
2909 | | - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
---|
2910 | | - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen + |
---|
| 3121 | + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + |
---|
| 3122 | + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen + |
---|
2911 | 3123 | reqctx->b0_len) : 0); |
---|
2912 | 3124 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, |
---|
2913 | 3125 | transhdr_len, temp, 0); |
---|
.. | .. |
---|
2926 | 3138 | int size) |
---|
2927 | 3139 | { |
---|
2928 | 3140 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
---|
2929 | | - struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
---|
| 3141 | + struct chcr_context *ctx = a_ctx(tfm); |
---|
| 3142 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
---|
| 3143 | + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
---|
2930 | 3144 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
---|
2931 | 3145 | struct sk_buff *skb = NULL; |
---|
2932 | 3146 | struct chcr_wr *chcr_req; |
---|
2933 | 3147 | struct cpl_rx_phys_dsgl *phys_cpl; |
---|
2934 | 3148 | struct ulptx_sgl *ulptx; |
---|
2935 | | - unsigned int transhdr_len, dnents = 0; |
---|
| 3149 | + unsigned int transhdr_len, dnents = 0, snents; |
---|
2936 | 3150 | unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; |
---|
2937 | 3151 | unsigned int authsize = crypto_aead_authsize(tfm); |
---|
2938 | 3152 | int error = -EINVAL; |
---|
| 3153 | + u8 *ivptr; |
---|
2939 | 3154 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
---|
2940 | 3155 | GFP_ATOMIC; |
---|
2941 | | - struct adapter *adap = padap(a_ctx(tfm)->dev); |
---|
| 3156 | + struct adapter *adap = padap(ctx->dev); |
---|
| 3157 | + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
---|
2942 | 3158 | |
---|
| 3159 | + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
---|
2943 | 3160 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) |
---|
2944 | 3161 | assoclen = req->assoclen - 8; |
---|
2945 | 3162 | |
---|
.. | .. |
---|
2947 | 3164 | error = chcr_aead_common_init(req); |
---|
2948 | 3165 | if (error) |
---|
2949 | 3166 | return ERR_PTR(error); |
---|
2950 | | - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
---|
2951 | | - dnents += sg_nents_xlen(req->dst, req->cryptlen + |
---|
| 3167 | + dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + |
---|
2952 | 3168 | (reqctx->op ? -authsize : authsize), |
---|
2953 | | - CHCR_DST_SG_SIZE, req->assoclen); |
---|
| 3169 | + CHCR_DST_SG_SIZE, 0); |
---|
| 3170 | + snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, |
---|
| 3171 | + CHCR_SRC_SG_SIZE, 0); |
---|
2954 | 3172 | dnents += MIN_GCM_SG; // For IV |
---|
2955 | 3173 | dst_size = get_space_for_phys_dsgl(dnents); |
---|
2956 | 3174 | kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; |
---|
2957 | 3175 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
---|
2958 | | - reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= |
---|
| 3176 | + reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <= |
---|
2959 | 3177 | SGE_MAX_WR_LEN; |
---|
2960 | | - temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) : |
---|
2961 | | - (sgl_len(reqctx->src_nents + |
---|
2962 | | - reqctx->aad_nents + MIN_GCM_SG) * 8); |
---|
| 3178 | + temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) : |
---|
| 3179 | + (sgl_len(snents) * 8); |
---|
2963 | 3180 | transhdr_len += temp; |
---|
2964 | 3181 | transhdr_len = roundup(transhdr_len, 16); |
---|
2965 | 3182 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, |
---|
.. | .. |
---|
2969 | 3186 | chcr_aead_common_exit(req); |
---|
2970 | 3187 | return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); |
---|
2971 | 3188 | } |
---|
2972 | | - skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
---|
| 3189 | + skb = alloc_skb(transhdr_len, flags); |
---|
2973 | 3190 | if (!skb) { |
---|
2974 | 3191 | error = -ENOMEM; |
---|
2975 | 3192 | goto err; |
---|
.. | .. |
---|
2980 | 3197 | //Offset of tag from end |
---|
2981 | 3198 | temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; |
---|
2982 | 3199 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( |
---|
2983 | | - a_ctx(tfm)->dev->rx_channel_id, 2, |
---|
2984 | | - (assoclen + 1)); |
---|
| 3200 | + rx_channel_id, 2, 1); |
---|
2985 | 3201 | chcr_req->sec_cpl.pldlen = |
---|
2986 | | - htonl(assoclen + IV + req->cryptlen); |
---|
| 3202 | + htonl(req->assoclen + IV + req->cryptlen); |
---|
2987 | 3203 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
---|
2988 | | - assoclen ? 1 : 0, assoclen, |
---|
2989 | | - assoclen + IV + 1, 0); |
---|
| 3204 | + assoclen ? 1 + IV : 0, |
---|
| 3205 | + assoclen ? IV + assoclen : 0, |
---|
| 3206 | + req->assoclen + IV + 1, 0); |
---|
2990 | 3207 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
---|
2991 | | - FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, |
---|
| 3208 | + FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1, |
---|
2992 | 3209 | temp, temp); |
---|
2993 | 3210 | chcr_req->sec_cpl.seqno_numivs = |
---|
2994 | 3211 | FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == |
---|
.. | .. |
---|
3003 | 3220 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
---|
3004 | 3221 | GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); |
---|
3005 | 3222 | |
---|
| 3223 | + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
---|
| 3224 | + ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
---|
3006 | 3225 | /* prepare a 16 byte iv */ |
---|
3007 | 3226 | /* S A L T | IV | 0x00000001 */ |
---|
3008 | 3227 | if (get_aead_subtype(tfm) == |
---|
3009 | 3228 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { |
---|
3010 | | - memcpy(reqctx->iv, aeadctx->salt, 4); |
---|
3011 | | - memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE); |
---|
| 3229 | + memcpy(ivptr, aeadctx->salt, 4); |
---|
| 3230 | + memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE); |
---|
3012 | 3231 | } else { |
---|
3013 | | - memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE); |
---|
| 3232 | + memcpy(ivptr, req->iv, GCM_AES_IV_SIZE); |
---|
3014 | 3233 | } |
---|
3015 | | - *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); |
---|
| 3234 | + put_unaligned_be32(0x01, &ivptr[12]); |
---|
| 3235 | + ulptx = (struct ulptx_sgl *)(ivptr + 16); |
---|
3016 | 3236 | |
---|
3017 | | - phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
---|
3018 | | - ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
---|
3019 | | - |
---|
3020 | | - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); |
---|
3021 | | - chcr_add_aead_src_ent(req, ulptx, assoclen); |
---|
| 3237 | + chcr_add_aead_dst_ent(req, phys_cpl, qid); |
---|
| 3238 | + chcr_add_aead_src_ent(req, ulptx); |
---|
3022 | 3239 | atomic_inc(&adap->chcr_stats.aead_rqst); |
---|
3023 | | - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
---|
3024 | | - kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); |
---|
| 3240 | + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + |
---|
| 3241 | + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); |
---|
3025 | 3242 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, |
---|
3026 | 3243 | transhdr_len, temp, reqctx->verify); |
---|
3027 | 3244 | reqctx->skb = skb; |
---|
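The GCM builder writes the initial counter block (SALT | IV | 0x00000001, per the comment in the hunk) straight to ivptr. Both variants as a sketch, using put_unaligned_be32() exactly as the hunk does (the helper name is illustrative):

    #include <asm/unaligned.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* 16-byte GCM initial counter block: RFC 4106 prepends the 4-byte
     * key salt to the 8-byte explicit IV; plain GCM copies the full
     * 12-byte request IV. Both end with a big-endian counter of 1.
     */
    static void gcm_build_iv(u8 *ivptr, bool rfc4106,
                             const u8 *salt, const u8 *req_iv)
    {
        if (rfc4106) {
            memcpy(ivptr, salt, 4);
            memcpy(ivptr + 4, req_iv, 8);  /* GCM_RFC4106_IV_SIZE */
        } else {
            memcpy(ivptr, req_iv, 12);     /* GCM_AES_IV_SIZE */
        }
        put_unaligned_be32(0x01, &ivptr[12]);
    }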
.. | .. |
---|
3119 | 3336 | aeadctx->mayverify = VERIFY_HW; |
---|
3120 | 3337 | break; |
---|
3121 | 3338 | case ICV_12: |
---|
3122 | | - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
---|
3123 | | - aeadctx->mayverify = VERIFY_HW; |
---|
| 3339 | + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
---|
| 3340 | + aeadctx->mayverify = VERIFY_HW; |
---|
3124 | 3341 | break; |
---|
3125 | 3342 | case ICV_14: |
---|
3126 | | - aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; |
---|
3127 | | - aeadctx->mayverify = VERIFY_HW; |
---|
| 3343 | + aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; |
---|
| 3344 | + aeadctx->mayverify = VERIFY_HW; |
---|
3128 | 3345 | break; |
---|
3129 | 3346 | case ICV_16: |
---|
3130 | 3347 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
---|
.. | .. |
---|
3224 | 3441 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
---|
3225 | 3442 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; |
---|
3226 | 3443 | } else { |
---|
3227 | | - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
3228 | 3444 | aeadctx->enckey_len = 0; |
---|
3229 | 3445 | return -EINVAL; |
---|
3230 | 3446 | } |
---|
.. | .. |
---|
3247 | 3463 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & |
---|
3248 | 3464 | CRYPTO_TFM_REQ_MASK); |
---|
3249 | 3465 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); |
---|
3250 | | - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); |
---|
3251 | | - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & |
---|
3252 | | - CRYPTO_TFM_RES_MASK); |
---|
3253 | 3466 | if (error) |
---|
3254 | 3467 | return error; |
---|
3255 | 3468 | return chcr_ccm_common_setkey(aead, key, keylen); |
---|
.. | .. |
---|
3262 | 3475 | int error; |
---|
3263 | 3476 | |
---|
3264 | 3477 | if (keylen < 3) { |
---|
3265 | | - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
3266 | 3478 | aeadctx->enckey_len = 0; |
---|
3267 | 3479 | return -EINVAL; |
---|
3268 | 3480 | } |
---|
.. | .. |
---|
3270 | 3482 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & |
---|
3271 | 3483 | CRYPTO_TFM_REQ_MASK); |
---|
3272 | 3484 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); |
---|
3273 | | - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); |
---|
3274 | | - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & |
---|
3275 | | - CRYPTO_TFM_RES_MASK); |
---|
3276 | 3485 | if (error) |
---|
3277 | 3486 | return error; |
---|
3278 | 3487 | keylen -= 3; |
---|
.. | .. |
---|
3285 | 3494 | { |
---|
3286 | 3495 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); |
---|
3287 | 3496 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); |
---|
3288 | | - struct crypto_cipher *cipher; |
---|
3289 | 3497 | unsigned int ck_size; |
---|
3290 | 3498 | int ret = 0, key_ctx_size = 0; |
---|
| 3499 | + struct crypto_aes_ctx aes; |
---|
3291 | 3500 | |
---|
3292 | 3501 | aeadctx->enckey_len = 0; |
---|
3293 | 3502 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); |
---|
3294 | 3503 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) |
---|
3295 | 3504 | & CRYPTO_TFM_REQ_MASK); |
---|
3296 | 3505 | ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); |
---|
3297 | | - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); |
---|
3298 | | - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & |
---|
3299 | | - CRYPTO_TFM_RES_MASK); |
---|
3300 | 3506 | if (ret) |
---|
3301 | 3507 | goto out; |
---|
3302 | 3508 | |
---|
.. | .. |
---|
3312 | 3518 | } else if (keylen == AES_KEYSIZE_256) { |
---|
3313 | 3519 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
---|
3314 | 3520 | } else { |
---|
3315 | | - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
3316 | 3521 | pr_err("GCM: Invalid key length %d\n", keylen); |
---|
3317 | 3522 | ret = -EINVAL; |
---|
3318 | 3523 | goto out; |
---|
.. | .. |
---|
3329 | 3534 | /* Calculate the H = CIPH(K, 0 repeated 16 times). |
---|
3330 | 3535 | * It will go in key context |
---|
3331 | 3536 | */ |
---|
3332 | | - cipher = crypto_alloc_cipher("aes-generic", 0, 0); |
---|
3333 | | - if (IS_ERR(cipher)) { |
---|
3334 | | - aeadctx->enckey_len = 0; |
---|
3335 | | - ret = -ENOMEM; |
---|
3336 | | - goto out; |
---|
3337 | | - } |
---|
3338 | | - |
---|
3339 | | - ret = crypto_cipher_setkey(cipher, key, keylen); |
---|
| 3537 | + ret = aes_expandkey(&aes, key, keylen); |
---|
3340 | 3538 | if (ret) { |
---|
3341 | 3539 | aeadctx->enckey_len = 0; |
---|
3342 | | - goto out1; |
---|
| 3540 | + goto out; |
---|
3343 | 3541 | } |
---|
3344 | 3542 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); |
---|
3345 | | - crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); |
---|
| 3543 | + aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h); |
---|
| 3544 | + memzero_explicit(&aes, sizeof(aes)); |
---|
3346 | 3545 | |
---|
3347 | | -out1: |
---|
3348 | | - crypto_free_cipher(cipher); |
---|
3349 | 3546 | out: |
---|
3350 | 3547 | return ret; |
---|
3351 | 3548 | } |
---|
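chcr_gcm_setkey() now derives the hash subkey H = AES_K(0^128) with the AES library routines instead of allocating a throwaway "aes-generic" crypto_cipher, which also removes the -ENOMEM path. The derivation in isolation (a sketch assuming <crypto/aes.h>):

    #include <crypto/aes.h>
    #include <linux/string.h>

    /* Compute the GHASH subkey H by encrypting an all-zero block with
     * the expanded AES key, then scrub the key schedule from the
     * stack, mirroring the hunk above. ghash_h must be 16 bytes
     * (AEAD_H_SIZE).
     */
    static int derive_ghash_h(u8 *ghash_h, const u8 *key, unsigned int keylen)
    {
        struct crypto_aes_ctx aes;
        int ret;

        ret = aes_expandkey(&aes, key, keylen);
        if (ret)
            return ret;

        memset(ghash_h, 0, 16);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));
        return 0;
    }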
.. | .. |
---|
3371 | 3568 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) |
---|
3372 | 3569 | & CRYPTO_TFM_REQ_MASK); |
---|
3373 | 3570 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); |
---|
3374 | | - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); |
---|
3375 | | - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) |
---|
3376 | | - & CRYPTO_TFM_RES_MASK); |
---|
3377 | 3571 | if (err) |
---|
3378 | 3572 | goto out; |
---|
3379 | 3573 | |
---|
3380 | | - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
---|
3381 | | - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
| 3574 | + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
---|
3382 | 3575 | goto out; |
---|
3383 | | - } |
---|
3384 | 3576 | |
---|
3385 | 3577 | if (get_alg_config(¶m, max_authsize)) { |
---|
3386 | | - pr_err("chcr : Unsupported digest size\n"); |
---|
| 3578 | + pr_err("Unsupported digest size\n"); |
---|
3387 | 3579 | goto out; |
---|
3388 | 3580 | } |
---|
3389 | 3581 | subtype = get_aead_subtype(authenc); |
---|
.. | .. |
---|
3402 | 3594 | } else if (keys.enckeylen == AES_KEYSIZE_256) { |
---|
3403 | 3595 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
---|
3404 | 3596 | } else { |
---|
3405 | | - pr_err("chcr : Unsupported cipher key\n"); |
---|
| 3597 | + pr_err("Unsupported cipher key\n"); |
---|
3406 | 3598 | goto out; |
---|
3407 | 3599 | } |
---|
3408 | 3600 | |
---|
.. | .. |
---|
3420 | 3612 | } |
---|
3421 | 3613 | base_hash = chcr_alloc_shash(max_authsize); |
---|
3422 | 3614 | if (IS_ERR(base_hash)) { |
---|
3423 | | - pr_err("chcr : Base driver cannot be loaded\n"); |
---|
3424 | | - aeadctx->enckey_len = 0; |
---|
3425 | | - memzero_explicit(&keys, sizeof(keys)); |
---|
3426 | | - return -EINVAL; |
---|
| 3615 | + pr_err("Base driver cannot be loaded\n"); |
---|
| 3616 | + goto out; |
---|
3427 | 3617 | } |
---|
3428 | 3618 | { |
---|
3429 | 3619 | SHASH_DESC_ON_STACK(shash, base_hash); |
---|
3430 | 3620 | |
---|
3431 | 3621 | shash->tfm = base_hash; |
---|
3432 | | - shash->flags = crypto_shash_get_flags(base_hash); |
---|
3433 | 3622 | bs = crypto_shash_blocksize(base_hash); |
---|
3434 | 3623 | align = KEYCTX_ALIGN_PAD(max_authsize); |
---|
3435 | 3624 | o_ptr = actx->h_iopad + param.result_size + align; |
---|
.. | .. |
---|
3439 | 3628 | keys.authkeylen, |
---|
3440 | 3629 | o_ptr); |
---|
3441 | 3630 | if (err) { |
---|
3442 | | - pr_err("chcr : Base driver cannot be loaded\n"); |
---|
| 3631 | + pr_err("Base driver cannot be loaded\n"); |
---|
3443 | 3632 | goto out; |
---|
3444 | 3633 | } |
---|
3445 | 3634 | keys.authkeylen = max_authsize; |
---|
.. | .. |
---|
3502 | 3691 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) |
---|
3503 | 3692 | & CRYPTO_TFM_REQ_MASK); |
---|
3504 | 3693 | err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); |
---|
3505 | | - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); |
---|
3506 | | - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) |
---|
3507 | | - & CRYPTO_TFM_RES_MASK); |
---|
3508 | 3694 | if (err) |
---|
3509 | 3695 | goto out; |
---|
3510 | 3696 | |
---|
3511 | | - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { |
---|
3512 | | - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
| 3697 | + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
---|
3513 | 3698 | goto out; |
---|
3514 | | - } |
---|
| 3699 | + |
---|
3515 | 3700 | subtype = get_aead_subtype(authenc); |
---|
3516 | 3701 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
---|
3517 | 3702 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
---|
.. | .. |
---|
3528 | 3713 | } else if (keys.enckeylen == AES_KEYSIZE_256) { |
---|
3529 | 3714 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
---|
3530 | 3715 | } else { |
---|
3531 | | - pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen); |
---|
| 3716 | + pr_err("Unsupported cipher key %d\n", keys.enckeylen); |
---|
3532 | 3717 | goto out; |
---|
3533 | 3718 | } |
---|
3534 | 3719 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); |
---|
.. | .. |
---|
3556 | 3741 | create_wr_t create_wr_fn) |
3557 | 3742 | { |
3558 | 3743 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3559 | | - struct uld_ctx *u_ctx; |
| 3744 | + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
| 3745 | + struct chcr_context *ctx = a_ctx(tfm); |
| 3746 | + struct uld_ctx *u_ctx = ULD_CTX(ctx); |
3560 | 3747 | struct sk_buff *skb; |
3561 | | - int isfull = 0; |
| 3748 | + struct chcr_dev *cdev; |
3562 | 3749 | |
3563 | | - if (!a_ctx(tfm)->dev) { |
3564 | | - pr_err("chcr : %s : No crypto device.\n", __func__); |
| 3750 | + cdev = a_ctx(tfm)->dev; |
| 3751 | + if (!cdev) { |
| 3752 | + pr_err("%s : No crypto device.\n", __func__); |
3565 | 3753 | return -ENXIO; |
3566 | 3754 | } |
3567 | | - u_ctx = ULD_CTX(a_ctx(tfm)); |
| 3755 | + |
| 3756 | + if (chcr_inc_wrcount(cdev)) { |
| 3757 | + /* Detach state for CHCR means lldi or padap is freed. |
| 3758 | + * We cannot increment fallback here. |
| 3759 | + */ |
| 3760 | + return chcr_aead_fallback(req, reqctx->op); |
| 3761 | + } |
| 3762 | + |
3568 | 3763 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
3569 | | - a_ctx(tfm)->tx_qidx)) { |
3570 | | - isfull = 1; |
3571 | | - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
| 3764 | + reqctx->txqidx) && |
| 3765 | + (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) { |
| 3766 | + chcr_dec_wrcount(cdev); |
3572 | 3767 | return -ENOSPC; |
3573 | 3768 | } |
3574 | 3769 | |
3575 | | - /* Form a WR from req */ |
3576 | | - skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size); |
| 3770 | + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && |
| 3771 | + crypto_ipsec_check_assoclen(req->assoclen) != 0) { |
| 3772 | + pr_err("RFC4106: Invalid value of assoclen %d\n", |
| 3773 | + req->assoclen); |
| 3774 | + return -EINVAL; |
| 3775 | + } |
3577 | 3776 | |
3578 | | - if (IS_ERR(skb) || !skb) |
3579 | | - return PTR_ERR(skb); |
| 3777 | + /* Form a WR from req */ |
| 3778 | + skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size); |
| 3779 | + |
| 3780 | + if (IS_ERR_OR_NULL(skb)) { |
| 3781 | + chcr_dec_wrcount(cdev); |
| 3782 | + return PTR_ERR_OR_ZERO(skb); |
| 3783 | + } |
3580 | 3784 | |
3581 | 3785 | skb->dev = u_ctx->lldi.ports[0]; |
3582 | | - set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); |
| 3786 | + set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
3583 | 3787 | chcr_send_wr(skb); |
3584 | | - return isfull ? -EBUSY : -EINPROGRESS; |
| 3788 | + return -EINPROGRESS; |
3585 | 3789 | } |
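With the inflight counter in place, chcr_aead_op() follows a strict pairing rule: once chcr_inc_wrcount() succeeds, every path that does not hand the request to hardware has to call chcr_dec_wrcount(), so a detaching device can wait for the count to drain. A hedged sketch of that discipline with a simplified signature (submit_guarded() is hypothetical; chcr_aead_fallback() and chcr_send_wr() are the driver's own helpers):

```c
static int submit_guarded(struct chcr_dev *dev, struct sk_buff *skb,
			  struct aead_request *req, unsigned short op)
{
	/* CHCR_DETACH: no new hardware work, punt to the sw fallback */
	if (chcr_inc_wrcount(dev))
		return chcr_aead_fallback(req, op);

	/* WR construction failed: undo the count before bailing out */
	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(dev);
		return PTR_ERR_OR_ZERO(skb);
	}

	chcr_send_wr(skb);	/* the completion handler decrements */
	return -EINPROGRESS;
}
```

The removal of the old isfull/-EBUSY dance means a full queue is now reported as plain -ENOSPC unless the caller allowed backlogging.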
3586 | 3790 | |
3587 | 3791 | static int chcr_aead_encrypt(struct aead_request *req) |
3588 | 3792 | { |
3589 | 3793 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3590 | 3794 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
| 3795 | + struct chcr_context *ctx = a_ctx(tfm); |
| 3796 | + unsigned int cpu; |
| 3797 | + |
| 3798 | + cpu = get_cpu(); |
| 3799 | + reqctx->txqidx = cpu % ctx->ntxq; |
| 3800 | + reqctx->rxqidx = cpu % ctx->nrxq; |
| 3801 | + put_cpu(); |
3591 | 3802 | |
3592 | 3803 | reqctx->verify = VERIFY_HW; |
3593 | 3804 | reqctx->op = CHCR_ENCRYPT_OP; |
.. | .. |
---|
3609 | 3820 | static int chcr_aead_decrypt(struct aead_request *req) |
3610 | 3821 | { |
3611 | 3822 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3612 | | - struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
| 3823 | + struct chcr_context *ctx = a_ctx(tfm); |
| 3824 | + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
3613 | 3825 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
3614 | 3826 | int size; |
| 3827 | + unsigned int cpu; |
| 3828 | + |
| 3829 | + cpu = get_cpu(); |
| 3830 | + reqctx->txqidx = cpu % ctx->ntxq; |
| 3831 | + reqctx->rxqidx = cpu % ctx->nrxq; |
| 3832 | + put_cpu(); |
3615 | 3833 | |
3616 | 3834 | if (aeadctx->mayverify == VERIFY_SW) { |
3617 | 3835 | size = crypto_aead_maxauthsize(tfm); |
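Both entry points now pick their hardware queues from the submitting CPU instead of a per-tfm index, spreading load across the configured queue sets. A sketch of the pattern, factored into a hypothetical helper (field names follow the diff):

```c
#include <linux/smp.h>

static void pick_queues(struct chcr_context *ctx,
			struct chcr_aead_reqctx *reqctx)
{
	unsigned int cpu = get_cpu();	/* disables preemption */

	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();			/* re-enables preemption */
}
```

get_cpu()/put_cpu() keep the CPU id stable only for the modulo; the request may still complete on another CPU.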
.. | .. |
---|
3638 | 3856 | static struct chcr_alg_template driver_algs[] = { |
3639 | 3857 | /* AES-CBC */ |
3640 | 3858 | { |
3641 | | - .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, |
| 3859 | + .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, |
3642 | 3860 | .is_registered = 0, |
3643 | | - .alg.crypto = { |
3644 | | - .cra_name = "cbc(aes)", |
3645 | | - .cra_driver_name = "cbc-aes-chcr", |
3646 | | - .cra_blocksize = AES_BLOCK_SIZE, |
3647 | | - .cra_init = chcr_cra_init, |
3648 | | - .cra_exit = chcr_cra_exit, |
3649 | | - .cra_u.ablkcipher = { |
3650 | | - .min_keysize = AES_MIN_KEY_SIZE, |
3651 | | - .max_keysize = AES_MAX_KEY_SIZE, |
3652 | | - .ivsize = AES_BLOCK_SIZE, |
3653 | | - .setkey = chcr_aes_cbc_setkey, |
3654 | | - .encrypt = chcr_aes_encrypt, |
3655 | | - .decrypt = chcr_aes_decrypt, |
| 3861 | + .alg.skcipher = { |
| 3862 | + .base.cra_name = "cbc(aes)", |
| 3863 | + .base.cra_driver_name = "cbc-aes-chcr", |
| 3864 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
| 3865 | + |
| 3866 | + .init = chcr_init_tfm, |
| 3867 | + .exit = chcr_exit_tfm, |
| 3868 | + .min_keysize = AES_MIN_KEY_SIZE, |
| 3869 | + .max_keysize = AES_MAX_KEY_SIZE, |
| 3870 | + .ivsize = AES_BLOCK_SIZE, |
| 3871 | + .setkey = chcr_aes_cbc_setkey, |
| 3872 | + .encrypt = chcr_aes_encrypt, |
| 3873 | + .decrypt = chcr_aes_decrypt, |
3656 | 3874 | } |
| 3875 | + }, |
| 3876 | + { |
| 3877 | + .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, |
| 3878 | + .is_registered = 0, |
| 3879 | + .alg.skcipher = { |
| 3880 | + .base.cra_name = "xts(aes)", |
| 3881 | + .base.cra_driver_name = "xts-aes-chcr", |
| 3882 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
| 3883 | + |
| 3884 | + .init = chcr_init_tfm, |
| 3885 | + .exit = chcr_exit_tfm, |
| 3886 | + .min_keysize = 2 * AES_MIN_KEY_SIZE, |
| 3887 | + .max_keysize = 2 * AES_MAX_KEY_SIZE, |
| 3888 | + .ivsize = AES_BLOCK_SIZE, |
| 3889 | + .setkey = chcr_aes_xts_setkey, |
| 3890 | + .encrypt = chcr_aes_encrypt, |
| 3891 | + .decrypt = chcr_aes_decrypt, |
| 3892 | + } |
| 3893 | + }, |
| 3894 | + { |
| 3895 | + .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, |
| 3896 | + .is_registered = 0, |
| 3897 | + .alg.skcipher = { |
| 3898 | + .base.cra_name = "ctr(aes)", |
| 3899 | + .base.cra_driver_name = "ctr-aes-chcr", |
| 3900 | + .base.cra_blocksize = 1, |
| 3901 | + |
| 3902 | + .init = chcr_init_tfm, |
| 3903 | + .exit = chcr_exit_tfm, |
| 3904 | + .min_keysize = AES_MIN_KEY_SIZE, |
| 3905 | + .max_keysize = AES_MAX_KEY_SIZE, |
| 3906 | + .ivsize = AES_BLOCK_SIZE, |
| 3907 | + .setkey = chcr_aes_ctr_setkey, |
| 3908 | + .encrypt = chcr_aes_encrypt, |
| 3909 | + .decrypt = chcr_aes_decrypt, |
3657 | 3910 | } |
3658 | 3911 | }, |
3659 | 3912 | { |
3660 | | - .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, |
3661 | | - .is_registered = 0, |
3662 | | - .alg.crypto = { |
3663 | | - .cra_name = "xts(aes)", |
3664 | | - .cra_driver_name = "xts-aes-chcr", |
3665 | | - .cra_blocksize = AES_BLOCK_SIZE, |
3666 | | - .cra_init = chcr_cra_init, |
3667 | | - .cra_exit = NULL, |
3668 | | - .cra_u .ablkcipher = { |
3669 | | - .min_keysize = 2 * AES_MIN_KEY_SIZE, |
3670 | | - .max_keysize = 2 * AES_MAX_KEY_SIZE, |
3671 | | - .ivsize = AES_BLOCK_SIZE, |
3672 | | - .setkey = chcr_aes_xts_setkey, |
3673 | | - .encrypt = chcr_aes_encrypt, |
3674 | | - .decrypt = chcr_aes_decrypt, |
3675 | | - } |
3676 | | - } |
3677 | | - }, |
3678 | | - { |
3679 | | - .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, |
3680 | | - .is_registered = 0, |
3681 | | - .alg.crypto = { |
3682 | | - .cra_name = "ctr(aes)", |
3683 | | - .cra_driver_name = "ctr-aes-chcr", |
3684 | | - .cra_blocksize = 1, |
3685 | | - .cra_init = chcr_cra_init, |
3686 | | - .cra_exit = chcr_cra_exit, |
3687 | | - .cra_u.ablkcipher = { |
3688 | | - .min_keysize = AES_MIN_KEY_SIZE, |
3689 | | - .max_keysize = AES_MAX_KEY_SIZE, |
3690 | | - .ivsize = AES_BLOCK_SIZE, |
3691 | | - .setkey = chcr_aes_ctr_setkey, |
3692 | | - .encrypt = chcr_aes_encrypt, |
3693 | | - .decrypt = chcr_aes_decrypt, |
3694 | | - } |
3695 | | - } |
3696 | | - }, |
3697 | | - { |
3698 | | - .type = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 3913 | + .type = CRYPTO_ALG_TYPE_SKCIPHER | |
3699 | 3914 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, |
3700 | 3915 | .is_registered = 0, |
3701 | | - .alg.crypto = { |
3702 | | - .cra_name = "rfc3686(ctr(aes))", |
3703 | | - .cra_driver_name = "rfc3686-ctr-aes-chcr", |
3704 | | - .cra_blocksize = 1, |
3705 | | - .cra_init = chcr_rfc3686_init, |
3706 | | - .cra_exit = chcr_cra_exit, |
3707 | | - .cra_u.ablkcipher = { |
3708 | | - .min_keysize = AES_MIN_KEY_SIZE + |
3709 | | - CTR_RFC3686_NONCE_SIZE, |
3710 | | - .max_keysize = AES_MAX_KEY_SIZE + |
3711 | | - CTR_RFC3686_NONCE_SIZE, |
3712 | | - .ivsize = CTR_RFC3686_IV_SIZE, |
3713 | | - .setkey = chcr_aes_rfc3686_setkey, |
3714 | | - .encrypt = chcr_aes_encrypt, |
3715 | | - .decrypt = chcr_aes_decrypt, |
3716 | | - .geniv = "seqiv", |
3717 | | - } |
| 3916 | + .alg.skcipher = { |
| 3917 | + .base.cra_name = "rfc3686(ctr(aes))", |
| 3918 | + .base.cra_driver_name = "rfc3686-ctr-aes-chcr", |
| 3919 | + .base.cra_blocksize = 1, |
| 3920 | + |
| 3921 | + .init = chcr_rfc3686_init, |
| 3922 | + .exit = chcr_exit_tfm, |
| 3923 | + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, |
| 3924 | + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, |
| 3925 | + .ivsize = CTR_RFC3686_IV_SIZE, |
| 3926 | + .setkey = chcr_aes_rfc3686_setkey, |
| 3927 | + .encrypt = chcr_aes_encrypt, |
| 3928 | + .decrypt = chcr_aes_decrypt, |
3718 | 3929 | } |
3719 | 3930 | }, |
3720 | 3931 | /* SHA */ |
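The template rework above drops the crypto_alg/cra_u.ablkcipher layout for flat struct skcipher_alg entries, with the generic fields nested under .base and the cipher callbacks promoted to top-level members. A minimal sketch of the new-style template; the example_* callbacks are placeholders, not driver symbols:

```c
#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int example_init_tfm(struct crypto_skcipher *tfm);
static void example_exit_tfm(struct crypto_skcipher *tfm);
static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen);
static int example_encrypt(struct skcipher_request *req);
static int example_decrypt(struct skcipher_request *req);

static struct skcipher_alg example_cbc_aes = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-example",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	/* priority, module, flags and ctxsize are filled in at
	 * registration time, as chcr_register_alg() does further down */

	.init		= example_init_tfm,
	.exit		= example_exit_tfm,
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= example_setkey,
	.encrypt	= example_encrypt,
	.decrypt	= example_decrypt,
};
```

Registration then goes through crypto_register_skcipher() rather than crypto_register_alg(), cra_type no longer needs to be set by hand, and the old .geniv hint disappears entirely.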
.. | .. |
---|
4169 | 4380 | .setauthsize = chcr_authenc_null_setauthsize, |
4170 | 4381 | } |
4171 | 4382 | }, |
4172 | | - |
4173 | 4383 | }; |
4174 | 4384 | |
4175 | 4385 | /* |
.. | .. |
---|
4182 | 4392 | |
4183 | 4393 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
4184 | 4394 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { |
4185 | | - case CRYPTO_ALG_TYPE_ABLKCIPHER: |
4186 | | - if (driver_algs[i].is_registered) |
4187 | | - crypto_unregister_alg( |
4188 | | - &driver_algs[i].alg.crypto); |
| 4395 | + case CRYPTO_ALG_TYPE_SKCIPHER: |
| 4396 | + if (driver_algs[i].is_registered && refcount_read( |
| 4397 | + &driver_algs[i].alg.skcipher.base.cra_refcnt) |
| 4398 | + == 1) { |
| 4399 | + crypto_unregister_skcipher( |
| 4400 | + &driver_algs[i].alg.skcipher); |
| 4401 | + driver_algs[i].is_registered = 0; |
| 4402 | + } |
4189 | 4403 | break; |
4190 | 4404 | case CRYPTO_ALG_TYPE_AEAD: |
4191 | | - if (driver_algs[i].is_registered) |
| 4405 | + if (driver_algs[i].is_registered && refcount_read( |
| 4406 | + &driver_algs[i].alg.aead.base.cra_refcnt) == 1) { |
4192 | 4407 | crypto_unregister_aead( |
4193 | 4408 | &driver_algs[i].alg.aead); |
| 4409 | + driver_algs[i].is_registered = 0; |
| 4410 | + } |
4194 | 4411 | break; |
4195 | 4412 | case CRYPTO_ALG_TYPE_AHASH: |
4196 | | - if (driver_algs[i].is_registered) |
| 4413 | + if (driver_algs[i].is_registered && refcount_read( |
| 4414 | + &driver_algs[i].alg.hash.halg.base.cra_refcnt) |
| 4415 | + == 1) { |
4197 | 4416 | crypto_unregister_ahash( |
4198 | 4417 | &driver_algs[i].alg.hash); |
| 4418 | + driver_algs[i].is_registered = 0; |
| 4419 | + } |
4199 | 4420 | break; |
4200 | 4421 | } |
4201 | | - driver_algs[i].is_registered = 0; |
4202 | 4422 | } |
4203 | 4423 | return 0; |
4204 | 4424 | } |
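The unregister loop now consults the algorithm's reference count before tearing anything down: a cra_refcnt of exactly 1 means only the registration itself holds a reference, so no live tfms remain. A small sketch of the guard, using a hypothetical helper name:

```c
#include <linux/crypto.h>

/* true when only the registration's own reference remains */
static bool safe_to_unregister(struct crypto_alg *alg)
{
	return refcount_read(&alg->cra_refcnt) == 1;
}
```

Note that is_registered is now cleared only on the branches that actually unregister, so an entry still held by users keeps its bookkeeping and can be retried on a later pass.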
.. | .. |
---|
4221 | 4441 | if (driver_algs[i].is_registered) |
4222 | 4442 | continue; |
4223 | 4443 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { |
4224 | | - case CRYPTO_ALG_TYPE_ABLKCIPHER: |
4225 | | - driver_algs[i].alg.crypto.cra_priority = |
| 4444 | + case CRYPTO_ALG_TYPE_SKCIPHER: |
| 4445 | + driver_algs[i].alg.skcipher.base.cra_priority = |
4226 | 4446 | CHCR_CRA_PRIORITY; |
4227 | | - driver_algs[i].alg.crypto.cra_module = THIS_MODULE; |
4228 | | - driver_algs[i].alg.crypto.cra_flags = |
4229 | | - CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | |
| 4447 | + driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; |
| 4448 | + driver_algs[i].alg.skcipher.base.cra_flags = |
| 4449 | + CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | |
| 4450 | + CRYPTO_ALG_ALLOCATES_MEMORY | |
4230 | 4451 | CRYPTO_ALG_NEED_FALLBACK; |
4231 | | - driver_algs[i].alg.crypto.cra_ctxsize = |
| 4452 | + driver_algs[i].alg.skcipher.base.cra_ctxsize = |
4232 | 4453 | sizeof(struct chcr_context) + |
4233 | 4454 | sizeof(struct ablk_ctx); |
4234 | | - driver_algs[i].alg.crypto.cra_alignmask = 0; |
4235 | | - driver_algs[i].alg.crypto.cra_type = |
4236 | | - &crypto_ablkcipher_type; |
4237 | | - err = crypto_register_alg(&driver_algs[i].alg.crypto); |
4238 | | - name = driver_algs[i].alg.crypto.cra_driver_name; |
| 4455 | + driver_algs[i].alg.skcipher.base.cra_alignmask = 0; |
| 4456 | + |
| 4457 | + err = crypto_register_skcipher(&driver_algs[i].alg.skcipher); |
| 4458 | + name = driver_algs[i].alg.skcipher.base.cra_driver_name; |
4239 | 4459 | break; |
4240 | 4460 | case CRYPTO_ALG_TYPE_AEAD: |
4241 | 4461 | driver_algs[i].alg.aead.base.cra_flags = |
4242 | | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; |
| 4462 | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | |
| 4463 | + CRYPTO_ALG_ALLOCATES_MEMORY; |
4243 | 4464 | driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; |
4244 | 4465 | driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; |
4245 | 4466 | driver_algs[i].alg.aead.init = chcr_aead_cra_init; |
.. | .. |
---|
4259 | 4480 | a_hash->halg.statesize = SZ_AHASH_REQ_CTX; |
4260 | 4481 | a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; |
4261 | 4482 | a_hash->halg.base.cra_module = THIS_MODULE; |
4262 | | - a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC; |
| 4483 | + a_hash->halg.base.cra_flags = |
| 4484 | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; |
4263 | 4485 | a_hash->halg.base.cra_alignmask = 0; |
4264 | 4486 | a_hash->halg.base.cra_exit = NULL; |
4265 | 4487 | |
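All three registration paths now advertise CRYPTO_ALG_ALLOCATES_MEMORY, telling the API that the driver may allocate while processing a request. A caller that cannot tolerate data-path allocations excludes such implementations by putting the flag in the allocation mask; a hedged sketch:

```c
#include <crypto/skcipher.h>

/* ask for a cbc(aes) implementation that never allocates
 * memory inside its encrypt/decrypt path */
static struct crypto_skcipher *alloc_nonallocating_cipher(void)
{
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}
```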
.. | .. |
---|
4280 | 4502 | break; |
4281 | 4503 | } |
4282 | 4504 | if (err) { |
4283 | | - pr_err("chcr : %s : Algorithm registration failed\n", |
4284 | | - name); |
| 4505 | + pr_err("%s : Algorithm registration failed\n", name); |
4285 | 4506 | goto register_err; |
4286 | 4507 | } else { |
4287 | 4508 | driver_algs[i].is_registered = 1; |