2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/crypto/qat/qat_common/qat_algs.c
....@@ -1,59 +1,18 @@
1
-/*
2
- This file is provided under a dual BSD/GPLv2 license. When using or
3
- redistributing this file, you may do so under either license.
4
-
5
- GPL LICENSE SUMMARY
6
- Copyright(c) 2014 Intel Corporation.
7
- This program is free software; you can redistribute it and/or modify
8
- it under the terms of version 2 of the GNU General Public License as
9
- published by the Free Software Foundation.
10
-
11
- This program is distributed in the hope that it will be useful, but
12
- WITHOUT ANY WARRANTY; without even the implied warranty of
13
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
- General Public License for more details.
15
-
16
- Contact Information:
17
- qat-linux@intel.com
18
-
19
- BSD LICENSE
20
- Copyright(c) 2014 Intel Corporation.
21
- Redistribution and use in source and binary forms, with or without
22
- modification, are permitted provided that the following conditions
23
- are met:
24
-
25
- * Redistributions of source code must retain the above copyright
26
- notice, this list of conditions and the following disclaimer.
27
- * Redistributions in binary form must reproduce the above copyright
28
- notice, this list of conditions and the following disclaimer in
29
- the documentation and/or other materials provided with the
30
- distribution.
31
- * Neither the name of Intel Corporation nor the names of its
32
- contributors may be used to endorse or promote products derived
33
- from this software without specific prior written permission.
34
-
35
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46
-*/
1
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2
+/* Copyright(c) 2014 - 2020 Intel Corporation */
473 #include <linux/module.h>
484 #include <linux/slab.h>
495 #include <linux/crypto.h>
506 #include <crypto/internal/aead.h>
7
+#include <crypto/internal/cipher.h>
8
+#include <crypto/internal/skcipher.h>
519 #include <crypto/aes.h>
5210 #include <crypto/sha.h>
5311 #include <crypto/hash.h>
5412 #include <crypto/hmac.h>
5513 #include <crypto/algapi.h>
5614 #include <crypto/authenc.h>
15
+#include <crypto/xts.h>
5716 #include <linux/dma-mapping.h>
5817 #include "adf_accel_devices.h"
5918 #include "adf_transport.h"
....@@ -76,19 +35,6 @@
7635 static DEFINE_MUTEX(algs_lock);
7736 static unsigned int active_devs;
7837
79
-struct qat_alg_buf {
80
- uint32_t len;
81
- uint32_t resrvd;
82
- uint64_t addr;
83
-} __packed;
84
-
85
-struct qat_alg_buf_list {
86
- uint64_t resrvd;
87
- uint32_t num_bufs;
88
- uint32_t num_mapped_bufs;
89
- struct qat_alg_buf bufers[];
90
-} __packed __aligned(64);
91
-
9238 /* Common content descriptor */
9339 struct qat_alg_cd {
9440 union {
....@@ -96,7 +42,7 @@
9642 struct icp_qat_hw_cipher_algo_blk cipher;
9743 struct icp_qat_hw_auth_algo_blk hash;
9844 } qat_enc_cd;
99
- struct qat_dec { /* Decrytp content desc */
45
+ struct qat_dec { /* Decrypt content desc */
10046 struct icp_qat_hw_auth_algo_blk hash;
10147 struct icp_qat_hw_cipher_algo_blk cipher;
10248 } qat_dec_cd;
....@@ -113,9 +59,16 @@
11359 struct crypto_shash *hash_tfm;
11460 enum icp_qat_hw_auth_algo qat_hash_alg;
11561 struct qat_crypto_instance *inst;
62
+ union {
63
+ struct sha1_state sha1;
64
+ struct sha256_state sha256;
65
+ struct sha512_state sha512;
66
+ };
67
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
68
+ char opad[SHA512_BLOCK_SIZE];
11669 };
11770
118
-struct qat_alg_ablkcipher_ctx {
71
+struct qat_alg_skcipher_ctx {
11972 struct icp_qat_hw_cipher_algo_blk *enc_cd;
12073 struct icp_qat_hw_cipher_algo_blk *dec_cd;
12174 dma_addr_t enc_cd_paddr;
....@@ -123,8 +76,8 @@
12376 struct icp_qat_fw_la_bulk_req enc_fw_req;
12477 struct icp_qat_fw_la_bulk_req dec_fw_req;
12578 struct qat_crypto_instance *inst;
126
- struct crypto_tfm *tfm;
127
- spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
79
+ struct crypto_skcipher *ftfm;
80
+ bool fallback;
12881 };
12982
13083 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
....@@ -144,41 +97,35 @@
14497
14598 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
14699 struct qat_alg_aead_ctx *ctx,
147
- const uint8_t *auth_key,
100
+ const u8 *auth_key,
148101 unsigned int auth_keylen)
149102 {
150103 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
151
- struct sha1_state sha1;
152
- struct sha256_state sha256;
153
- struct sha512_state sha512;
154104 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
155105 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
156
- char ipad[block_size];
157
- char opad[block_size];
158106 __be32 *hash_state_out;
159107 __be64 *hash512_state_out;
160108 int i, offset;
161109
162
- memset(ipad, 0, block_size);
163
- memset(opad, 0, block_size);
110
+ memset(ctx->ipad, 0, block_size);
111
+ memset(ctx->opad, 0, block_size);
164112 shash->tfm = ctx->hash_tfm;
165
- shash->flags = 0x0;
166113
167114 if (auth_keylen > block_size) {
168115 int ret = crypto_shash_digest(shash, auth_key,
169
- auth_keylen, ipad);
116
+ auth_keylen, ctx->ipad);
170117 if (ret)
171118 return ret;
172119
173
- memcpy(opad, ipad, digest_size);
120
+ memcpy(ctx->opad, ctx->ipad, digest_size);
174121 } else {
175
- memcpy(ipad, auth_key, auth_keylen);
176
- memcpy(opad, auth_key, auth_keylen);
122
+ memcpy(ctx->ipad, auth_key, auth_keylen);
123
+ memcpy(ctx->opad, auth_key, auth_keylen);
177124 }
178125
179126 for (i = 0; i < block_size; i++) {
180
- char *ipad_ptr = ipad + i;
181
- char *opad_ptr = opad + i;
127
+ char *ipad_ptr = ctx->ipad + i;
128
+ char *opad_ptr = ctx->opad + i;
182129 *ipad_ptr ^= HMAC_IPAD_VALUE;
183130 *opad_ptr ^= HMAC_OPAD_VALUE;
184131 }
....@@ -186,7 +133,7 @@
186133 if (crypto_shash_init(shash))
187134 return -EFAULT;
188135
189
- if (crypto_shash_update(shash, ipad, block_size))
136
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
190137 return -EFAULT;
191138
192139 hash_state_out = (__be32 *)hash->sha.state1;
....@@ -194,22 +141,22 @@
194141
195142 switch (ctx->qat_hash_alg) {
196143 case ICP_QAT_HW_AUTH_ALGO_SHA1:
197
- if (crypto_shash_export(shash, &sha1))
144
+ if (crypto_shash_export(shash, &ctx->sha1))
198145 return -EFAULT;
199146 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
200
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
147
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
201148 break;
202149 case ICP_QAT_HW_AUTH_ALGO_SHA256:
203
- if (crypto_shash_export(shash, &sha256))
150
+ if (crypto_shash_export(shash, &ctx->sha256))
204151 return -EFAULT;
205152 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
206
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
153
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
207154 break;
208155 case ICP_QAT_HW_AUTH_ALGO_SHA512:
209
- if (crypto_shash_export(shash, &sha512))
156
+ if (crypto_shash_export(shash, &ctx->sha512))
210157 return -EFAULT;
211158 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
212
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
159
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
213160 break;
214161 default:
215162 return -EFAULT;
....@@ -218,41 +165,61 @@
218165 if (crypto_shash_init(shash))
219166 return -EFAULT;
220167
221
- if (crypto_shash_update(shash, opad, block_size))
168
+ if (crypto_shash_update(shash, ctx->opad, block_size))
222169 return -EFAULT;
223170
224171 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
172
+ if (offset < 0)
173
+ return -EFAULT;
174
+
225175 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
226176 hash512_state_out = (__be64 *)hash_state_out;
227177
228178 switch (ctx->qat_hash_alg) {
229179 case ICP_QAT_HW_AUTH_ALGO_SHA1:
230
- if (crypto_shash_export(shash, &sha1))
180
+ if (crypto_shash_export(shash, &ctx->sha1))
231181 return -EFAULT;
232182 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
233
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
183
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
234184 break;
235185 case ICP_QAT_HW_AUTH_ALGO_SHA256:
236
- if (crypto_shash_export(shash, &sha256))
186
+ if (crypto_shash_export(shash, &ctx->sha256))
237187 return -EFAULT;
238188 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
239
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
189
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
240190 break;
241191 case ICP_QAT_HW_AUTH_ALGO_SHA512:
242
- if (crypto_shash_export(shash, &sha512))
192
+ if (crypto_shash_export(shash, &ctx->sha512))
243193 return -EFAULT;
244194 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
245
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
195
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
246196 break;
247197 default:
248198 return -EFAULT;
249199 }
250
- memzero_explicit(ipad, block_size);
251
- memzero_explicit(opad, block_size);
200
+ memzero_explicit(ctx->ipad, block_size);
201
+ memzero_explicit(ctx->opad, block_size);
252202 return 0;
253203 }
254204
255
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
205
+static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
206
+{
207
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
208
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
209
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
210
+ ICP_QAT_FW_LA_UPDATE_STATE);
211
+}
212
+
213
+static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
214
+{
215
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
216
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
217
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
218
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
219
+}
220
+
221
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
222
+ int aead)
256223 {
257224 header->hdr_flags =
258225 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
....@@ -262,12 +229,12 @@
262229 QAT_COMN_PTR_TYPE_SGL);
263230 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
264231 ICP_QAT_FW_LA_PARTIAL_NONE);
265
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
266
- ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
232
+ if (aead)
233
+ qat_alg_init_hdr_no_iv_updt(header);
234
+ else
235
+ qat_alg_init_hdr_iv_updt(header);
267236 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
268237 ICP_QAT_FW_LA_NO_PROTO);
269
- ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
270
- ICP_QAT_FW_LA_NO_UPDATE_STATE);
271238 }
272239
273240 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
....@@ -302,7 +269,7 @@
302269 return -EFAULT;
303270
304271 /* Request setup */
305
- qat_alg_init_common_hdr(header);
272
+ qat_alg_init_common_hdr(header, 1);
306273 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
307274 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
308275 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
....@@ -389,7 +356,7 @@
389356 return -EFAULT;
390357
391358 /* Request setup */
392
- qat_alg_init_common_hdr(header);
359
+ qat_alg_init_common_hdr(header, 1);
393360 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
394361 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
395362 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
....@@ -443,17 +410,17 @@
443410 return 0;
444411 }
445412
446
-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
447
- struct icp_qat_fw_la_bulk_req *req,
448
- struct icp_qat_hw_cipher_algo_blk *cd,
449
- const uint8_t *key, unsigned int keylen)
413
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
414
+ struct icp_qat_fw_la_bulk_req *req,
415
+ struct icp_qat_hw_cipher_algo_blk *cd,
416
+ const u8 *key, unsigned int keylen)
450417 {
451418 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
452419 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
453420 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
454421
455422 memcpy(cd->aes.key, key, keylen);
456
- qat_alg_init_common_hdr(header);
423
+ qat_alg_init_common_hdr(header, 0);
457424 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
458425 cd_pars->u.s.content_desc_params_sz =
459426 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
....@@ -465,28 +432,28 @@
465432 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
466433 }
467434
468
-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
469
- int alg, const uint8_t *key,
470
- unsigned int keylen, int mode)
435
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
436
+ int alg, const u8 *key,
437
+ unsigned int keylen, int mode)
471438 {
472439 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
473440 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
474441 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
475442
476
- qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
443
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
477444 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
478445 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
479446 }
480447
481
-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
482
- int alg, const uint8_t *key,
483
- unsigned int keylen, int mode)
448
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
449
+ int alg, const u8 *key,
450
+ unsigned int keylen, int mode)
484451 {
485452 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
486453 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
487454 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
488455
489
- qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
456
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
490457 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
491458
492459 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
....@@ -549,7 +516,6 @@
549516 memzero_explicit(&keys, sizeof(keys));
550517 return 0;
551518 bad_key:
552
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
553519 memzero_explicit(&keys, sizeof(keys));
554520 return -EINVAL;
555521 error:
....@@ -557,63 +523,67 @@
557523 return -EFAULT;
558524 }
559525
560
-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
561
- const uint8_t *key,
562
- unsigned int keylen,
563
- int mode)
526
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
527
+ const u8 *key,
528
+ unsigned int keylen,
529
+ int mode)
564530 {
565531 int alg;
566532
567533 if (qat_alg_validate_key(keylen, &alg, mode))
568
- goto bad_key;
534
+ return -EINVAL;
569535
570
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
571
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
536
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
537
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
572538 return 0;
573
-bad_key:
574
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
575
- return -EINVAL;
576539 }
577540
578
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
541
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
542
+ unsigned int keylen)
543
+{
544
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
545
+
546
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
547
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
548
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
549
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
550
+
551
+ return qat_alg_aead_init_sessions(tfm, key, keylen,
552
+ ICP_QAT_HW_CIPHER_CBC_MODE);
553
+}
554
+
555
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
579556 unsigned int keylen)
580557 {
581558 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
559
+ struct qat_crypto_instance *inst = NULL;
560
+ int node = get_current_node();
582561 struct device *dev;
562
+ int ret;
583563
584
- if (ctx->enc_cd) {
585
- /* rekeying */
586
- dev = &GET_DEV(ctx->inst->accel_dev);
587
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
588
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
589
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
590
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
591
- } else {
592
- /* new key */
593
- int node = get_current_node();
594
- struct qat_crypto_instance *inst =
595
- qat_crypto_get_instance_node(node);
596
- if (!inst) {
597
- return -EINVAL;
598
- }
599
-
600
- dev = &GET_DEV(inst->accel_dev);
601
- ctx->inst = inst;
602
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
603
- &ctx->enc_cd_paddr,
604
- GFP_ATOMIC);
605
- if (!ctx->enc_cd) {
606
- return -ENOMEM;
607
- }
608
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
609
- &ctx->dec_cd_paddr,
610
- GFP_ATOMIC);
611
- if (!ctx->dec_cd) {
612
- goto out_free_enc;
613
- }
564
+ inst = qat_crypto_get_instance_node(node);
565
+ if (!inst)
566
+ return -EINVAL;
567
+ dev = &GET_DEV(inst->accel_dev);
568
+ ctx->inst = inst;
569
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
570
+ &ctx->enc_cd_paddr,
571
+ GFP_ATOMIC);
572
+ if (!ctx->enc_cd) {
573
+ ret = -ENOMEM;
574
+ goto out_free_inst;
614575 }
615
- if (qat_alg_aead_init_sessions(tfm, key, keylen,
616
- ICP_QAT_HW_CIPHER_CBC_MODE))
576
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
577
+ &ctx->dec_cd_paddr,
578
+ GFP_ATOMIC);
579
+ if (!ctx->dec_cd) {
580
+ ret = -ENOMEM;
581
+ goto out_free_enc;
582
+ }
583
+
584
+ ret = qat_alg_aead_init_sessions(tfm, key, keylen,
585
+ ICP_QAT_HW_CIPHER_CBC_MODE);
586
+ if (ret)
617587 goto out_free_all;
618588
619589 return 0;
....@@ -628,7 +598,21 @@
628598 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
629599 ctx->enc_cd, ctx->enc_cd_paddr);
630600 ctx->enc_cd = NULL;
631
- return -ENOMEM;
601
+out_free_inst:
602
+ ctx->inst = NULL;
603
+ qat_crypto_put_instance(inst);
604
+ return ret;
605
+}
606
+
607
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
608
+ unsigned int keylen)
609
+{
610
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
611
+
612
+ if (ctx->enc_cd)
613
+ return qat_alg_aead_rekey(tfm, key, keylen);
614
+ else
615
+ return qat_alg_aead_newkey(tfm, key, keylen);
632616 }
633617
634618 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
....@@ -641,14 +625,20 @@
641625 dma_addr_t blpout = qat_req->buf.bloutp;
642626 size_t sz = qat_req->buf.sz;
643627 size_t sz_out = qat_req->buf.sz_out;
628
+ int bl_dma_dir;
644629 int i;
630
+
631
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
645632
646633 for (i = 0; i < bl->num_bufs; i++)
647634 dma_unmap_single(dev, bl->bufers[i].addr,
648
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
635
+ bl->bufers[i].len, bl_dma_dir);
649636
650637 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
651
- kfree(bl);
638
+
639
+ if (!qat_req->buf.sgl_src_valid)
640
+ kfree(bl);
641
+
652642 if (blp != blpout) {
653643 /* If out of place operation dma unmap only data */
654644 int bufless = blout->num_bufs - blout->num_mapped_bufs;
....@@ -656,10 +646,12 @@
656646 for (i = bufless; i < blout->num_bufs; i++) {
657647 dma_unmap_single(dev, blout->bufers[i].addr,
658648 blout->bufers[i].len,
659
- DMA_BIDIRECTIONAL);
649
+ DMA_FROM_DEVICE);
660650 }
661651 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
662
- kfree(blout);
652
+
653
+ if (!qat_req->buf.sgl_dst_valid)
654
+ kfree(blout);
663655 }
664656 }
665657
....@@ -673,23 +665,33 @@
673665 int n = sg_nents(sgl);
674666 struct qat_alg_buf_list *bufl;
675667 struct qat_alg_buf_list *buflout = NULL;
676
- dma_addr_t blp;
677
- dma_addr_t bloutp = 0;
668
+ dma_addr_t blp = DMA_MAPPING_ERROR;
669
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
678670 struct scatterlist *sg;
679
- size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
680
- ((1 + n) * sizeof(struct qat_alg_buf));
671
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
672
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
673
+ int bufl_dma_dir;
681674
682675 if (unlikely(!n))
683676 return -EINVAL;
684677
685
- bufl = kzalloc_node(sz, GFP_ATOMIC,
686
- dev_to_node(&GET_DEV(inst->accel_dev)));
687
- if (unlikely(!bufl))
688
- return -ENOMEM;
678
+ qat_req->buf.sgl_src_valid = false;
679
+ qat_req->buf.sgl_dst_valid = false;
689680
690
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
691
- if (unlikely(dma_mapping_error(dev, blp)))
692
- goto err_in;
681
+ if (n > QAT_MAX_BUFF_DESC) {
682
+ bufl = kzalloc_node(sz, GFP_ATOMIC, node);
683
+ if (unlikely(!bufl))
684
+ return -ENOMEM;
685
+ } else {
686
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
687
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
688
+ qat_req->buf.sgl_src_valid = true;
689
+ }
690
+
691
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
692
+
693
+ for_each_sg(sgl, sg, n, i)
694
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
693695
694696 for_each_sg(sgl, sg, n, i) {
695697 int y = sg_nctr;
....@@ -699,13 +701,16 @@
699701
700702 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
701703 sg->length,
702
- DMA_BIDIRECTIONAL);
704
+ bufl_dma_dir);
703705 bufl->bufers[y].len = sg->length;
704706 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
705707 goto err_in;
706708 sg_nctr++;
707709 }
708710 bufl->num_bufs = sg_nctr;
711
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
712
+ if (unlikely(dma_mapping_error(dev, blp)))
713
+ goto err_in;
709714 qat_req->buf.bl = bufl;
710715 qat_req->buf.blp = blp;
711716 qat_req->buf.sz = sz;
....@@ -714,17 +719,23 @@
714719 struct qat_alg_buf *bufers;
715720
716721 n = sg_nents(sglout);
717
- sz_out = sizeof(struct qat_alg_buf_list) +
718
- ((1 + n) * sizeof(struct qat_alg_buf));
722
+ sz_out = struct_size(buflout, bufers, n);
719723 sg_nctr = 0;
720
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
721
- dev_to_node(&GET_DEV(inst->accel_dev)));
722
- if (unlikely(!buflout))
723
- goto err_in;
724
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
725
- if (unlikely(dma_mapping_error(dev, bloutp)))
726
- goto err_out;
724
+
725
+ if (n > QAT_MAX_BUFF_DESC) {
726
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
727
+ if (unlikely(!buflout))
728
+ goto err_in;
729
+ } else {
730
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
731
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
732
+ qat_req->buf.sgl_dst_valid = true;
733
+ }
734
+
727735 bufers = buflout->bufers;
736
+ for_each_sg(sglout, sg, n, i)
737
+ bufers[i].addr = DMA_MAPPING_ERROR;
738
+
728739 for_each_sg(sglout, sg, n, i) {
729740 int y = sg_nctr;
730741
....@@ -733,7 +744,7 @@
733744
734745 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
735746 sg->length,
736
- DMA_BIDIRECTIONAL);
747
+ DMA_FROM_DEVICE);
737748 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
738749 goto err_out;
739750 bufers[y].len = sg->length;
....@@ -741,6 +752,9 @@
741752 }
742753 buflout->num_bufs = sg_nctr;
743754 buflout->num_mapped_bufs = sg_nctr;
755
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
756
+ if (unlikely(dma_mapping_error(dev, bloutp)))
757
+ goto err_out;
744758 qat_req->buf.blout = buflout;
745759 qat_req->buf.bloutp = bloutp;
746760 qat_req->buf.sz_out = sz_out;
....@@ -752,27 +766,32 @@
752766 return 0;
753767
754768 err_out:
769
+ if (!dma_mapping_error(dev, bloutp))
770
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
771
+
755772 n = sg_nents(sglout);
756773 for (i = 0; i < n; i++)
757774 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
758775 dma_unmap_single(dev, buflout->bufers[i].addr,
759776 buflout->bufers[i].len,
760
- DMA_BIDIRECTIONAL);
761
- if (!dma_mapping_error(dev, bloutp))
762
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
763
- kfree(buflout);
777
+ DMA_FROM_DEVICE);
778
+
779
+ if (!qat_req->buf.sgl_dst_valid)
780
+ kfree(buflout);
764781
765782 err_in:
783
+ if (!dma_mapping_error(dev, blp))
784
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
785
+
766786 n = sg_nents(sgl);
767787 for (i = 0; i < n; i++)
768788 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
769789 dma_unmap_single(dev, bufl->bufers[i].addr,
770790 bufl->bufers[i].len,
771
- DMA_BIDIRECTIONAL);
791
+ bufl_dma_dir);
772792
773
- if (!dma_mapping_error(dev, blp))
774
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
775
- kfree(bufl);
793
+ if (!qat_req->buf.sgl_src_valid)
794
+ kfree(bufl);
776795
777796 dev_err(dev, "Failed to map buf for dma\n");
778797 return -ENOMEM;
....@@ -784,7 +803,7 @@
784803 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
785804 struct qat_crypto_instance *inst = ctx->inst;
786805 struct aead_request *areq = qat_req->aead_req;
787
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
806
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
788807 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
789808
790809 qat_alg_free_bufl(inst, qat_req);
....@@ -793,19 +812,25 @@
793812 areq->base.complete(&areq->base, res);
794813 }
795814
796
-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
797
- struct qat_crypto_request *qat_req)
815
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
816
+ struct qat_crypto_request *qat_req)
798817 {
799
- struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
818
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
800819 struct qat_crypto_instance *inst = ctx->inst;
801
- struct ablkcipher_request *areq = qat_req->ablkcipher_req;
802
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
820
+ struct skcipher_request *sreq = qat_req->skcipher_req;
821
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
822
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
803823 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
804824
805825 qat_alg_free_bufl(inst, qat_req);
806826 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
807827 res = -EINVAL;
808
- areq->base.complete(&areq->base, res);
828
+
829
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
830
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
831
+ qat_req->iv_paddr);
832
+
833
+ sreq->base.complete(&sreq->base, res);
809834 }
810835
811836 void qat_alg_callback(void *resp)
....@@ -843,18 +868,18 @@
843868 qat_req->aead_ctx = ctx;
844869 qat_req->aead_req = areq;
845870 qat_req->cb = qat_aead_alg_callback;
846
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
871
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
847872 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
848873 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
849874 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
850875 cipher_param->cipher_length = cipher_len;
851876 cipher_param->cipher_offset = areq->assoclen;
852877 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
853
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
878
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
854879 auth_param->auth_off = 0;
855880 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
856881 do {
857
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
882
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
858883 } while (ret == -EAGAIN && ctr++ < 10);
859884
860885 if (ret == -EAGAIN) {
....@@ -873,7 +898,7 @@
873898 struct icp_qat_fw_la_cipher_req_params *cipher_param;
874899 struct icp_qat_fw_la_auth_req_params *auth_param;
875900 struct icp_qat_fw_la_bulk_req *msg;
876
- uint8_t *iv = areq->iv;
901
+ u8 *iv = areq->iv;
877902 int ret, ctr = 0;
878903
879904 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
....@@ -888,11 +913,11 @@
888913 qat_req->aead_ctx = ctx;
889914 qat_req->aead_req = areq;
890915 qat_req->cb = qat_aead_alg_callback;
891
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
916
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
892917 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
893918 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
894919 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
895
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
920
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
896921
897922 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
898923 cipher_param->cipher_length = areq->cryptlen;
....@@ -902,7 +927,7 @@
902927 auth_param->auth_len = areq->assoclen + areq->cryptlen;
903928
904929 do {
905
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
930
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
906931 } while (ret == -EAGAIN && ctr++ < 10);
907932
908933 if (ret == -EAGAIN) {
....@@ -912,50 +937,49 @@
912937 return -EINPROGRESS;
913938 }
914939
915
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
916
- const u8 *key, unsigned int keylen,
917
- int mode)
940
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
941
+ const u8 *key, unsigned int keylen,
942
+ int mode)
918943 {
919
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
944
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
945
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
946
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
947
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
948
+
949
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
950
+}
951
+
952
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
953
+ const u8 *key, unsigned int keylen,
954
+ int mode)
955
+{
956
+ struct qat_crypto_instance *inst = NULL;
920957 struct device *dev;
958
+ int node = get_current_node();
959
+ int ret;
921960
922
- spin_lock(&ctx->lock);
923
- if (ctx->enc_cd) {
924
- /* rekeying */
925
- dev = &GET_DEV(ctx->inst->accel_dev);
926
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
927
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
928
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
929
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
930
- } else {
931
- /* new key */
932
- int node = get_current_node();
933
- struct qat_crypto_instance *inst =
934
- qat_crypto_get_instance_node(node);
935
- if (!inst) {
936
- spin_unlock(&ctx->lock);
937
- return -EINVAL;
938
- }
939
-
940
- dev = &GET_DEV(inst->accel_dev);
941
- ctx->inst = inst;
942
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
943
- &ctx->enc_cd_paddr,
944
- GFP_ATOMIC);
945
- if (!ctx->enc_cd) {
946
- spin_unlock(&ctx->lock);
947
- return -ENOMEM;
948
- }
949
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
950
- &ctx->dec_cd_paddr,
951
- GFP_ATOMIC);
952
- if (!ctx->dec_cd) {
953
- spin_unlock(&ctx->lock);
954
- goto out_free_enc;
955
- }
961
+ inst = qat_crypto_get_instance_node(node);
962
+ if (!inst)
963
+ return -EINVAL;
964
+ dev = &GET_DEV(inst->accel_dev);
965
+ ctx->inst = inst;
966
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
967
+ &ctx->enc_cd_paddr,
968
+ GFP_ATOMIC);
969
+ if (!ctx->enc_cd) {
970
+ ret = -ENOMEM;
971
+ goto out_free_instance;
956972 }
957
- spin_unlock(&ctx->lock);
958
- if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
973
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
974
+ &ctx->dec_cd_paddr,
975
+ GFP_ATOMIC);
976
+ if (!ctx->dec_cd) {
977
+ ret = -ENOMEM;
978
+ goto out_free_enc;
979
+ }
980
+
981
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
982
+ if (ret)
959983 goto out_free_all;
960984
961985 return 0;
....@@ -970,102 +994,218 @@
970994 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
971995 ctx->enc_cd, ctx->enc_cd_paddr);
972996 ctx->enc_cd = NULL;
973
- return -ENOMEM;
997
+out_free_instance:
998
+ ctx->inst = NULL;
999
+ qat_crypto_put_instance(inst);
1000
+ return ret;
9741001 }
9751002
976
-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
977
- const u8 *key, unsigned int keylen)
1003
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1004
+ const u8 *key, unsigned int keylen,
1005
+ int mode)
9781006 {
979
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
980
- ICP_QAT_HW_CIPHER_CBC_MODE);
1007
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1008
+
1009
+ if (ctx->enc_cd)
1010
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1011
+ else
1012
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
9811013 }
9821014
983
-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
984
- const u8 *key, unsigned int keylen)
1015
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1016
+ const u8 *key, unsigned int keylen)
9851017 {
986
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
987
- ICP_QAT_HW_CIPHER_CTR_MODE);
1018
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
1019
+ ICP_QAT_HW_CIPHER_CBC_MODE);
9881020 }
9891021
990
-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
991
- const u8 *key, unsigned int keylen)
1022
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1023
+ const u8 *key, unsigned int keylen)
9921024 {
993
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
994
- ICP_QAT_HW_CIPHER_XTS_MODE);
1025
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
1026
+ ICP_QAT_HW_CIPHER_CTR_MODE);
9951027 }
9961028
997
-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1029
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1030
+ const u8 *key, unsigned int keylen)
9981031 {
999
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1000
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1001
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1002
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1032
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1033
+ int ret;
1034
+
1035
+ ret = xts_verify_key(tfm, key, keylen);
1036
+ if (ret)
1037
+ return ret;
1038
+
1039
+ if (keylen >> 1 == AES_KEYSIZE_192) {
1040
+ ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1041
+ if (ret)
1042
+ return ret;
1043
+
1044
+ ctx->fallback = true;
1045
+
1046
+ return 0;
1047
+ }
1048
+
1049
+ ctx->fallback = false;
1050
+
1051
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
1052
+ ICP_QAT_HW_CIPHER_XTS_MODE);
1053
+}
1054
+
1055
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1056
+{
1057
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1058
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1059
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1060
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
10031061 struct icp_qat_fw_la_cipher_req_params *cipher_param;
10041062 struct icp_qat_fw_la_bulk_req *msg;
1063
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
10051064 int ret, ctr = 0;
10061065
1066
+ if (req->cryptlen == 0)
1067
+ return 0;
1068
+
1069
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1070
+ &qat_req->iv_paddr, GFP_ATOMIC);
1071
+ if (!qat_req->iv)
1072
+ return -ENOMEM;
1073
+
10071074 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1008
- if (unlikely(ret))
1075
+ if (unlikely(ret)) {
1076
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1077
+ qat_req->iv_paddr);
10091078 return ret;
1079
+ }
10101080
10111081 msg = &qat_req->req;
10121082 *msg = ctx->enc_fw_req;
1013
- qat_req->ablkcipher_ctx = ctx;
1014
- qat_req->ablkcipher_req = req;
1015
- qat_req->cb = qat_ablkcipher_alg_callback;
1016
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1083
+ qat_req->skcipher_ctx = ctx;
1084
+ qat_req->skcipher_req = req;
1085
+ qat_req->cb = qat_skcipher_alg_callback;
1086
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
10171087 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
10181088 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
10191089 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1020
- cipher_param->cipher_length = req->nbytes;
1090
+ cipher_param->cipher_length = req->cryptlen;
10211091 cipher_param->cipher_offset = 0;
1022
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1092
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1093
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
10231094 do {
1024
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1095
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
10251096 } while (ret == -EAGAIN && ctr++ < 10);
10261097
10271098 if (ret == -EAGAIN) {
10281099 qat_alg_free_bufl(ctx->inst, qat_req);
1100
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1101
+ qat_req->iv_paddr);
10291102 return -EBUSY;
10301103 }
10311104 return -EINPROGRESS;
10321105 }
10331106
1034
-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1107
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
10351108 {
1036
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1037
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1038
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1039
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1109
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
1110
+ return -EINVAL;
1111
+
1112
+ return qat_alg_skcipher_encrypt(req);
1113
+}
1114
+
1115
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1116
+{
1117
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1118
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1119
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
1120
+
1121
+ if (req->cryptlen < XTS_BLOCK_SIZE)
1122
+ return -EINVAL;
1123
+
1124
+ if (ctx->fallback) {
1125
+ memcpy(nreq, req, sizeof(*req));
1126
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
1127
+ return crypto_skcipher_encrypt(nreq);
1128
+ }
1129
+
1130
+ return qat_alg_skcipher_encrypt(req);
1131
+}
1132
+
1133
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1134
+{
1135
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1136
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1137
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1138
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
10401139 struct icp_qat_fw_la_cipher_req_params *cipher_param;
10411140 struct icp_qat_fw_la_bulk_req *msg;
1141
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
10421142 int ret, ctr = 0;
10431143
1144
+ if (req->cryptlen == 0)
1145
+ return 0;
1146
+
1147
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1148
+ &qat_req->iv_paddr, GFP_ATOMIC);
1149
+ if (!qat_req->iv)
1150
+ return -ENOMEM;
1151
+
10441152 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1045
- if (unlikely(ret))
1153
+ if (unlikely(ret)) {
1154
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1155
+ qat_req->iv_paddr);
10461156 return ret;
1157
+ }
10471158
10481159 msg = &qat_req->req;
10491160 *msg = ctx->dec_fw_req;
1050
- qat_req->ablkcipher_ctx = ctx;
1051
- qat_req->ablkcipher_req = req;
1052
- qat_req->cb = qat_ablkcipher_alg_callback;
1053
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1161
+ qat_req->skcipher_ctx = ctx;
1162
+ qat_req->skcipher_req = req;
1163
+ qat_req->cb = qat_skcipher_alg_callback;
1164
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
10541165 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
10551166 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
10561167 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1057
- cipher_param->cipher_length = req->nbytes;
1168
+ cipher_param->cipher_length = req->cryptlen;
10581169 cipher_param->cipher_offset = 0;
1059
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1170
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1171
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
10601172 do {
1061
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1173
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
10621174 } while (ret == -EAGAIN && ctr++ < 10);
10631175
10641176 if (ret == -EAGAIN) {
10651177 qat_alg_free_bufl(ctx->inst, qat_req);
1178
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1179
+ qat_req->iv_paddr);
10661180 return -EBUSY;
10671181 }
10681182 return -EINPROGRESS;
1183
+}
1184
+
1185
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1186
+{
1187
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
1188
+ return -EINVAL;
1189
+
1190
+ return qat_alg_skcipher_decrypt(req);
1191
+}
1192
+
1193
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1194
+{
1195
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1196
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1197
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
1198
+
1199
+ if (req->cryptlen < XTS_BLOCK_SIZE)
1200
+ return -EINVAL;
1201
+
1202
+ if (ctx->fallback) {
1203
+ memcpy(nreq, req, sizeof(*req));
1204
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
1205
+ return crypto_skcipher_decrypt(nreq);
1206
+ }
1207
+
1208
+ return qat_alg_skcipher_decrypt(req);
10691209 }
10701210
10711211 static int qat_alg_aead_init(struct crypto_aead *tfm,
....@@ -1122,19 +1262,33 @@
11221262 qat_crypto_put_instance(inst);
11231263 }
11241264
1125
-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1265
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
11261266 {
1127
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1128
-
1129
- spin_lock_init(&ctx->lock);
1130
- tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
1131
- ctx->tfm = tfm;
1267
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
11321268 return 0;
11331269 }
11341270
1135
-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1271
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
11361272 {
1137
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1273
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1274
+ int reqsize;
1275
+
1276
+ ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1277
+ CRYPTO_ALG_NEED_FALLBACK);
1278
+ if (IS_ERR(ctx->ftfm))
1279
+ return PTR_ERR(ctx->ftfm);
1280
+
1281
+ reqsize = max(sizeof(struct qat_crypto_request),
1282
+ sizeof(struct skcipher_request) +
1283
+ crypto_skcipher_reqsize(ctx->ftfm));
1284
+ crypto_skcipher_set_reqsize(tfm, reqsize);
1285
+
1286
+ return 0;
1287
+}
1288
+
1289
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1290
+{
1291
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
11381292 struct qat_crypto_instance *inst = ctx->inst;
11391293 struct device *dev;
11401294
....@@ -1159,13 +1313,22 @@
11591313 qat_crypto_put_instance(inst);
11601314 }
11611315
1316
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1317
+{
1318
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1319
+
1320
+ if (ctx->ftfm)
1321
+ crypto_free_skcipher(ctx->ftfm);
1322
+
1323
+ qat_alg_skcipher_exit_tfm(tfm);
1324
+}
11621325
11631326 static struct aead_alg qat_aeads[] = { {
11641327 .base = {
11651328 .cra_name = "authenc(hmac(sha1),cbc(aes))",
11661329 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
11671330 .cra_priority = 4001,
1168
- .cra_flags = CRYPTO_ALG_ASYNC,
1331
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
11691332 .cra_blocksize = AES_BLOCK_SIZE,
11701333 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
11711334 .cra_module = THIS_MODULE,
....@@ -1182,7 +1345,7 @@
11821345 .cra_name = "authenc(hmac(sha256),cbc(aes))",
11831346 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
11841347 .cra_priority = 4001,
1185
- .cra_flags = CRYPTO_ALG_ASYNC,
1348
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
11861349 .cra_blocksize = AES_BLOCK_SIZE,
11871350 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
11881351 .cra_module = THIS_MODULE,
....@@ -1199,7 +1362,7 @@
11991362 .cra_name = "authenc(hmac(sha512),cbc(aes))",
12001363 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
12011364 .cra_priority = 4001,
1202
- .cra_flags = CRYPTO_ALG_ASYNC,
1365
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
12031366 .cra_blocksize = AES_BLOCK_SIZE,
12041367 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
12051368 .cra_module = THIS_MODULE,
....@@ -1213,91 +1376,75 @@
12131376 .maxauthsize = SHA512_DIGEST_SIZE,
12141377 } };
12151378
1216
-static struct crypto_alg qat_algs[] = { {
1217
- .cra_name = "cbc(aes)",
1218
- .cra_driver_name = "qat_aes_cbc",
1219
- .cra_priority = 4001,
1220
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1221
- .cra_blocksize = AES_BLOCK_SIZE,
1222
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1223
- .cra_alignmask = 0,
1224
- .cra_type = &crypto_ablkcipher_type,
1225
- .cra_module = THIS_MODULE,
1226
- .cra_init = qat_alg_ablkcipher_init,
1227
- .cra_exit = qat_alg_ablkcipher_exit,
1228
- .cra_u = {
1229
- .ablkcipher = {
1230
- .setkey = qat_alg_ablkcipher_cbc_setkey,
1231
- .decrypt = qat_alg_ablkcipher_decrypt,
1232
- .encrypt = qat_alg_ablkcipher_encrypt,
1233
- .min_keysize = AES_MIN_KEY_SIZE,
1234
- .max_keysize = AES_MAX_KEY_SIZE,
1235
- .ivsize = AES_BLOCK_SIZE,
1236
- },
1237
- },
1379
+static struct skcipher_alg qat_skciphers[] = { {
1380
+ .base.cra_name = "cbc(aes)",
1381
+ .base.cra_driver_name = "qat_aes_cbc",
1382
+ .base.cra_priority = 4001,
1383
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1384
+ .base.cra_blocksize = AES_BLOCK_SIZE,
1385
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1386
+ .base.cra_alignmask = 0,
1387
+ .base.cra_module = THIS_MODULE,
1388
+
1389
+ .init = qat_alg_skcipher_init_tfm,
1390
+ .exit = qat_alg_skcipher_exit_tfm,
1391
+ .setkey = qat_alg_skcipher_cbc_setkey,
1392
+ .decrypt = qat_alg_skcipher_blk_decrypt,
1393
+ .encrypt = qat_alg_skcipher_blk_encrypt,
1394
+ .min_keysize = AES_MIN_KEY_SIZE,
1395
+ .max_keysize = AES_MAX_KEY_SIZE,
1396
+ .ivsize = AES_BLOCK_SIZE,
12381397 }, {
1239
- .cra_name = "ctr(aes)",
1240
- .cra_driver_name = "qat_aes_ctr",
1241
- .cra_priority = 4001,
1242
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1243
- .cra_blocksize = AES_BLOCK_SIZE,
1244
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1245
- .cra_alignmask = 0,
1246
- .cra_type = &crypto_ablkcipher_type,
1247
- .cra_module = THIS_MODULE,
1248
- .cra_init = qat_alg_ablkcipher_init,
1249
- .cra_exit = qat_alg_ablkcipher_exit,
1250
- .cra_u = {
1251
- .ablkcipher = {
1252
- .setkey = qat_alg_ablkcipher_ctr_setkey,
1253
- .decrypt = qat_alg_ablkcipher_decrypt,
1254
- .encrypt = qat_alg_ablkcipher_encrypt,
1255
- .min_keysize = AES_MIN_KEY_SIZE,
1256
- .max_keysize = AES_MAX_KEY_SIZE,
1257
- .ivsize = AES_BLOCK_SIZE,
1258
- },
1259
- },
1398
+ .base.cra_name = "ctr(aes)",
1399
+ .base.cra_driver_name = "qat_aes_ctr",
1400
+ .base.cra_priority = 4001,
1401
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1402
+ .base.cra_blocksize = 1,
1403
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1404
+ .base.cra_alignmask = 0,
1405
+ .base.cra_module = THIS_MODULE,
1406
+
1407
+ .init = qat_alg_skcipher_init_tfm,
1408
+ .exit = qat_alg_skcipher_exit_tfm,
1409
+ .setkey = qat_alg_skcipher_ctr_setkey,
1410
+ .decrypt = qat_alg_skcipher_decrypt,
1411
+ .encrypt = qat_alg_skcipher_encrypt,
1412
+ .min_keysize = AES_MIN_KEY_SIZE,
1413
+ .max_keysize = AES_MAX_KEY_SIZE,
1414
+ .ivsize = AES_BLOCK_SIZE,
12601415 }, {
1261
- .cra_name = "xts(aes)",
1262
- .cra_driver_name = "qat_aes_xts",
1263
- .cra_priority = 4001,
1264
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1265
- .cra_blocksize = AES_BLOCK_SIZE,
1266
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1267
- .cra_alignmask = 0,
1268
- .cra_type = &crypto_ablkcipher_type,
1269
- .cra_module = THIS_MODULE,
1270
- .cra_init = qat_alg_ablkcipher_init,
1271
- .cra_exit = qat_alg_ablkcipher_exit,
1272
- .cra_u = {
1273
- .ablkcipher = {
1274
- .setkey = qat_alg_ablkcipher_xts_setkey,
1275
- .decrypt = qat_alg_ablkcipher_decrypt,
1276
- .encrypt = qat_alg_ablkcipher_encrypt,
1277
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
1278
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
1279
- .ivsize = AES_BLOCK_SIZE,
1280
- },
1281
- },
1416
+ .base.cra_name = "xts(aes)",
1417
+ .base.cra_driver_name = "qat_aes_xts",
1418
+ .base.cra_priority = 4001,
1419
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1420
+ CRYPTO_ALG_ALLOCATES_MEMORY,
1421
+ .base.cra_blocksize = AES_BLOCK_SIZE,
1422
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1423
+ .base.cra_alignmask = 0,
1424
+ .base.cra_module = THIS_MODULE,
1425
+
1426
+ .init = qat_alg_skcipher_init_xts_tfm,
1427
+ .exit = qat_alg_skcipher_exit_xts_tfm,
1428
+ .setkey = qat_alg_skcipher_xts_setkey,
1429
+ .decrypt = qat_alg_skcipher_xts_decrypt,
1430
+ .encrypt = qat_alg_skcipher_xts_encrypt,
1431
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
1432
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
1433
+ .ivsize = AES_BLOCK_SIZE,
12821434 } };
12831435
12841436 int qat_algs_register(void)
12851437 {
1286
- int ret = 0, i;
1438
+ int ret = 0;
12871439
12881440 mutex_lock(&algs_lock);
12891441 if (++active_devs != 1)
12901442 goto unlock;
12911443
1292
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1293
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1294
-
1295
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1444
+ ret = crypto_register_skciphers(qat_skciphers,
1445
+ ARRAY_SIZE(qat_skciphers));
12961446 if (ret)
12971447 goto unlock;
1298
-
1299
- for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1300
- qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
13011448
13021449 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
13031450 if (ret)
....@@ -1308,7 +1455,7 @@
13081455 return ret;
13091456
13101457 unreg_algs:
1311
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1458
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
13121459 goto unlock;
13131460 }
13141461
....@@ -1319,7 +1466,7 @@
13191466 goto unlock;
13201467
13211468 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1322
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1469
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
13231470
13241471 unlock:
13251472 mutex_unlock(&algs_lock);