.. | ..
1 | | -/*
2 | | - This file is provided under a dual BSD/GPLv2 license. When using or
3 | | - redistributing this file, you may do so under either license.
4 | | -
5 | | - GPL LICENSE SUMMARY
6 | | - Copyright(c) 2014 Intel Corporation.
7 | | - This program is free software; you can redistribute it and/or modify
8 | | - it under the terms of version 2 of the GNU General Public License as
9 | | - published by the Free Software Foundation.
10 | | -
11 | | - This program is distributed in the hope that it will be useful, but
12 | | - WITHOUT ANY WARRANTY; without even the implied warranty of
13 | | - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | | - General Public License for more details.
15 | | -
16 | | - Contact Information:
17 | | - qat-linux@intel.com
18 | | -
19 | | - BSD LICENSE
20 | | - Copyright(c) 2014 Intel Corporation.
21 | | - Redistribution and use in source and binary forms, with or without
22 | | - modification, are permitted provided that the following conditions
23 | | - are met:
24 | | -
25 | | - * Redistributions of source code must retain the above copyright
26 | | - notice, this list of conditions and the following disclaimer.
27 | | - * Redistributions in binary form must reproduce the above copyright
28 | | - notice, this list of conditions and the following disclaimer in
29 | | - the documentation and/or other materials provided with the
30 | | - distribution.
31 | | - * Neither the name of Intel Corporation nor the names of its
32 | | - contributors may be used to endorse or promote products derived
33 | | - from this software without specific prior written permission.
34 | | -
35 | | - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 | | - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 | | - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 | | - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 | | - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 | | - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 | | - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 | | - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 | | - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 | | - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 | | - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 | | -*/
| 1 | +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
| 2 | +/* Copyright(c) 2014 - 2020 Intel Corporation */
47 | 3 | #include <linux/module.h>
48 | 4 | #include <linux/slab.h>
49 | 5 | #include <linux/crypto.h>
50 | 6 | #include <crypto/internal/aead.h>
| 7 | +#include <crypto/internal/cipher.h>
| 8 | +#include <crypto/internal/skcipher.h>
51 | 9 | #include <crypto/aes.h>
52 | 10 | #include <crypto/sha.h>
53 | 11 | #include <crypto/hash.h>
54 | 12 | #include <crypto/hmac.h>
55 | 13 | #include <crypto/algapi.h>
56 | 14 | #include <crypto/authenc.h>
| 15 | +#include <crypto/xts.h>
57 | 16 | #include <linux/dma-mapping.h>
58 | 17 | #include "adf_accel_devices.h"
59 | 18 | #include "adf_transport.h"
.. | ..
76 | 35 | static DEFINE_MUTEX(algs_lock);
77 | 36 | static unsigned int active_devs;
78 | 37 |
79 | | -struct qat_alg_buf {
80 | | - uint32_t len;
81 | | - uint32_t resrvd;
82 | | - uint64_t addr;
83 | | -} __packed;
84 | | -
85 | | -struct qat_alg_buf_list {
86 | | - uint64_t resrvd;
87 | | - uint32_t num_bufs;
88 | | - uint32_t num_mapped_bufs;
89 | | - struct qat_alg_buf bufers[];
90 | | -} __packed __aligned(64);
91 | | -
92 | 38 | /* Common content descriptor */
93 | 39 | struct qat_alg_cd {
94 | 40 | union {
.. | ..
96 | 42 | struct icp_qat_hw_cipher_algo_blk cipher;
97 | 43 | struct icp_qat_hw_auth_algo_blk hash;
98 | 44 | } qat_enc_cd;
99 | | - struct qat_dec { /* Decrytp content desc */
| 45 | + struct qat_dec { /* Decrypt content desc */
100 | 46 | struct icp_qat_hw_auth_algo_blk hash;
101 | 47 | struct icp_qat_hw_cipher_algo_blk cipher;
102 | 48 | } qat_dec_cd;
.. | ..
113 | 59 | struct crypto_shash *hash_tfm;
114 | 60 | enum icp_qat_hw_auth_algo qat_hash_alg;
115 | 61 | struct qat_crypto_instance *inst;
| 62 | + union {
| 63 | + struct sha1_state sha1;
| 64 | + struct sha256_state sha256;
| 65 | + struct sha512_state sha512;
| 66 | + };
| 67 | + char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
| 68 | + char opad[SHA512_BLOCK_SIZE];
116 | 69 | };
117 | 70 |
118 | | -struct qat_alg_ablkcipher_ctx {
| 71 | +struct qat_alg_skcipher_ctx {
119 | 72 | struct icp_qat_hw_cipher_algo_blk *enc_cd;
120 | 73 | struct icp_qat_hw_cipher_algo_blk *dec_cd;
121 | 74 | dma_addr_t enc_cd_paddr;
.. | ..
123 | 76 | struct icp_qat_fw_la_bulk_req enc_fw_req;
124 | 77 | struct icp_qat_fw_la_bulk_req dec_fw_req;
125 | 78 | struct qat_crypto_instance *inst;
126 | | - struct crypto_tfm *tfm;
127 | | - spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
| 79 | + struct crypto_skcipher *ftfm;
| 80 | + bool fallback;
128 | 81 | };
129 | 82 |
130 | 83 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
.. | ..
144 | 97 |
145 | 98 | static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
146 | 99 | struct qat_alg_aead_ctx *ctx,
147 | | - const uint8_t *auth_key,
| 100 | + const u8 *auth_key,
148 | 101 | unsigned int auth_keylen)
149 | 102 | {
150 | 103 | SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
151 | | - struct sha1_state sha1;
152 | | - struct sha256_state sha256;
153 | | - struct sha512_state sha512;
154 | 104 | int block_size = crypto_shash_blocksize(ctx->hash_tfm);
155 | 105 | int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
156 | | - char ipad[block_size];
157 | | - char opad[block_size];
158 | 106 | __be32 *hash_state_out;
159 | 107 | __be64 *hash512_state_out;
160 | 108 | int i, offset;
161 | 109 |
162 | | - memset(ipad, 0, block_size);
163 | | - memset(opad, 0, block_size);
| 110 | + memset(ctx->ipad, 0, block_size);
| 111 | + memset(ctx->opad, 0, block_size);
164 | 112 | shash->tfm = ctx->hash_tfm;
165 | | - shash->flags = 0x0;
166 | 113 |
167 | 114 | if (auth_keylen > block_size) {
168 | 115 | int ret = crypto_shash_digest(shash, auth_key,
169 | | - auth_keylen, ipad);
| 116 | + auth_keylen, ctx->ipad);
170 | 117 | if (ret)
171 | 118 | return ret;
172 | 119 |
173 | | - memcpy(opad, ipad, digest_size);
| 120 | + memcpy(ctx->opad, ctx->ipad, digest_size);
174 | 121 | } else {
175 | | - memcpy(ipad, auth_key, auth_keylen);
176 | | - memcpy(opad, auth_key, auth_keylen);
| 122 | + memcpy(ctx->ipad, auth_key, auth_keylen);
| 123 | + memcpy(ctx->opad, auth_key, auth_keylen);
177 | 124 | }
178 | 125 |
179 | 126 | for (i = 0; i < block_size; i++) {
180 | | - char *ipad_ptr = ipad + i;
181 | | - char *opad_ptr = opad + i;
| 127 | + char *ipad_ptr = ctx->ipad + i;
| 128 | + char *opad_ptr = ctx->opad + i;
182 | 129 | *ipad_ptr ^= HMAC_IPAD_VALUE;
183 | 130 | *opad_ptr ^= HMAC_OPAD_VALUE;
184 | 131 | }
.. | .. |
---|
186 | 133 | if (crypto_shash_init(shash)) |
---|
187 | 134 | return -EFAULT; |
---|
188 | 135 | |
---|
189 | | - if (crypto_shash_update(shash, ipad, block_size)) |
---|
| 136 | + if (crypto_shash_update(shash, ctx->ipad, block_size)) |
---|
190 | 137 | return -EFAULT; |
---|
191 | 138 | |
---|
192 | 139 | hash_state_out = (__be32 *)hash->sha.state1; |
---|
.. | .. |
---|
194 | 141 | |
---|
195 | 142 | switch (ctx->qat_hash_alg) { |
---|
196 | 143 | case ICP_QAT_HW_AUTH_ALGO_SHA1: |
---|
197 | | - if (crypto_shash_export(shash, &sha1)) |
---|
| 144 | + if (crypto_shash_export(shash, &ctx->sha1)) |
---|
198 | 145 | return -EFAULT; |
---|
199 | 146 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) |
---|
200 | | - *hash_state_out = cpu_to_be32(*(sha1.state + i)); |
---|
| 147 | + *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); |
---|
201 | 148 | break; |
---|
202 | 149 | case ICP_QAT_HW_AUTH_ALGO_SHA256: |
---|
203 | | - if (crypto_shash_export(shash, &sha256)) |
---|
| 150 | + if (crypto_shash_export(shash, &ctx->sha256)) |
---|
204 | 151 | return -EFAULT; |
---|
205 | 152 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) |
---|
206 | | - *hash_state_out = cpu_to_be32(*(sha256.state + i)); |
---|
| 153 | + *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); |
---|
207 | 154 | break; |
---|
208 | 155 | case ICP_QAT_HW_AUTH_ALGO_SHA512: |
---|
209 | | - if (crypto_shash_export(shash, &sha512)) |
---|
| 156 | + if (crypto_shash_export(shash, &ctx->sha512)) |
---|
210 | 157 | return -EFAULT; |
---|
211 | 158 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) |
---|
212 | | - *hash512_state_out = cpu_to_be64(*(sha512.state + i)); |
---|
| 159 | + *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); |
---|
213 | 160 | break; |
---|
214 | 161 | default: |
---|
215 | 162 | return -EFAULT; |
---|
.. | .. |
---|
218 | 165 | if (crypto_shash_init(shash)) |
---|
219 | 166 | return -EFAULT; |
---|
220 | 167 | |
---|
221 | | - if (crypto_shash_update(shash, opad, block_size)) |
---|
| 168 | + if (crypto_shash_update(shash, ctx->opad, block_size)) |
---|
222 | 169 | return -EFAULT; |
---|
223 | 170 | |
---|
224 | 171 | offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); |
---|
| 172 | + if (offset < 0) |
---|
| 173 | + return -EFAULT; |
---|
| 174 | + |
---|
225 | 175 | hash_state_out = (__be32 *)(hash->sha.state1 + offset); |
---|
226 | 176 | hash512_state_out = (__be64 *)hash_state_out; |
---|
227 | 177 | |
---|
228 | 178 | switch (ctx->qat_hash_alg) { |
---|
229 | 179 | case ICP_QAT_HW_AUTH_ALGO_SHA1: |
---|
230 | | - if (crypto_shash_export(shash, &sha1)) |
---|
| 180 | + if (crypto_shash_export(shash, &ctx->sha1)) |
---|
231 | 181 | return -EFAULT; |
---|
232 | 182 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) |
---|
233 | | - *hash_state_out = cpu_to_be32(*(sha1.state + i)); |
---|
| 183 | + *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); |
---|
234 | 184 | break; |
---|
235 | 185 | case ICP_QAT_HW_AUTH_ALGO_SHA256: |
---|
236 | | - if (crypto_shash_export(shash, &sha256)) |
---|
| 186 | + if (crypto_shash_export(shash, &ctx->sha256)) |
---|
237 | 187 | return -EFAULT; |
---|
238 | 188 | for (i = 0; i < digest_size >> 2; i++, hash_state_out++) |
---|
239 | | - *hash_state_out = cpu_to_be32(*(sha256.state + i)); |
---|
| 189 | + *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); |
---|
240 | 190 | break; |
---|
241 | 191 | case ICP_QAT_HW_AUTH_ALGO_SHA512: |
---|
242 | | - if (crypto_shash_export(shash, &sha512)) |
---|
| 192 | + if (crypto_shash_export(shash, &ctx->sha512)) |
---|
243 | 193 | return -EFAULT; |
---|
244 | 194 | for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) |
---|
245 | | - *hash512_state_out = cpu_to_be64(*(sha512.state + i)); |
---|
| 195 | + *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); |
---|
246 | 196 | break; |
---|
247 | 197 | default: |
---|
248 | 198 | return -EFAULT; |
---|
249 | 199 | } |
---|
250 | | - memzero_explicit(ipad, block_size); |
---|
251 | | - memzero_explicit(opad, block_size); |
---|
| 200 | + memzero_explicit(ctx->ipad, block_size); |
---|
| 201 | + memzero_explicit(ctx->opad, block_size); |
---|
252 | 202 | return 0; |
---|
253 | 203 | } |
---|
254 | 204 | |
---|
255 | | -static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) |
---|
| 205 | +static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header) |
---|
| 206 | +{ |
---|
| 207 | + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, |
---|
| 208 | + ICP_QAT_FW_CIPH_IV_64BIT_PTR); |
---|
| 209 | + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, |
---|
| 210 | + ICP_QAT_FW_LA_UPDATE_STATE); |
---|
| 211 | +} |
---|
| 212 | + |
---|
| 213 | +static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header) |
---|
| 214 | +{ |
---|
| 215 | + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, |
---|
| 216 | + ICP_QAT_FW_CIPH_IV_16BYTE_DATA); |
---|
| 217 | + ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, |
---|
| 218 | + ICP_QAT_FW_LA_NO_UPDATE_STATE); |
---|
| 219 | +} |
---|
| 220 | + |
---|
| 221 | +static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, |
---|
| 222 | + int aead) |
---|
256 | 223 | { |
---|
257 | 224 | header->hdr_flags = |
---|
258 | 225 | ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); |
---|
.. | .. |
---|
262 | 229 | QAT_COMN_PTR_TYPE_SGL); |
---|
263 | 230 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, |
---|
264 | 231 | ICP_QAT_FW_LA_PARTIAL_NONE); |
---|
265 | | - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, |
---|
266 | | - ICP_QAT_FW_CIPH_IV_16BYTE_DATA); |
---|
| 232 | + if (aead) |
---|
| 233 | + qat_alg_init_hdr_no_iv_updt(header); |
---|
| 234 | + else |
---|
| 235 | + qat_alg_init_hdr_iv_updt(header); |
---|
267 | 236 | ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, |
---|
268 | 237 | ICP_QAT_FW_LA_NO_PROTO); |
---|
269 | | - ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, |
---|
270 | | - ICP_QAT_FW_LA_NO_UPDATE_STATE); |
---|
271 | 238 | } |
---|
272 | 239 | |
---|
273 | 240 | static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, |
---|
.. | .. |
---|
302 | 269 | return -EFAULT; |
---|
303 | 270 | |
---|
304 | 271 | /* Request setup */ |
---|
305 | | - qat_alg_init_common_hdr(header); |
---|
| 272 | + qat_alg_init_common_hdr(header, 1); |
---|
306 | 273 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; |
---|
307 | 274 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, |
---|
308 | 275 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); |
---|
.. | .. |
---|
389 | 356 | return -EFAULT; |
---|
390 | 357 | |
---|
391 | 358 | /* Request setup */ |
---|
392 | | - qat_alg_init_common_hdr(header); |
---|
| 359 | + qat_alg_init_common_hdr(header, 1); |
---|
393 | 360 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; |
---|
394 | 361 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, |
---|
395 | 362 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); |
---|
.. | .. |
---|
443 | 410 | return 0; |
---|
444 | 411 | } |
---|
445 | 412 | |
---|
446 | | -static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, |
---|
447 | | - struct icp_qat_fw_la_bulk_req *req, |
---|
448 | | - struct icp_qat_hw_cipher_algo_blk *cd, |
---|
449 | | - const uint8_t *key, unsigned int keylen) |
---|
| 413 | +static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, |
---|
| 414 | + struct icp_qat_fw_la_bulk_req *req, |
---|
| 415 | + struct icp_qat_hw_cipher_algo_blk *cd, |
---|
| 416 | + const u8 *key, unsigned int keylen) |
---|
450 | 417 | { |
---|
451 | 418 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; |
---|
452 | 419 | struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; |
---|
453 | 420 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; |
---|
454 | 421 | |
---|
455 | 422 | memcpy(cd->aes.key, key, keylen); |
---|
456 | | - qat_alg_init_common_hdr(header); |
---|
| 423 | + qat_alg_init_common_hdr(header, 0); |
---|
457 | 424 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; |
---|
458 | 425 | cd_pars->u.s.content_desc_params_sz = |
---|
459 | 426 | sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; |
---|
.. | .. |
---|
465 | 432 | ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); |
---|
466 | 433 | } |
---|
467 | 434 | |
---|
468 | | -static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, |
---|
469 | | - int alg, const uint8_t *key, |
---|
470 | | - unsigned int keylen, int mode) |
---|
| 435 | +static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx, |
---|
| 436 | + int alg, const u8 *key, |
---|
| 437 | + unsigned int keylen, int mode) |
---|
471 | 438 | { |
---|
472 | 439 | struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; |
---|
473 | 440 | struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; |
---|
474 | 441 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; |
---|
475 | 442 | |
---|
476 | | - qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); |
---|
| 443 | + qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen); |
---|
477 | 444 | cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; |
---|
478 | 445 | enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode); |
---|
479 | 446 | } |
---|
480 | 447 | |
---|
481 | | -static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, |
---|
482 | | - int alg, const uint8_t *key, |
---|
483 | | - unsigned int keylen, int mode) |
---|
| 448 | +static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx, |
---|
| 449 | + int alg, const u8 *key, |
---|
| 450 | + unsigned int keylen, int mode) |
---|
484 | 451 | { |
---|
485 | 452 | struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; |
---|
486 | 453 | struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; |
---|
487 | 454 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; |
---|
488 | 455 | |
---|
489 | | - qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); |
---|
| 456 | + qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen); |
---|
490 | 457 | cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; |
---|
491 | 458 | |
---|
492 | 459 | if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) |
---|
.. | .. |
---|
549 | 516 | memzero_explicit(&keys, sizeof(keys)); |
---|
550 | 517 | return 0; |
---|
551 | 518 | bad_key: |
---|
552 | | - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
553 | 519 | memzero_explicit(&keys, sizeof(keys)); |
---|
554 | 520 | return -EINVAL; |
---|
555 | 521 | error: |
---|
.. | .. |
---|
557 | 523 | return -EFAULT; |
---|
558 | 524 | } |
---|
559 | 525 | |
---|
560 | | -static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, |
---|
561 | | - const uint8_t *key, |
---|
562 | | - unsigned int keylen, |
---|
563 | | - int mode) |
---|
| 526 | +static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, |
---|
| 527 | + const u8 *key, |
---|
| 528 | + unsigned int keylen, |
---|
| 529 | + int mode) |
---|
564 | 530 | { |
---|
565 | 531 | int alg; |
---|
566 | 532 | |
---|
567 | 533 | if (qat_alg_validate_key(keylen, &alg, mode)) |
---|
568 | | - goto bad_key; |
---|
| 534 | + return -EINVAL; |
---|
569 | 535 | |
---|
570 | | - qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode); |
---|
571 | | - qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode); |
---|
| 536 | + qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode); |
---|
| 537 | + qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode); |
---|
572 | 538 | return 0; |
---|
573 | | -bad_key: |
---|
574 | | - crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
---|
575 | | - return -EINVAL; |
---|
576 | 539 | } |
---|
577 | 540 | |
---|
578 | | -static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, |
---|
| 541 | +static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key, |
---|
| 542 | + unsigned int keylen) |
---|
| 543 | +{ |
---|
| 544 | + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
---|
| 545 | + |
---|
| 546 | + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); |
---|
| 547 | + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); |
---|
| 548 | + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); |
---|
| 549 | + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); |
---|
| 550 | + |
---|
| 551 | + return qat_alg_aead_init_sessions(tfm, key, keylen, |
---|
| 552 | + ICP_QAT_HW_CIPHER_CBC_MODE); |
---|
| 553 | +} |
---|
| 554 | + |
---|
| 555 | +static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, |
---|
579 | 556 | unsigned int keylen) |
---|
580 | 557 | { |
---|
581 | 558 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
---|
| 559 | + struct qat_crypto_instance *inst = NULL; |
---|
| 560 | + int node = get_current_node(); |
---|
582 | 561 | struct device *dev; |
---|
| 562 | + int ret; |
---|
583 | 563 | |
---|
584 | | - if (ctx->enc_cd) { |
---|
585 | | - /* rekeying */ |
---|
586 | | - dev = &GET_DEV(ctx->inst->accel_dev); |
---|
587 | | - memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); |
---|
588 | | - memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); |
---|
589 | | - memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); |
---|
590 | | - memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); |
---|
591 | | - } else { |
---|
592 | | - /* new key */ |
---|
593 | | - int node = get_current_node(); |
---|
594 | | - struct qat_crypto_instance *inst = |
---|
595 | | - qat_crypto_get_instance_node(node); |
---|
596 | | - if (!inst) { |
---|
597 | | - return -EINVAL; |
---|
598 | | - } |
---|
599 | | - |
---|
600 | | - dev = &GET_DEV(inst->accel_dev); |
---|
601 | | - ctx->inst = inst; |
---|
602 | | - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), |
---|
603 | | - &ctx->enc_cd_paddr, |
---|
604 | | - GFP_ATOMIC); |
---|
605 | | - if (!ctx->enc_cd) { |
---|
606 | | - return -ENOMEM; |
---|
607 | | - } |
---|
608 | | - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), |
---|
609 | | - &ctx->dec_cd_paddr, |
---|
610 | | - GFP_ATOMIC); |
---|
611 | | - if (!ctx->dec_cd) { |
---|
612 | | - goto out_free_enc; |
---|
613 | | - } |
---|
| 564 | + inst = qat_crypto_get_instance_node(node); |
---|
| 565 | + if (!inst) |
---|
| 566 | + return -EINVAL; |
---|
| 567 | + dev = &GET_DEV(inst->accel_dev); |
---|
| 568 | + ctx->inst = inst; |
---|
| 569 | + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
---|
| 570 | + &ctx->enc_cd_paddr, |
---|
| 571 | + GFP_ATOMIC); |
---|
| 572 | + if (!ctx->enc_cd) { |
---|
| 573 | + ret = -ENOMEM; |
---|
| 574 | + goto out_free_inst; |
---|
614 | 575 | } |
---|
615 | | - if (qat_alg_aead_init_sessions(tfm, key, keylen, |
---|
616 | | - ICP_QAT_HW_CIPHER_CBC_MODE)) |
---|
| 576 | + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
---|
| 577 | + &ctx->dec_cd_paddr, |
---|
| 578 | + GFP_ATOMIC); |
---|
| 579 | + if (!ctx->dec_cd) { |
---|
| 580 | + ret = -ENOMEM; |
---|
| 581 | + goto out_free_enc; |
---|
| 582 | + } |
---|
| 583 | + |
---|
| 584 | + ret = qat_alg_aead_init_sessions(tfm, key, keylen, |
---|
| 585 | + ICP_QAT_HW_CIPHER_CBC_MODE); |
---|
| 586 | + if (ret) |
---|
617 | 587 | goto out_free_all; |
---|
618 | 588 | |
---|
619 | 589 | return 0; |
---|
.. | .. |
---|
628 | 598 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
---|
629 | 599 | ctx->enc_cd, ctx->enc_cd_paddr); |
---|
630 | 600 | ctx->enc_cd = NULL; |
---|
631 | | - return -ENOMEM; |
---|
| 601 | +out_free_inst: |
---|
| 602 | + ctx->inst = NULL; |
---|
| 603 | + qat_crypto_put_instance(inst); |
---|
| 604 | + return ret; |
---|
| 605 | +} |
---|
| 606 | + |
---|
| 607 | +static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
---|
| 608 | + unsigned int keylen) |
---|
| 609 | +{ |
---|
| 610 | + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
---|
| 611 | + |
---|
| 612 | + if (ctx->enc_cd) |
---|
| 613 | + return qat_alg_aead_rekey(tfm, key, keylen); |
---|
| 614 | + else |
---|
| 615 | + return qat_alg_aead_newkey(tfm, key, keylen); |
---|
632 | 616 | } |
---|
633 | 617 | |
---|
634 | 618 | static void qat_alg_free_bufl(struct qat_crypto_instance *inst, |
---|
.. | .. |
---|
641 | 625 | dma_addr_t blpout = qat_req->buf.bloutp; |
---|
642 | 626 | size_t sz = qat_req->buf.sz; |
---|
643 | 627 | size_t sz_out = qat_req->buf.sz_out; |
---|
| 628 | + int bl_dma_dir; |
---|
644 | 629 | int i; |
---|
| 630 | + |
---|
| 631 | + bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; |
---|
645 | 632 | |
---|
646 | 633 | for (i = 0; i < bl->num_bufs; i++) |
---|
647 | 634 | dma_unmap_single(dev, bl->bufers[i].addr, |
---|
648 | | - bl->bufers[i].len, DMA_BIDIRECTIONAL); |
---|
| 635 | + bl->bufers[i].len, bl_dma_dir); |
---|
649 | 636 | |
---|
650 | 637 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
---|
651 | | - kfree(bl); |
---|
| 638 | + |
---|
| 639 | + if (!qat_req->buf.sgl_src_valid) |
---|
| 640 | + kfree(bl); |
---|
| 641 | + |
---|
652 | 642 | if (blp != blpout) { |
---|
653 | 643 | /* If out of place operation dma unmap only data */ |
---|
654 | 644 | int bufless = blout->num_bufs - blout->num_mapped_bufs; |
---|
.. | .. |
---|
656 | 646 | for (i = bufless; i < blout->num_bufs; i++) { |
---|
657 | 647 | dma_unmap_single(dev, blout->bufers[i].addr, |
---|
658 | 648 | blout->bufers[i].len, |
---|
659 | | - DMA_BIDIRECTIONAL); |
---|
| 649 | + DMA_FROM_DEVICE); |
---|
660 | 650 | } |
---|
661 | 651 | dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); |
---|
662 | | - kfree(blout); |
---|
| 652 | + |
---|
| 653 | + if (!qat_req->buf.sgl_dst_valid) |
---|
| 654 | + kfree(blout); |
---|
663 | 655 | } |
---|
664 | 656 | } |
---|
665 | 657 | |
---|
.. | .. |
---|
673 | 665 | int n = sg_nents(sgl); |
---|
674 | 666 | struct qat_alg_buf_list *bufl; |
---|
675 | 667 | struct qat_alg_buf_list *buflout = NULL; |
---|
676 | | - dma_addr_t blp; |
---|
677 | | - dma_addr_t bloutp = 0; |
---|
| 668 | + dma_addr_t blp = DMA_MAPPING_ERROR; |
---|
| 669 | + dma_addr_t bloutp = DMA_MAPPING_ERROR; |
---|
678 | 670 | struct scatterlist *sg; |
---|
679 | | - size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + |
---|
680 | | - ((1 + n) * sizeof(struct qat_alg_buf)); |
---|
| 671 | + size_t sz_out, sz = struct_size(bufl, bufers, n); |
---|
| 672 | + int node = dev_to_node(&GET_DEV(inst->accel_dev)); |
---|
| 673 | + int bufl_dma_dir; |
---|
681 | 674 | |
---|
682 | 675 | if (unlikely(!n)) |
---|
683 | 676 | return -EINVAL; |
---|
684 | 677 | |
---|
685 | | - bufl = kzalloc_node(sz, GFP_ATOMIC, |
---|
686 | | - dev_to_node(&GET_DEV(inst->accel_dev))); |
---|
687 | | - if (unlikely(!bufl)) |
---|
688 | | - return -ENOMEM; |
---|
| 678 | + qat_req->buf.sgl_src_valid = false; |
---|
| 679 | + qat_req->buf.sgl_dst_valid = false; |
---|
689 | 680 | |
---|
690 | | - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); |
---|
691 | | - if (unlikely(dma_mapping_error(dev, blp))) |
---|
692 | | - goto err_in; |
---|
| 681 | + if (n > QAT_MAX_BUFF_DESC) { |
---|
| 682 | + bufl = kzalloc_node(sz, GFP_ATOMIC, node); |
---|
| 683 | + if (unlikely(!bufl)) |
---|
| 684 | + return -ENOMEM; |
---|
| 685 | + } else { |
---|
| 686 | + bufl = &qat_req->buf.sgl_src.sgl_hdr; |
---|
| 687 | + memset(bufl, 0, sizeof(struct qat_alg_buf_list)); |
---|
| 688 | + qat_req->buf.sgl_src_valid = true; |
---|
| 689 | + } |
---|
| 690 | + |
---|
| 691 | + bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; |
---|
| 692 | + |
---|
| 693 | + for_each_sg(sgl, sg, n, i) |
---|
| 694 | + bufl->bufers[i].addr = DMA_MAPPING_ERROR; |
---|
693 | 695 | |
---|
694 | 696 | for_each_sg(sgl, sg, n, i) { |
---|
695 | 697 | int y = sg_nctr; |
---|
.. | .. |
---|
699 | 701 | |
---|
700 | 702 | bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), |
---|
701 | 703 | sg->length, |
---|
702 | | - DMA_BIDIRECTIONAL); |
---|
| 704 | + bufl_dma_dir); |
---|
703 | 705 | bufl->bufers[y].len = sg->length; |
---|
704 | 706 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) |
---|
705 | 707 | goto err_in; |
---|
706 | 708 | sg_nctr++; |
---|
707 | 709 | } |
---|
708 | 710 | bufl->num_bufs = sg_nctr; |
---|
| 711 | + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); |
---|
| 712 | + if (unlikely(dma_mapping_error(dev, blp))) |
---|
| 713 | + goto err_in; |
---|
709 | 714 | qat_req->buf.bl = bufl; |
---|
710 | 715 | qat_req->buf.blp = blp; |
---|
711 | 716 | qat_req->buf.sz = sz; |
---|
.. | .. |
---|
714 | 719 | struct qat_alg_buf *bufers; |
---|
715 | 720 | |
---|
716 | 721 | n = sg_nents(sglout); |
---|
717 | | - sz_out = sizeof(struct qat_alg_buf_list) + |
---|
718 | | - ((1 + n) * sizeof(struct qat_alg_buf)); |
---|
| 722 | + sz_out = struct_size(buflout, bufers, n); |
---|
719 | 723 | sg_nctr = 0; |
---|
720 | | - buflout = kzalloc_node(sz_out, GFP_ATOMIC, |
---|
721 | | - dev_to_node(&GET_DEV(inst->accel_dev))); |
---|
722 | | - if (unlikely(!buflout)) |
---|
723 | | - goto err_in; |
---|
724 | | - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); |
---|
725 | | - if (unlikely(dma_mapping_error(dev, bloutp))) |
---|
726 | | - goto err_out; |
---|
| 724 | + |
---|
| 725 | + if (n > QAT_MAX_BUFF_DESC) { |
---|
| 726 | + buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); |
---|
| 727 | + if (unlikely(!buflout)) |
---|
| 728 | + goto err_in; |
---|
| 729 | + } else { |
---|
| 730 | + buflout = &qat_req->buf.sgl_dst.sgl_hdr; |
---|
| 731 | + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); |
---|
| 732 | + qat_req->buf.sgl_dst_valid = true; |
---|
| 733 | + } |
---|
| 734 | + |
---|
727 | 735 | bufers = buflout->bufers; |
---|
| 736 | + for_each_sg(sglout, sg, n, i) |
---|
| 737 | + bufers[i].addr = DMA_MAPPING_ERROR; |
---|
| 738 | + |
---|
728 | 739 | for_each_sg(sglout, sg, n, i) { |
---|
729 | 740 | int y = sg_nctr; |
---|
730 | 741 | |
---|
.. | .. |
---|
733 | 744 | |
---|
734 | 745 | bufers[y].addr = dma_map_single(dev, sg_virt(sg), |
---|
735 | 746 | sg->length, |
---|
736 | | - DMA_BIDIRECTIONAL); |
---|
| 747 | + DMA_FROM_DEVICE); |
---|
737 | 748 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) |
---|
738 | 749 | goto err_out; |
---|
739 | 750 | bufers[y].len = sg->length; |
---|
.. | .. |
---|
741 | 752 | } |
---|
742 | 753 | buflout->num_bufs = sg_nctr; |
---|
743 | 754 | buflout->num_mapped_bufs = sg_nctr; |
---|
| 755 | + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); |
---|
| 756 | + if (unlikely(dma_mapping_error(dev, bloutp))) |
---|
| 757 | + goto err_out; |
---|
744 | 758 | qat_req->buf.blout = buflout; |
---|
745 | 759 | qat_req->buf.bloutp = bloutp; |
---|
746 | 760 | qat_req->buf.sz_out = sz_out; |
---|
.. | .. |
---|
752 | 766 | return 0; |
---|
753 | 767 | |
---|
754 | 768 | err_out: |
---|
| 769 | + if (!dma_mapping_error(dev, bloutp)) |
---|
| 770 | + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); |
---|
| 771 | + |
---|
755 | 772 | n = sg_nents(sglout); |
---|
756 | 773 | for (i = 0; i < n; i++) |
---|
757 | 774 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) |
---|
758 | 775 | dma_unmap_single(dev, buflout->bufers[i].addr, |
---|
759 | 776 | buflout->bufers[i].len, |
---|
760 | | - DMA_BIDIRECTIONAL); |
---|
761 | | - if (!dma_mapping_error(dev, bloutp)) |
---|
762 | | - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); |
---|
763 | | - kfree(buflout); |
---|
| 777 | + DMA_FROM_DEVICE); |
---|
| 778 | + |
---|
| 779 | + if (!qat_req->buf.sgl_dst_valid) |
---|
| 780 | + kfree(buflout); |
---|
764 | 781 | |
---|
765 | 782 | err_in: |
---|
| 783 | + if (!dma_mapping_error(dev, blp)) |
---|
| 784 | + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
---|
| 785 | + |
---|
766 | 786 | n = sg_nents(sgl); |
---|
767 | 787 | for (i = 0; i < n; i++) |
---|
768 | 788 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) |
---|
769 | 789 | dma_unmap_single(dev, bufl->bufers[i].addr, |
---|
770 | 790 | bufl->bufers[i].len, |
---|
771 | | - DMA_BIDIRECTIONAL); |
---|
| 791 | + bufl_dma_dir); |
---|
772 | 792 | |
---|
773 | | - if (!dma_mapping_error(dev, blp)) |
---|
774 | | - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
---|
775 | | - kfree(bufl); |
---|
| 793 | + if (!qat_req->buf.sgl_src_valid) |
---|
| 794 | + kfree(bufl); |
---|
776 | 795 | |
---|
777 | 796 | dev_err(dev, "Failed to map buf for dma\n"); |
---|
778 | 797 | return -ENOMEM; |
---|
.. | .. |
---|
784 | 803 | struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; |
---|
785 | 804 | struct qat_crypto_instance *inst = ctx->inst; |
---|
786 | 805 | struct aead_request *areq = qat_req->aead_req; |
---|
787 | | - uint8_t stat_filed = qat_resp->comn_resp.comn_status; |
---|
| 806 | + u8 stat_filed = qat_resp->comn_resp.comn_status; |
---|
788 | 807 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); |
---|
789 | 808 | |
---|
790 | 809 | qat_alg_free_bufl(inst, qat_req); |
---|
.. | .. |
---|
793 | 812 | areq->base.complete(&areq->base, res); |
---|
794 | 813 | } |
---|
795 | 814 | |
---|
796 | | -static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, |
---|
797 | | - struct qat_crypto_request *qat_req) |
---|
| 815 | +static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, |
---|
| 816 | + struct qat_crypto_request *qat_req) |
---|
798 | 817 | { |
---|
799 | | - struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; |
---|
| 818 | + struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx; |
---|
800 | 819 | struct qat_crypto_instance *inst = ctx->inst; |
---|
801 | | - struct ablkcipher_request *areq = qat_req->ablkcipher_req; |
---|
802 | | - uint8_t stat_filed = qat_resp->comn_resp.comn_status; |
---|
| 820 | + struct skcipher_request *sreq = qat_req->skcipher_req; |
---|
| 821 | + u8 stat_filed = qat_resp->comn_resp.comn_status; |
---|
| 822 | + struct device *dev = &GET_DEV(ctx->inst->accel_dev); |
---|
803 | 823 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); |
---|
804 | 824 | |
---|
805 | 825 | qat_alg_free_bufl(inst, qat_req); |
---|
806 | 826 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) |
---|
807 | 827 | res = -EINVAL; |
---|
808 | | - areq->base.complete(&areq->base, res); |
---|
| 828 | + |
---|
| 829 | + memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE); |
---|
| 830 | + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, |
---|
| 831 | + qat_req->iv_paddr); |
---|
| 832 | + |
---|
| 833 | + sreq->base.complete(&sreq->base, res); |
---|
809 | 834 | } |
---|
810 | 835 | |
---|
811 | 836 | void qat_alg_callback(void *resp) |
---|
.. | .. |
---|
843 | 868 | qat_req->aead_ctx = ctx; |
---|
844 | 869 | qat_req->aead_req = areq; |
---|
845 | 870 | qat_req->cb = qat_aead_alg_callback; |
---|
846 | | - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
---|
| 871 | + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; |
---|
847 | 872 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
---|
848 | 873 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
---|
849 | 874 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
---|
850 | 875 | cipher_param->cipher_length = cipher_len; |
---|
851 | 876 | cipher_param->cipher_offset = areq->assoclen; |
---|
852 | 877 | memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); |
---|
853 | | - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); |
---|
| 878 | + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); |
---|
854 | 879 | auth_param->auth_off = 0; |
---|
855 | 880 | auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; |
---|
856 | 881 | do { |
---|
857 | | - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
---|
| 882 | + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
---|
858 | 883 | } while (ret == -EAGAIN && ctr++ < 10); |
---|
859 | 884 | |
---|
860 | 885 | if (ret == -EAGAIN) { |
---|
.. | .. |
---|
873 | 898 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
---|
874 | 899 | struct icp_qat_fw_la_auth_req_params *auth_param; |
---|
875 | 900 | struct icp_qat_fw_la_bulk_req *msg; |
---|
876 | | - uint8_t *iv = areq->iv; |
---|
| 901 | + u8 *iv = areq->iv; |
---|
877 | 902 | int ret, ctr = 0; |
---|
878 | 903 | |
---|
879 | 904 | if (areq->cryptlen % AES_BLOCK_SIZE != 0) |
---|
.. | .. |
---|
888 | 913 | qat_req->aead_ctx = ctx; |
---|
889 | 914 | qat_req->aead_req = areq; |
---|
890 | 915 | qat_req->cb = qat_aead_alg_callback; |
---|
891 | | - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
---|
| 916 | + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; |
---|
892 | 917 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
---|
893 | 918 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
---|
894 | 919 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
---|
895 | | - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); |
---|
| 920 | + auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); |
---|
896 | 921 | |
---|
897 | 922 | memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); |
---|
898 | 923 | cipher_param->cipher_length = areq->cryptlen; |
---|
.. | .. |
---|
902 | 927 | auth_param->auth_len = areq->assoclen + areq->cryptlen; |
---|
903 | 928 | |
---|
904 | 929 | do { |
---|
905 | | - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
---|
| 930 | + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
---|
906 | 931 | } while (ret == -EAGAIN && ctr++ < 10); |
---|
907 | 932 | |
---|
908 | 933 | if (ret == -EAGAIN) { |
---|
.. | .. |
---|
912 | 937 | return -EINPROGRESS; |
---|
913 | 938 | } |
---|
914 | 939 | |
---|
915 | | -static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
---|
916 | | - const u8 *key, unsigned int keylen, |
---|
917 | | - int mode) |
---|
| 940 | +static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx, |
---|
| 941 | + const u8 *key, unsigned int keylen, |
---|
| 942 | + int mode) |
---|
918 | 943 | { |
---|
919 | | - struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
---|
| 944 | + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); |
---|
| 945 | + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); |
---|
| 946 | + memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); |
---|
| 947 | + memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); |
---|
| 948 | + |
---|
| 949 | + return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode); |
---|
| 950 | +} |
---|
| 951 | + |
---|
| 952 | +static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx, |
---|
| 953 | + const u8 *key, unsigned int keylen, |
---|
| 954 | + int mode) |
---|
| 955 | +{ |
---|
| 956 | + struct qat_crypto_instance *inst = NULL; |
---|
920 | 957 | struct device *dev; |
---|
| 958 | + int node = get_current_node(); |
---|
| 959 | + int ret; |
---|
921 | 960 | |
---|
922 | | - spin_lock(&ctx->lock); |
---|
923 | | - if (ctx->enc_cd) { |
---|
924 | | - /* rekeying */ |
---|
925 | | - dev = &GET_DEV(ctx->inst->accel_dev); |
---|
926 | | - memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); |
---|
927 | | - memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); |
---|
928 | | - memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); |
---|
929 | | - memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); |
---|
930 | | - } else { |
---|
931 | | - /* new key */ |
---|
932 | | - int node = get_current_node(); |
---|
933 | | - struct qat_crypto_instance *inst = |
---|
934 | | - qat_crypto_get_instance_node(node); |
---|
935 | | - if (!inst) { |
---|
936 | | - spin_unlock(&ctx->lock); |
---|
937 | | - return -EINVAL; |
---|
938 | | - } |
---|
939 | | - |
---|
940 | | - dev = &GET_DEV(inst->accel_dev); |
---|
941 | | - ctx->inst = inst; |
---|
942 | | - ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), |
---|
943 | | - &ctx->enc_cd_paddr, |
---|
944 | | - GFP_ATOMIC); |
---|
945 | | - if (!ctx->enc_cd) { |
---|
946 | | - spin_unlock(&ctx->lock); |
---|
947 | | - return -ENOMEM; |
---|
948 | | - } |
---|
949 | | - ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), |
---|
950 | | - &ctx->dec_cd_paddr, |
---|
951 | | - GFP_ATOMIC); |
---|
952 | | - if (!ctx->dec_cd) { |
---|
953 | | - spin_unlock(&ctx->lock); |
---|
954 | | - goto out_free_enc; |
---|
955 | | - } |
---|
| 961 | + inst = qat_crypto_get_instance_node(node); |
---|
| 962 | + if (!inst) |
---|
| 963 | + return -EINVAL; |
---|
| 964 | + dev = &GET_DEV(inst->accel_dev); |
---|
| 965 | + ctx->inst = inst; |
---|
| 966 | + ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
---|
| 967 | + &ctx->enc_cd_paddr, |
---|
| 968 | + GFP_ATOMIC); |
---|
| 969 | + if (!ctx->enc_cd) { |
---|
| 970 | + ret = -ENOMEM; |
---|
| 971 | + goto out_free_instance; |
---|
956 | 972 | } |
---|
957 | | - spin_unlock(&ctx->lock); |
---|
958 | | - if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode)) |
---|
| 973 | + ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
---|
| 974 | + &ctx->dec_cd_paddr, |
---|
| 975 | + GFP_ATOMIC); |
---|
| 976 | + if (!ctx->dec_cd) { |
---|
| 977 | + ret = -ENOMEM; |
---|
| 978 | + goto out_free_enc; |
---|
| 979 | + } |
---|
| 980 | + |
---|
| 981 | + ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode); |
---|
| 982 | + if (ret) |
---|
959 | 983 | goto out_free_all; |
---|
960 | 984 | |
---|
961 | 985 | return 0; |
---|
.. | .. |
---|
970 | 994 | dma_free_coherent(dev, sizeof(*ctx->enc_cd), |
---|
971 | 995 | ctx->enc_cd, ctx->enc_cd_paddr); |
---|
972 | 996 | ctx->enc_cd = NULL; |
---|
973 | | - return -ENOMEM; |
---|
| 997 | +out_free_instance: |
---|
| 998 | + ctx->inst = NULL; |
---|
| 999 | + qat_crypto_put_instance(inst); |
---|
| 1000 | + return ret; |
---|
974 | 1001 | } |
---|
975 | 1002 | |
---|
976 | | -static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, |
---|
977 | | - const u8 *key, unsigned int keylen) |
---|
| 1003 | +static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm, |
---|
| 1004 | + const u8 *key, unsigned int keylen, |
---|
| 1005 | + int mode) |
---|
978 | 1006 | { |
---|
979 | | - return qat_alg_ablkcipher_setkey(tfm, key, keylen, |
---|
980 | | - ICP_QAT_HW_CIPHER_CBC_MODE); |
---|
| 1007 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
---|
| 1008 | + |
---|
| 1009 | + if (ctx->enc_cd) |
---|
| 1010 | + return qat_alg_skcipher_rekey(ctx, key, keylen, mode); |
---|
| 1011 | + else |
---|
| 1012 | + return qat_alg_skcipher_newkey(ctx, key, keylen, mode); |
---|
981 | 1013 | } |
---|
982 | 1014 | |
---|
983 | | -static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm, |
---|
984 | | - const u8 *key, unsigned int keylen) |
---|
| 1015 | +static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm, |
---|
| 1016 | + const u8 *key, unsigned int keylen) |
---|
985 | 1017 | { |
---|
986 | | - return qat_alg_ablkcipher_setkey(tfm, key, keylen, |
---|
987 | | - ICP_QAT_HW_CIPHER_CTR_MODE); |
---|
| 1018 | + return qat_alg_skcipher_setkey(tfm, key, keylen, |
---|
| 1019 | + ICP_QAT_HW_CIPHER_CBC_MODE); |
---|
988 | 1020 | } |
---|
989 | 1021 | |
---|
990 | | -static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm, |
---|
991 | | - const u8 *key, unsigned int keylen) |
---|
| 1022 | +static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm, |
---|
| 1023 | + const u8 *key, unsigned int keylen) |
---|
992 | 1024 | { |
---|
993 | | - return qat_alg_ablkcipher_setkey(tfm, key, keylen, |
---|
994 | | - ICP_QAT_HW_CIPHER_XTS_MODE); |
---|
| 1025 | + return qat_alg_skcipher_setkey(tfm, key, keylen, |
---|
| 1026 | + ICP_QAT_HW_CIPHER_CTR_MODE); |
---|
995 | 1027 | } |
---|
996 | 1028 | |
---|
997 | | -static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) |
---|
| 1029 | +static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm, |
---|
| 1030 | + const u8 *key, unsigned int keylen) |
---|
998 | 1031 | { |
---|
999 | | - struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); |
---|
1000 | | - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); |
---|
1001 | | - struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
1002 | | - struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); |
---|
| 1032 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
---|
| 1033 | + int ret; |
---|
| 1034 | + |
---|
| 1035 | + ret = xts_verify_key(tfm, key, keylen); |
---|
| 1036 | + if (ret) |
---|
| 1037 | + return ret; |
---|
| 1038 | + |
---|
| 1039 | + if (keylen >> 1 == AES_KEYSIZE_192) { |
---|
| 1040 | + ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen); |
---|
| 1041 | + if (ret) |
---|
| 1042 | + return ret; |
---|
| 1043 | + |
---|
| 1044 | + ctx->fallback = true; |
---|
| 1045 | + |
---|
| 1046 | + return 0; |
---|
| 1047 | + } |
---|
| 1048 | + |
---|
| 1049 | + ctx->fallback = false; |
---|
| 1050 | + |
---|
| 1051 | + return qat_alg_skcipher_setkey(tfm, key, keylen, |
---|
| 1052 | + ICP_QAT_HW_CIPHER_XTS_MODE); |
---|
| 1053 | +} |
---|
| 1054 | + |
---|
| 1055 | +static int qat_alg_skcipher_encrypt(struct skcipher_request *req) |
---|
| 1056 | +{ |
---|
| 1057 | + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); |
---|
| 1058 | + struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm); |
---|
| 1059 | + struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
| 1060 | + struct qat_crypto_request *qat_req = skcipher_request_ctx(req); |
---|
1003 | 1061 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
---|
1004 | 1062 | struct icp_qat_fw_la_bulk_req *msg; |
---|
| 1063 | + struct device *dev = &GET_DEV(ctx->inst->accel_dev); |
---|
1005 | 1064 | int ret, ctr = 0; |
---|
1006 | 1065 | |
---|
| 1066 | + if (req->cryptlen == 0) |
---|
| 1067 | + return 0; |
---|
| 1068 | + |
---|
| 1069 | + qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, |
---|
| 1070 | + &qat_req->iv_paddr, GFP_ATOMIC); |
---|
| 1071 | + if (!qat_req->iv) |
---|
| 1072 | + return -ENOMEM; |
---|
| 1073 | + |
---|
1007 | 1074 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
---|
1008 | | - if (unlikely(ret)) |
---|
| 1075 | + if (unlikely(ret)) { |
---|
| 1076 | + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, |
---|
| 1077 | + qat_req->iv_paddr); |
---|
1009 | 1078 | return ret; |
---|
| 1079 | + } |
---|
1010 | 1080 | |
---|
1011 | 1081 | msg = &qat_req->req; |
---|
1012 | 1082 | *msg = ctx->enc_fw_req; |
---|
1013 | | - qat_req->ablkcipher_ctx = ctx; |
---|
1014 | | - qat_req->ablkcipher_req = req; |
---|
1015 | | - qat_req->cb = qat_ablkcipher_alg_callback; |
---|
1016 | | - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
---|
| 1083 | + qat_req->skcipher_ctx = ctx; |
---|
| 1084 | + qat_req->skcipher_req = req; |
---|
| 1085 | + qat_req->cb = qat_skcipher_alg_callback; |
---|
| 1086 | + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; |
---|
1017 | 1087 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
---|
1018 | 1088 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
---|
1019 | 1089 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
---|
1020 | | - cipher_param->cipher_length = req->nbytes; |
---|
| 1090 | + cipher_param->cipher_length = req->cryptlen; |
---|
1021 | 1091 | cipher_param->cipher_offset = 0; |
---|
1022 | | - memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); |
---|
| 1092 | + cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; |
---|
| 1093 | + memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); |
---|
1023 | 1094 | do { |
---|
1024 | | - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
---|
| 1095 | + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
---|
1025 | 1096 | } while (ret == -EAGAIN && ctr++ < 10); |
---|
1026 | 1097 | |
---|
1027 | 1098 | if (ret == -EAGAIN) { |
---|
1028 | 1099 | qat_alg_free_bufl(ctx->inst, qat_req); |
---|
| 1100 | + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, |
---|
| 1101 | + qat_req->iv_paddr); |
---|
1029 | 1102 | return -EBUSY; |
---|
1030 | 1103 | } |
---|
1031 | 1104 | return -EINPROGRESS; |
---|
1032 | 1105 | } |
---|
1033 | 1106 | |
---|
1034 | | -static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) |
---|
| 1107 | +static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) |
---|
1035 | 1108 | { |
---|
1036 | | - struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); |
---|
1037 | | - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); |
---|
1038 | | - struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
1039 | | - struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); |
---|
| 1109 | + if (req->cryptlen % AES_BLOCK_SIZE != 0) |
---|
| 1110 | + return -EINVAL; |
---|
| 1111 | + |
---|
| 1112 | + return qat_alg_skcipher_encrypt(req); |
---|
| 1113 | +} |
---|
| 1114 | + |
---|
| 1115 | +static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req) |
---|
| 1116 | +{ |
---|
| 1117 | + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); |
---|
| 1118 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); |
---|
| 1119 | + struct skcipher_request *nreq = skcipher_request_ctx(req); |
---|
| 1120 | + |
---|
| 1121 | + if (req->cryptlen < XTS_BLOCK_SIZE) |
---|
| 1122 | + return -EINVAL; |
---|
| 1123 | + |
---|
| 1124 | + if (ctx->fallback) { |
---|
| 1125 | + memcpy(nreq, req, sizeof(*req)); |
---|
| 1126 | + skcipher_request_set_tfm(nreq, ctx->ftfm); |
---|
| 1127 | + return crypto_skcipher_encrypt(nreq); |
---|
| 1128 | + } |
---|
| 1129 | + |
---|
| 1130 | + return qat_alg_skcipher_encrypt(req); |
---|
| 1131 | +} |
---|
| 1132 | + |
---|
| 1133 | +static int qat_alg_skcipher_decrypt(struct skcipher_request *req) |
---|
| 1134 | +{ |
---|
| 1135 | + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); |
---|
| 1136 | + struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm); |
---|
| 1137 | + struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
| 1138 | + struct qat_crypto_request *qat_req = skcipher_request_ctx(req); |
---|
1040 | 1139 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
---|
1041 | 1140 | struct icp_qat_fw_la_bulk_req *msg; |
---|
| 1141 | + struct device *dev = &GET_DEV(ctx->inst->accel_dev); |
---|
1042 | 1142 | int ret, ctr = 0; |
---|
1043 | 1143 | |
---|
| 1144 | + if (req->cryptlen == 0) |
---|
| 1145 | + return 0; |
---|
| 1146 | + |
---|
| 1147 | + qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, |
---|
| 1148 | + &qat_req->iv_paddr, GFP_ATOMIC); |
---|
| 1149 | + if (!qat_req->iv) |
---|
| 1150 | + return -ENOMEM; |
---|
| 1151 | + |
---|
1044 | 1152 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
---|
1045 | | - if (unlikely(ret)) |
---|
| 1153 | + if (unlikely(ret)) { |
---|
| 1154 | + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, |
---|
| 1155 | + qat_req->iv_paddr); |
---|
1046 | 1156 | return ret; |
---|
| 1157 | + } |
---|
1047 | 1158 | |
---|
1048 | 1159 | msg = &qat_req->req; |
---|
1049 | 1160 | *msg = ctx->dec_fw_req; |
---|
1050 | | - qat_req->ablkcipher_ctx = ctx; |
---|
1051 | | - qat_req->ablkcipher_req = req; |
---|
1052 | | - qat_req->cb = qat_ablkcipher_alg_callback; |
---|
1053 | | - qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
---|
| 1161 | + qat_req->skcipher_ctx = ctx; |
---|
| 1162 | + qat_req->skcipher_req = req; |
---|
| 1163 | + qat_req->cb = qat_skcipher_alg_callback; |
---|
| 1164 | + qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req; |
---|
1054 | 1165 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
---|
1055 | 1166 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
---|
1056 | 1167 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
---|
1057 | | - cipher_param->cipher_length = req->nbytes; |
---|
| 1168 | + cipher_param->cipher_length = req->cryptlen; |
---|
1058 | 1169 | cipher_param->cipher_offset = 0; |
---|
1059 | | - memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); |
---|
| 1170 | + cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; |
---|
| 1171 | + memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE); |
---|
1060 | 1172 | do { |
---|
1061 | | - ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
---|
| 1173 | + ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); |
---|
1062 | 1174 | } while (ret == -EAGAIN && ctr++ < 10); |
---|
1063 | 1175 | |
---|
1064 | 1176 | if (ret == -EAGAIN) { |
---|
1065 | 1177 | qat_alg_free_bufl(ctx->inst, qat_req); |
---|
| 1178 | + dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, |
---|
| 1179 | + qat_req->iv_paddr); |
---|
1066 | 1180 | return -EBUSY; |
---|
1067 | 1181 | } |
---|
1068 | 1182 | return -EINPROGRESS; |
---|
| 1183 | +} |
---|
| 1184 | + |
---|
| 1185 | +static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) |
---|
| 1186 | +{ |
---|
| 1187 | + if (req->cryptlen % AES_BLOCK_SIZE != 0) |
---|
| 1188 | + return -EINVAL; |
---|
| 1189 | + |
---|
| 1190 | + return qat_alg_skcipher_decrypt(req); |
---|
| 1191 | +} |
---|
| 1192 | + |
---|
| 1193 | +static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req) |
---|
| 1194 | +{ |
---|
| 1195 | + struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req); |
---|
| 1196 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm); |
---|
| 1197 | + struct skcipher_request *nreq = skcipher_request_ctx(req); |
---|
| 1198 | + |
---|
| 1199 | + if (req->cryptlen < XTS_BLOCK_SIZE) |
---|
| 1200 | + return -EINVAL; |
---|
| 1201 | + |
---|
| 1202 | + if (ctx->fallback) { |
---|
| 1203 | + memcpy(nreq, req, sizeof(*req)); |
---|
| 1204 | + skcipher_request_set_tfm(nreq, ctx->ftfm); |
---|
| 1205 | + return crypto_skcipher_decrypt(nreq); |
---|
| 1206 | + } |
---|
| 1207 | + |
---|
| 1208 | + return qat_alg_skcipher_decrypt(req); |
---|
1069 | 1209 | } |
---|
1070 | 1210 | |
---|
1071 | 1211 | static int qat_alg_aead_init(struct crypto_aead *tfm, |
---|
.. | .. |
---|
1122 | 1262 | qat_crypto_put_instance(inst); |
---|
1123 | 1263 | } |
---|
1124 | 1264 | |
---|
1125 | | -static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) |
---|
| 1265 | +static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm) |
---|
1126 | 1266 | { |
---|
1127 | | - struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
1128 | | - |
---|
1129 | | - spin_lock_init(&ctx->lock); |
---|
1130 | | - tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); |
---|
1131 | | - ctx->tfm = tfm; |
---|
| 1267 | + crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request)); |
---|
1132 | 1268 | return 0; |
---|
1133 | 1269 | } |
---|
1134 | 1270 | |
---|
1135 | | -static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) |
---|
| 1271 | +static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm) |
---|
1136 | 1272 | { |
---|
1137 | | - struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
---|
| 1273 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
---|
| 1274 | + int reqsize; |
---|
| 1275 | + |
---|
| 1276 | + ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0, |
---|
| 1277 | + CRYPTO_ALG_NEED_FALLBACK); |
---|
| 1278 | + if (IS_ERR(ctx->ftfm)) |
---|
| 1279 | + return PTR_ERR(ctx->ftfm); |
---|
| 1280 | + |
---|
| 1281 | + reqsize = max(sizeof(struct qat_crypto_request), |
---|
| 1282 | + sizeof(struct skcipher_request) + |
---|
| 1283 | + crypto_skcipher_reqsize(ctx->ftfm)); |
---|
| 1284 | + crypto_skcipher_set_reqsize(tfm, reqsize); |
---|
| 1285 | + |
---|
| 1286 | + return 0; |
---|
| 1287 | +} |
---|
| 1288 | + |
---|
| 1289 | +static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm) |
---|
| 1290 | +{ |
---|
| 1291 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
---|
1138 | 1292 | struct qat_crypto_instance *inst = ctx->inst; |
---|
1139 | 1293 | struct device *dev; |
---|
1140 | 1294 | |
---|
.. | .. |
---|
1159 | 1313 | qat_crypto_put_instance(inst); |
---|
1160 | 1314 | } |
---|
1161 | 1315 | |
---|
| 1316 | +static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm) |
---|
| 1317 | +{ |
---|
| 1318 | + struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
---|
| 1319 | + |
---|
| 1320 | + if (ctx->ftfm) |
---|
| 1321 | + crypto_free_skcipher(ctx->ftfm); |
---|
| 1322 | + |
---|
| 1323 | + qat_alg_skcipher_exit_tfm(tfm); |
---|
| 1324 | +} |
---|
1162 | 1325 | |
---|
1163 | 1326 | static struct aead_alg qat_aeads[] = { { |
---|
1164 | 1327 | .base = { |
---|
1165 | 1328 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
---|
1166 | 1329 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", |
---|
1167 | 1330 | .cra_priority = 4001, |
---|
1168 | | - .cra_flags = CRYPTO_ALG_ASYNC, |
---|
| 1331 | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
1169 | 1332 | .cra_blocksize = AES_BLOCK_SIZE, |
---|
1170 | 1333 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
---|
1171 | 1334 | .cra_module = THIS_MODULE, |
---|
.. | .. |
---|
1182 | 1345 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
---|
1183 | 1346 | .cra_driver_name = "qat_aes_cbc_hmac_sha256", |
---|
1184 | 1347 | .cra_priority = 4001, |
---|
1185 | | - .cra_flags = CRYPTO_ALG_ASYNC, |
---|
| 1348 | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
1186 | 1349 | .cra_blocksize = AES_BLOCK_SIZE, |
---|
1187 | 1350 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
---|
1188 | 1351 | .cra_module = THIS_MODULE, |
---|
.. | .. |
---|
1199 | 1362 | .cra_name = "authenc(hmac(sha512),cbc(aes))", |
---|
1200 | 1363 | .cra_driver_name = "qat_aes_cbc_hmac_sha512", |
---|
1201 | 1364 | .cra_priority = 4001, |
---|
1202 | | - .cra_flags = CRYPTO_ALG_ASYNC, |
---|
| 1365 | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
1203 | 1366 | .cra_blocksize = AES_BLOCK_SIZE, |
---|
1204 | 1367 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
---|
1205 | 1368 | .cra_module = THIS_MODULE, |
---|
.. | .. |
---|
1213 | 1376 | .maxauthsize = SHA512_DIGEST_SIZE, |
---|
1214 | 1377 | } }; |
---|
1215 | 1378 | |
---|
1216 | | -static struct crypto_alg qat_algs[] = { { |
---|
1217 | | - .cra_name = "cbc(aes)", |
---|
1218 | | - .cra_driver_name = "qat_aes_cbc", |
---|
1219 | | - .cra_priority = 4001, |
---|
1220 | | - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
---|
1221 | | - .cra_blocksize = AES_BLOCK_SIZE, |
---|
1222 | | - .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), |
---|
1223 | | - .cra_alignmask = 0, |
---|
1224 | | - .cra_type = &crypto_ablkcipher_type, |
---|
1225 | | - .cra_module = THIS_MODULE, |
---|
1226 | | - .cra_init = qat_alg_ablkcipher_init, |
---|
1227 | | - .cra_exit = qat_alg_ablkcipher_exit, |
---|
1228 | | - .cra_u = { |
---|
1229 | | - .ablkcipher = { |
---|
1230 | | - .setkey = qat_alg_ablkcipher_cbc_setkey, |
---|
1231 | | - .decrypt = qat_alg_ablkcipher_decrypt, |
---|
1232 | | - .encrypt = qat_alg_ablkcipher_encrypt, |
---|
1233 | | - .min_keysize = AES_MIN_KEY_SIZE, |
---|
1234 | | - .max_keysize = AES_MAX_KEY_SIZE, |
---|
1235 | | - .ivsize = AES_BLOCK_SIZE, |
---|
1236 | | - }, |
---|
1237 | | - }, |
---|
| 1379 | +static struct skcipher_alg qat_skciphers[] = { { |
---|
| 1380 | + .base.cra_name = "cbc(aes)", |
---|
| 1381 | + .base.cra_driver_name = "qat_aes_cbc", |
---|
| 1382 | + .base.cra_priority = 4001, |
---|
| 1383 | + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
| 1384 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
---|
| 1385 | + .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), |
---|
| 1386 | + .base.cra_alignmask = 0, |
---|
| 1387 | + .base.cra_module = THIS_MODULE, |
---|
| 1388 | + |
---|
| 1389 | + .init = qat_alg_skcipher_init_tfm, |
---|
| 1390 | + .exit = qat_alg_skcipher_exit_tfm, |
---|
| 1391 | + .setkey = qat_alg_skcipher_cbc_setkey, |
---|
| 1392 | + .decrypt = qat_alg_skcipher_blk_decrypt, |
---|
| 1393 | + .encrypt = qat_alg_skcipher_blk_encrypt, |
---|
| 1394 | + .min_keysize = AES_MIN_KEY_SIZE, |
---|
| 1395 | + .max_keysize = AES_MAX_KEY_SIZE, |
---|
| 1396 | + .ivsize = AES_BLOCK_SIZE, |
---|
1238 | 1397 | }, { |
---|
1239 | | - .cra_name = "ctr(aes)", |
---|
1240 | | - .cra_driver_name = "qat_aes_ctr", |
---|
1241 | | - .cra_priority = 4001, |
---|
1242 | | - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
---|
1243 | | - .cra_blocksize = AES_BLOCK_SIZE, |
---|
1244 | | - .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), |
---|
1245 | | - .cra_alignmask = 0, |
---|
1246 | | - .cra_type = &crypto_ablkcipher_type, |
---|
1247 | | - .cra_module = THIS_MODULE, |
---|
1248 | | - .cra_init = qat_alg_ablkcipher_init, |
---|
1249 | | - .cra_exit = qat_alg_ablkcipher_exit, |
---|
1250 | | - .cra_u = { |
---|
1251 | | - .ablkcipher = { |
---|
1252 | | - .setkey = qat_alg_ablkcipher_ctr_setkey, |
---|
1253 | | - .decrypt = qat_alg_ablkcipher_decrypt, |
---|
1254 | | - .encrypt = qat_alg_ablkcipher_encrypt, |
---|
1255 | | - .min_keysize = AES_MIN_KEY_SIZE, |
---|
1256 | | - .max_keysize = AES_MAX_KEY_SIZE, |
---|
1257 | | - .ivsize = AES_BLOCK_SIZE, |
---|
1258 | | - }, |
---|
1259 | | - }, |
---|
| 1398 | + .base.cra_name = "ctr(aes)", |
---|
| 1399 | + .base.cra_driver_name = "qat_aes_ctr", |
---|
| 1400 | + .base.cra_priority = 4001, |
---|
| 1401 | + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
| 1402 | + .base.cra_blocksize = 1, |
---|
| 1403 | + .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), |
---|
| 1404 | + .base.cra_alignmask = 0, |
---|
| 1405 | + .base.cra_module = THIS_MODULE, |
---|
| 1406 | + |
---|
| 1407 | + .init = qat_alg_skcipher_init_tfm, |
---|
| 1408 | + .exit = qat_alg_skcipher_exit_tfm, |
---|
| 1409 | + .setkey = qat_alg_skcipher_ctr_setkey, |
---|
| 1410 | + .decrypt = qat_alg_skcipher_decrypt, |
---|
| 1411 | + .encrypt = qat_alg_skcipher_encrypt, |
---|
| 1412 | + .min_keysize = AES_MIN_KEY_SIZE, |
---|
| 1413 | + .max_keysize = AES_MAX_KEY_SIZE, |
---|
| 1414 | + .ivsize = AES_BLOCK_SIZE, |
---|
1260 | 1415 | }, { |
---|
1261 | | - .cra_name = "xts(aes)", |
---|
1262 | | - .cra_driver_name = "qat_aes_xts", |
---|
1263 | | - .cra_priority = 4001, |
---|
1264 | | - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
---|
1265 | | - .cra_blocksize = AES_BLOCK_SIZE, |
---|
1266 | | - .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), |
---|
1267 | | - .cra_alignmask = 0, |
---|
1268 | | - .cra_type = &crypto_ablkcipher_type, |
---|
1269 | | - .cra_module = THIS_MODULE, |
---|
1270 | | - .cra_init = qat_alg_ablkcipher_init, |
---|
1271 | | - .cra_exit = qat_alg_ablkcipher_exit, |
---|
1272 | | - .cra_u = { |
---|
1273 | | - .ablkcipher = { |
---|
1274 | | - .setkey = qat_alg_ablkcipher_xts_setkey, |
---|
1275 | | - .decrypt = qat_alg_ablkcipher_decrypt, |
---|
1276 | | - .encrypt = qat_alg_ablkcipher_encrypt, |
---|
1277 | | - .min_keysize = 2 * AES_MIN_KEY_SIZE, |
---|
1278 | | - .max_keysize = 2 * AES_MAX_KEY_SIZE, |
---|
1279 | | - .ivsize = AES_BLOCK_SIZE, |
---|
1280 | | - }, |
---|
1281 | | - }, |
---|
| 1416 | + .base.cra_name = "xts(aes)", |
---|
| 1417 | + .base.cra_driver_name = "qat_aes_xts", |
---|
| 1418 | + .base.cra_priority = 4001, |
---|
| 1419 | + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | |
---|
| 1420 | + CRYPTO_ALG_ALLOCATES_MEMORY, |
---|
| 1421 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
---|
| 1422 | + .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx), |
---|
| 1423 | + .base.cra_alignmask = 0, |
---|
| 1424 | + .base.cra_module = THIS_MODULE, |
---|
| 1425 | + |
---|
| 1426 | + .init = qat_alg_skcipher_init_xts_tfm, |
---|
| 1427 | + .exit = qat_alg_skcipher_exit_xts_tfm, |
---|
| 1428 | + .setkey = qat_alg_skcipher_xts_setkey, |
---|
| 1429 | + .decrypt = qat_alg_skcipher_xts_decrypt, |
---|
| 1430 | + .encrypt = qat_alg_skcipher_xts_encrypt, |
---|
| 1431 | + .min_keysize = 2 * AES_MIN_KEY_SIZE, |
---|
| 1432 | + .max_keysize = 2 * AES_MAX_KEY_SIZE, |
---|
| 1433 | + .ivsize = AES_BLOCK_SIZE, |
---|
1282 | 1434 | } }; |
---|
1283 | 1435 | |
---|
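Editor's note: CRYPTO_ALG_ALLOCATES_MEMORY, added to every cra_flags in the array above, advertises that the request path may allocate memory. A consumer that must avoid allocations on its I/O path (dm-crypt is the usual example) can exclude such implementations by putting the flag in the lookup mask; a minimal, hypothetical sketch (not part of this patch):

#include <crypto/skcipher.h>

/* Hypothetical helper -- not from this patch. With type == 0 and
 * CRYPTO_ALG_ALLOCATES_MEMORY in the mask, only implementations with
 * that flag clear can match, so the qat_* entries above are skipped. */
static struct crypto_skcipher *example_alloc_nonallocating_cbc(void)
{
        return crypto_alloc_skcipher("cbc(aes)", 0,
                                     CRYPTO_ALG_ALLOCATES_MEMORY);
}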
1284 | 1436 | int qat_algs_register(void) |
---|
1285 | 1437 | { |
---|
1286 | | - int ret = 0, i; |
---|
| 1438 | + int ret = 0; |
---|
1287 | 1439 | |
---|
1288 | 1440 | mutex_lock(&algs_lock); |
---|
1289 | 1441 | if (++active_devs != 1) |
---|
1290 | 1442 | goto unlock; |
---|
1291 | 1443 | |
---|
1292 | | - for (i = 0; i < ARRAY_SIZE(qat_algs); i++) |
---|
1293 | | - qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
---|
1294 | | - |
---|
1295 | | - ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); |
---|
| 1444 | + ret = crypto_register_skciphers(qat_skciphers, |
---|
| 1445 | + ARRAY_SIZE(qat_skciphers)); |
---|
1296 | 1446 | if (ret) |
---|
1297 | 1447 | goto unlock; |
---|
1298 | | - |
---|
1299 | | - for (i = 0; i < ARRAY_SIZE(qat_aeads); i++) |
---|
1300 | | - qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC; |
---|
1301 | 1448 | |
---|
1302 | 1449 | ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); |
---|
1303 | 1450 | if (ret) |
---|
.. | .. |
---|
1308 | 1455 | return ret; |
---|
1309 | 1456 | |
---|
1310 | 1457 | unreg_algs: |
---|
1311 | | - crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); |
---|
| 1458 | + crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers)); |
---|
1312 | 1459 | goto unlock; |
---|
1313 | 1460 | } |
---|
1314 | 1461 | |
---|
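Editor's note: with cra_priority 4001, these entries outrank the software generics that share the same cra_name, so once qat_algs_register() has run a plain lookup of, say, "cbc(aes)" resolves to qat_aes_cbc on systems with a QAT device present. A hypothetical way to confirm which driver backs a handle (not part of this patch):

#include <crypto/skcipher.h>
#include <linux/printk.h>

/* Hypothetical helper -- not from this patch. Logs the driver behind a
 * generic algorithm name, e.g. "qat_aes_cbc" vs. "cbc(aes-generic)". */
static void example_report_backend(const char *name)
{
        struct crypto_skcipher *tfm = crypto_alloc_skcipher(name, 0, 0);

        if (IS_ERR(tfm))
                return;

        pr_info("%s backed by %s\n", name,
                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));
        crypto_free_skcipher(tfm);
}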
.. | .. |
---|
1319 | 1466 | goto unlock; |
---|
1320 | 1467 | |
---|
1321 | 1468 | crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); |
---|
1322 | | - crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); |
---|
| 1469 | + crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers)); |
---|
1323 | 1470 | |
---|
1324 | 1471 | unlock: |
---|
1325 | 1472 | mutex_unlock(&algs_lock); |
---|