+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
  *
...
  *
  * Author: Gary R Hook <gary.hook@amd.com>
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */

 #include <linux/module.h>
...
         const char *drv_name;
 };

-static struct ccp_aes_xts_def aes_xts_algs[] = {
+static const struct ccp_aes_xts_def aes_xts_algs[] = {
         {
                 .name = "xts(aes)",
                 .drv_name = "xts-aes-ccp",
...

 static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
 {
-        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
-        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+        struct skcipher_request *req = skcipher_request_cast(async_req);
+        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

         if (ret)
                 return ret;

-        memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+        memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

         return 0;
 }

-static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                               unsigned int key_len)
 {
-        struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
-        struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
         unsigned int ccpversion = ccp_version();
         int ret;

-        ret = xts_check_key(xfm, key, key_len);
+        ret = xts_verify_key(tfm, key, key_len);
         if (ret)
                 return ret;

...
         return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
 }

-static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+static int ccp_aes_xts_crypt(struct skcipher_request *req,
                              unsigned int encrypt)
 {
-        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
         unsigned int ccpversion = ccp_version();
         unsigned int fallback = 0;
         unsigned int unit;
...
         if (!ctx->u.aes.key_len)
                 return -EINVAL;

-        if (req->nbytes & (AES_BLOCK_SIZE - 1))
-                return -EINVAL;
-
-        if (!req->info)
+        if (!req->iv)
                 return -EINVAL;

         /* Check conditions under which the CCP can fulfill a request. The
...
          */
         unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
         for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
-                if (req->nbytes == xts_unit_sizes[unit].size) {
+                if (req->cryptlen == xts_unit_sizes[unit].size) {
                         unit_size = unit;
                         break;
                 }
...
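The loop just above is the heart of the dispatch decision: the CCP engine only accepts an XTS request whose total length exactly equals one of the unit sizes it supports, which is also why the old block-alignment test on req->nbytes could simply be dropped; any length that does not match a table entry is routed to the software fallback in the next hunk. For orientation, the table consulted here has roughly the following shape. This is a sketch reconstructed from memory of the driver's companion headers; treat the exact entries as illustrative rather than authoritative:

static struct ccp_unit_size_map xts_unit_sizes[] = {
        { .size = 16,   .value = CCP_XTS_AES_UNIT_SIZE_16, },
        { .size = 512,  .value = CCP_XTS_AES_UNIT_SIZE_512, },
        { .size = 1024, .value = CCP_XTS_AES_UNIT_SIZE_1024, },
        { .size = 2048, .value = CCP_XTS_AES_UNIT_SIZE_2048, },
        { .size = 4096, .value = CCP_XTS_AES_UNIT_SIZE_4096, },
};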
             (ctx->u.aes.key_len != AES_KEYSIZE_256))
                 fallback = 1;
         if (fallback) {
-                SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
-
                 /* Use the fallback to process the request for any
                  * unsupported unit sizes or key sizes
                  */
-                skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
-                skcipher_request_set_callback(subreq, req->base.flags,
-                                              NULL, NULL);
-                skcipher_request_set_crypt(subreq, req->src, req->dst,
-                                           req->nbytes, req->info);
-                ret = encrypt ? crypto_skcipher_encrypt(subreq) :
-                                crypto_skcipher_decrypt(subreq);
-                skcipher_request_zero(subreq);
+                skcipher_request_set_tfm(&rctx->fallback_req,
+                                         ctx->u.aes.tfm_skcipher);
+                skcipher_request_set_callback(&rctx->fallback_req,
+                                              req->base.flags,
+                                              req->base.complete,
+                                              req->base.data);
+                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+                                           req->dst, req->cryptlen, req->iv);
+                ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+                                crypto_skcipher_decrypt(&rctx->fallback_req);
                 return ret;
         }

-        memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+        memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
         sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

         memset(&rctx->cmd, 0, sizeof(rctx->cmd));
...
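The fallback hunk above changes behavior, not just names. The old code built a SKCIPHER_REQUEST_ON_STACK subrequest and passed NULL completion callbacks, which only works with a synchronous fallback; the new code uses a fallback_req embedded in the driver's request context and forwards req->base.complete and req->base.data, so the fallback cipher may itself be asynchronous. A minimal sketch of that embedded-fallback pattern follows; the example_* names are illustrative, while the skcipher calls are the real kernel API:

#include <crypto/skcipher.h>
#include <linux/err.h>

struct example_req_ctx {
        /* ... per-request driver state ... */
        struct skcipher_request fallback_req;   /* variable-size, keep last */
};

static int example_init_tfm(struct crypto_skcipher *tfm)
{
        struct crypto_skcipher *fb;

        fb = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fb))
                return PTR_ERR(fb);

        /* Reserve room behind our request context for the fallback's own
         * request context; ccp_aes_xts_init_tfm() below does the same with
         * crypto_skcipher_reqsize(fallback_tfm).
         */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct example_req_ctx) +
                                         crypto_skcipher_reqsize(fb));
        return 0;
}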
         rctx->cmd.u.xts.iv = &rctx->iv_sg;
         rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
         rctx->cmd.u.xts.src = req->src;
-        rctx->cmd.u.xts.src_len = req->nbytes;
+        rctx->cmd.u.xts.src_len = req->cryptlen;
         rctx->cmd.u.xts.dst = req->dst;

         ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
...
         return ret;
 }

-static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_encrypt(struct skcipher_request *req)
 {
         return ccp_aes_xts_crypt(req, 1);
 }

-static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_decrypt(struct skcipher_request *req)
 {
         return ccp_aes_xts_crypt(req, 0);
 }

-static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
-        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
         struct crypto_skcipher *fallback_tfm;

         ctx->complete = ccp_aes_xts_complete;
         ctx->u.aes.key_len = 0;

         fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
-                                             CRYPTO_ALG_ASYNC |
                                              CRYPTO_ALG_NEED_FALLBACK);
         if (IS_ERR(fallback_tfm)) {
                 pr_warn("could not load fallback driver xts(aes)\n");
...
         }
         ctx->u.aes.tfm_skcipher = fallback_tfm;

-        tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+        crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
+                                         crypto_skcipher_reqsize(fallback_tfm));

         return 0;
 }

-static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

         crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
 }
...
 static int ccp_register_aes_xts_alg(struct list_head *head,
                                     const struct ccp_aes_xts_def *def)
 {
-        struct ccp_crypto_ablkcipher_alg *ccp_alg;
-        struct crypto_alg *alg;
+        struct ccp_crypto_skcipher_alg *ccp_alg;
+        struct skcipher_alg *alg;
         int ret;

         ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
...

         alg = &ccp_alg->alg;

-        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
-        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                  def->drv_name);
-        alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
-                         CRYPTO_ALG_KERN_DRIVER_ONLY |
-                         CRYPTO_ALG_NEED_FALLBACK;
-        alg->cra_blocksize = AES_BLOCK_SIZE;
-        alg->cra_ctxsize = sizeof(struct ccp_ctx);
-        alg->cra_priority = CCP_CRA_PRIORITY;
-        alg->cra_type = &crypto_ablkcipher_type;
-        alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
-        alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
-        alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
-        alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
-        alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
-        alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
-        alg->cra_init = ccp_aes_xts_cra_init;
-        alg->cra_exit = ccp_aes_xts_cra_exit;
-        alg->cra_module = THIS_MODULE;
+        alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+                              CRYPTO_ALG_ALLOCATES_MEMORY |
+                              CRYPTO_ALG_KERN_DRIVER_ONLY |
+                              CRYPTO_ALG_NEED_FALLBACK;
+        alg->base.cra_blocksize = AES_BLOCK_SIZE;
+        alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+        alg->base.cra_priority = CCP_CRA_PRIORITY;
+        alg->base.cra_module = THIS_MODULE;

-        ret = crypto_register_alg(alg);
+        alg->setkey = ccp_aes_xts_setkey;
+        alg->encrypt = ccp_aes_xts_encrypt;
+        alg->decrypt = ccp_aes_xts_decrypt;
+        alg->min_keysize = AES_MIN_KEY_SIZE * 2;
+        alg->max_keysize = AES_MAX_KEY_SIZE * 2;
+        alg->ivsize = AES_BLOCK_SIZE;
+        alg->init = ccp_aes_xts_init_tfm;
+        alg->exit = ccp_aes_xts_exit_tfm;
+
+        ret = crypto_register_skcipher(alg);
         if (ret) {
-                pr_err("%s ablkcipher algorithm registration error (%d)\n",
-                       alg->cra_name, ret);
+                pr_err("%s skcipher algorithm registration error (%d)\n",
+                       alg->base.cra_name, ret);
                 kfree(ccp_alg);
                 return ret;
         }
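Taken together, the patch moves the driver from the legacy ablkcipher interface to skcipher: the request and context accessors change (ablkcipher_request_ctx to skcipher_request_ctx, req->nbytes to req->cryptlen, req->info to req->iv), the per-algorithm description becomes a struct skcipher_alg registered with crypto_register_skcipher(), and CRYPTO_ALG_ALLOCATES_MEMORY advertises that the driver may allocate memory while handling a request. As a caller-side sketch (not part of the patch) of how the converted transform is reached through the skcipher API: example_one_unit() and its parameters are illustrative, src_sg, dst_sg, key, and iv are assumed to be prepared by the caller, and 512 bytes is chosen because it is a length the engine handles natively per the unit-size table referenced earlier:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_one_unit(struct scatterlist *src_sg,
                            struct scatterlist *dst_sg,
                            const u8 *key, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        /* Select this driver explicitly by its cra_driver_name. */
        tfm = crypto_alloc_skcipher("xts-aes-ccp", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* XTS takes two AES keys back to back; 64 bytes selects AES-256. */
        ret = crypto_skcipher_setkey(tfm, key, 64);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        /* Wait synchronously for the asynchronous completion. */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, src_sg, dst_sg, 512, iv);
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}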