| Old | New | Code |
| --- | --- | --- |
| .. | .. | |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | 3 | * Cryptographic API. |
| 3 | 4 | * |
| 4 | 5 | * Support for OMAP AES GCM HW acceleration. |
| 5 | 6 | * |
| 6 | 7 | * Copyright (c) 2016 Texas Instruments Incorporated |
| 7 | | - * |
| 8 | | - * This program is free software; you can redistribute it and/or modify |
| 9 | | - * it under the terms of the GNU General Public License version 2 as published |
| 10 | | - * by the Free Software Foundation. |
| 11 | | - * |
| 12 | 8 | */ |
| 13 | 9 | |
| 14 | 10 | #include <linux/errno.h> |
| .. | .. | |
| 17 | 13 | #include <linux/dmaengine.h> |
| 18 | 14 | #include <linux/omap-dma.h> |
| 19 | 15 | #include <linux/interrupt.h> |
| | 16 | +#include <linux/pm_runtime.h> |
| 20 | 17 | #include <crypto/aes.h> |
| 21 | 18 | #include <crypto/gcm.h> |
| 22 | 19 | #include <crypto/scatterwalk.h> |
| .. | .. | |
| 33 | 30 | { |
| 34 | 31 | struct aead_request *req = dd->aead_req; |
| 35 | 32 | |
| 36 | | - dd->flags &= ~FLAGS_BUSY; |
| 37 | 33 | dd->in_sg = NULL; |
| 38 | 34 | dd->out_sg = NULL; |
| 39 | 35 | |
| 40 | | - req->base.complete(&req->base, ret); |
| | 36 | + crypto_finalize_aead_request(dd->engine, req, ret); |
| | 37 | + |
| | 38 | + pm_runtime_mark_last_busy(dd->dev); |
| | 39 | + pm_runtime_put_autosuspend(dd->dev); |
| 41 | 40 | } |
| 42 | 41 | |
| 43 | 42 | static void omap_aes_gcm_done_task(struct omap_aes_dev *dd) |
| .. | .. | |
| 78 | 77 | tag = (u8 *)rctx->auth_tag; |
| 79 | 78 | for (i = 0; i < dd->authsize; i++) { |
| 80 | 79 | if (tag[i]) { |
| 81 | | - dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n"); |
| 82 | 80 | ret = -EBADMSG; |
| 83 | 81 | } |
| 84 | 82 | } |
| 85 | 83 | } |
| 86 | 84 | |
| 87 | 85 | omap_aes_gcm_finish_req(dd, ret); |
| 88 | | - omap_aes_gcm_handle_queue(dd, NULL); |
| 89 | 86 | } |
| 90 | 87 | |
| 91 | 88 | static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, |
| .. | .. | |
| 124 | 121 | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, |
| 125 | 122 | FLAGS_ASSOC_DATA_ST_SHIFT, |
| 126 | 123 | &dd->flags); |
| | 124 | + if (ret) |
| | 125 | + return ret; |
| 127 | 126 | } |
| 128 | 127 | |
| 129 | 128 | if (cryptlen) { |
| 130 | 129 | tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen); |
| | 130 | + |
| | 131 | + if (nsg) |
| | 132 | + sg_unmark_end(dd->in_sgl); |
| 131 | 133 | |
| 132 | 134 | ret = omap_crypto_align_sg(&tmp, cryptlen, |
| 133 | 135 | AES_BLOCK_SIZE, &dd->in_sgl[nsg], |
| .. | .. | |
| 136 | 138 | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, |
| 137 | 139 | FLAGS_IN_DATA_ST_SHIFT, |
| 138 | 140 | &dd->flags); |
| | 141 | + if (ret) |
| | 142 | + return ret; |
| 139 | 143 | } |
| 140 | 144 | |
| 141 | 145 | dd->in_sg = dd->in_sgl; |
| .. | .. | |
| 146 | 150 | dd->out_sg = req->dst; |
| 147 | 151 | dd->orig_out = req->dst; |
| 148 | 152 | |
| 149 | | - dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen); |
| | 153 | + dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen); |
| 150 | 154 | |
| 151 | 155 | flags = 0; |
| 152 | 156 | if (req->src == req->dst \|\| dd->out_sg == sg_arr) |
| 153 | 157 | flags \|= OMAP_CRYPTO_FORCE_COPY; |
| 154 | 158 | |
| 155 | | - ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, |
| 156 | | - AES_BLOCK_SIZE, &dd->out_sgl, |
| 157 | | - flags, |
| 158 | | - FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); |
| 159 | | - if (ret) |
| 160 | | - return ret; |
| | 159 | + if (cryptlen) { |
| | 160 | + ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, |
| | 161 | + AES_BLOCK_SIZE, &dd->out_sgl, |
| | 162 | + flags, |
| | 163 | + FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); |
| | 164 | + if (ret) |
| | 165 | + return ret; |
| | 166 | + } |
| 161 | 167 | |
| 162 | 168 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen); |
| 163 | 169 | dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen); |
| .. | .. | |
| 165 | 171 | return 0; |
| 166 | 172 | } |
| 167 | 173 | |
| 168 | | -static void omap_aes_gcm_complete(struct crypto_async_request *req, int err) |
| 169 | | -{ |
| 170 | | - struct omap_aes_gcm_result *res = req->data; |
| 171 | | - |
| 172 | | - if (err == -EINPROGRESS) |
| 173 | | - return; |
| 174 | | - |
| 175 | | - res->err = err; |
| 176 | | - complete(&res->completion); |
| 177 | | -} |
| 178 | | - |
| 179 | 174 | static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv) |
| 180 | 175 | { |
| 181 | | - struct scatterlist iv_sg, tag_sg; |
| 182 | | - struct skcipher_request *sk_req; |
| 183 | | - struct omap_aes_gcm_result result; |
| 184 | | - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 185 | | - int ret = 0; |
| | 176 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 186 | 177 | |
| 187 | | - sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); |
| 188 | | - if (!sk_req) { |
| 189 | | - pr_err("skcipher: Failed to allocate request\n"); |
| 190 | | - return -ENOMEM; |
| 191 | | - } |
| 192 | | - |
| 193 | | - init_completion(&result.completion); |
| 194 | | - |
| 195 | | - sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE); |
| 196 | | - sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE); |
| 197 | | - skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
| 198 | | - omap_aes_gcm_complete, &result); |
| 199 | | - ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen); |
| 200 | | - skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE, |
| 201 | | - NULL); |
| 202 | | - ret = crypto_skcipher_encrypt(sk_req); |
| 203 | | - switch (ret) { |
| 204 | | - case 0: |
| 205 | | - break; |
| 206 | | - case -EINPROGRESS: |
| 207 | | - case -EBUSY: |
| 208 | | - ret = wait_for_completion_interruptible(&result.completion); |
| 209 | | - if (!ret) { |
| 210 | | - ret = result.err; |
| 211 | | - if (!ret) { |
| 212 | | - reinit_completion(&result.completion); |
| 213 | | - break; |
| 214 | | - } |
| 215 | | - } |
| 216 | | - /* fall through */ |
| 217 | | - default: |
| 218 | | - pr_err("Encryption of IV failed for GCM mode\n"); |
| 219 | | - break; |
| 220 | | - } |
| 221 | | - |
| 222 | | - skcipher_request_free(sk_req); |
| 223 | | - return ret; |
| | 178 | + aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv); |
| | 179 | + return 0; |
| 224 | 180 | } |
| 225 | 181 | |
| 226 | 182 | void omap_aes_gcm_dma_out_callback(void *data) |
| .. | .. | |
| 250 | 206 | static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, |
| 251 | 207 | struct aead_request *req) |
| 252 | 208 | { |
| 253 | | - struct omap_aes_ctx *ctx; |
| 254 | | - struct aead_request *backlog; |
| 255 | | - struct omap_aes_reqctx *rctx; |
| 256 | | - unsigned long flags; |
| 257 | | - int err, ret = 0; |
| 258 | | - |
| 259 | | - spin_lock_irqsave(&dd->lock, flags); |
| 260 | 209 | if (req) |
| 261 | | - ret = aead_enqueue_request(&dd->aead_queue, req); |
| 262 | | - if (dd->flags & FLAGS_BUSY) { |
| 263 | | - spin_unlock_irqrestore(&dd->lock, flags); |
| 264 | | - return ret; |
| 265 | | - } |
| | 210 | + return crypto_transfer_aead_request_to_engine(dd->engine, req); |
| 266 | 211 | |
| 267 | | - backlog = aead_get_backlog(&dd->aead_queue); |
| 268 | | - req = aead_dequeue_request(&dd->aead_queue); |
| 269 | | - if (req) |
| 270 | | - dd->flags \|= FLAGS_BUSY; |
| 271 | | - spin_unlock_irqrestore(&dd->lock, flags); |
| | 212 | + return 0; |
| | 213 | +} |
| 272 | 214 | |
| 273 | | - if (!req) |
| 274 | | - return ret; |
| | 215 | +static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq) |
| | 216 | +{ |
| | 217 | + struct aead_request *req = container_of(areq, struct aead_request, |
| | 218 | + base); |
| | 219 | + struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
| | 220 | + struct omap_aes_dev *dd = rctx->dd; |
| | 221 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| | 222 | + int err; |
| 275 | 223 | |
| 276 | | - if (backlog) |
| 277 | | - backlog->base.complete(&backlog->base, -EINPROGRESS); |
| 278 | | - |
| 279 | | - ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 280 | | - rctx = aead_request_ctx(req); |
| 281 | | - |
| 282 | | - dd->ctx = ctx; |
| 283 | | - rctx->dd = dd; |
| 284 | 224 | dd->aead_req = req; |
| 285 | 225 | |
| 286 | 226 | rctx->mode &= FLAGS_MODE_MASK; |
| .. | .. | |
| 290 | 230 | if (err) |
| 291 | 231 | return err; |
| 292 | 232 | |
| 293 | | - err = omap_aes_write_ctrl(dd); |
| 294 | | - if (!err) |
| 295 | | - err = omap_aes_crypt_dma_start(dd); |
| | 233 | + dd->ctx = &ctx->octx; |
| 296 | 234 | |
| 297 | | - if (err) { |
| 298 | | - omap_aes_gcm_finish_req(dd, err); |
| 299 | | - omap_aes_gcm_handle_queue(dd, NULL); |
| 300 | | - } |
| 301 | | - |
| 302 | | - return ret; |
| | 235 | + return omap_aes_write_ctrl(dd); |
| 303 | 236 | } |
| 304 | 237 | |
| 305 | 238 | static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode) |
| .. | .. | |
| 354 | 287 | |
| 355 | 288 | int omap_aes_4106gcm_encrypt(struct aead_request *req) |
| 356 | 289 | { |
| 357 | | - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| | 290 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 358 | 291 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
| 359 | 292 | |
| 360 | | - memcpy(rctx->iv, ctx->nonce, 4); |
| | 293 | + memcpy(rctx->iv, ctx->octx.nonce, 4); |
| 361 | 294 | memcpy(rctx->iv + 4, req->iv, 8); |
| 362 | | - return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT \| FLAGS_GCM \| |
| | 295 | + return crypto_ipsec_check_assoclen(req->assoclen) ?: |
| | 296 | + omap_aes_gcm_crypt(req, FLAGS_ENCRYPT \| FLAGS_GCM \| |
| 363 | 297 | FLAGS_RFC4106_GCM); |
| 364 | 298 | } |
| 365 | 299 | |
| 366 | 300 | int omap_aes_4106gcm_decrypt(struct aead_request *req) |
| 367 | 301 | { |
| 368 | | - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| | 302 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 369 | 303 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
| 370 | 304 | |
| 371 | | - memcpy(rctx->iv, ctx->nonce, 4); |
| | 305 | + memcpy(rctx->iv, ctx->octx.nonce, 4); |
| 372 | 306 | memcpy(rctx->iv + 4, req->iv, 8); |
| 373 | | - return omap_aes_gcm_crypt(req, FLAGS_GCM \| FLAGS_RFC4106_GCM); |
| | 307 | + return crypto_ipsec_check_assoclen(req->assoclen) ?: |
| | 308 | + omap_aes_gcm_crypt(req, FLAGS_GCM \| FLAGS_RFC4106_GCM); |
| 374 | 309 | } |
| 375 | 310 | |
| 376 | 311 | int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, |
| 377 | 312 | unsigned int keylen) |
| 378 | 313 | { |
| 379 | | - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); |
| | 314 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); |
| | 315 | + int ret; |
| 380 | 316 | |
| 381 | | - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && |
| 382 | | - keylen != AES_KEYSIZE_256) |
| 383 | | - return -EINVAL; |
| | 317 | + ret = aes_expandkey(&ctx->actx, key, keylen); |
| | 318 | + if (ret) |
| | 319 | + return ret; |
| 384 | 320 | |
| 385 | | - memcpy(ctx->key, key, keylen); |
| 386 | | - ctx->keylen = keylen; |
| | 321 | + memcpy(ctx->octx.key, key, keylen); |
| | 322 | + ctx->octx.keylen = keylen; |
| 387 | 323 | |
| 388 | 324 | return 0; |
| 389 | 325 | } |
| .. | .. | |
| 391 | 327 | int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key, |
| 392 | 328 | unsigned int keylen) |
| 393 | 329 | { |
| 394 | | - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); |
| | 330 | + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); |
| | 331 | + int ret; |
| 395 | 332 | |
| 396 | 333 | if (keylen < 4) |
| 397 | 334 | return -EINVAL; |
| 398 | | - |
| 399 | 335 | keylen -= 4; |
| 400 | | - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && |
| 401 | | - keylen != AES_KEYSIZE_256) |
| 402 | | - return -EINVAL; |
| 403 | 336 | |
| 404 | | - memcpy(ctx->key, key, keylen); |
| 405 | | - memcpy(ctx->nonce, key + keylen, 4); |
| 406 | | - ctx->keylen = keylen; |
| | 337 | + ret = aes_expandkey(&ctx->actx, key, keylen); |
| | 338 | + if (ret) |
| | 339 | + return ret; |
| | 340 | + |
| | 341 | + memcpy(ctx->octx.key, key, keylen); |
| | 342 | + memcpy(ctx->octx.nonce, key + keylen, 4); |
| | 343 | + ctx->octx.keylen = keylen; |
| | 344 | + |
| | 345 | + return 0; |
| | 346 | +} |
| | 347 | + |
| | 348 | +int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) |
| | 349 | +{ |
| | 350 | + return crypto_gcm_check_authsize(authsize); |
| | 351 | +} |
| | 352 | + |
| | 353 | +int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent, |
| | 354 | + unsigned int authsize) |
| | 355 | +{ |
| | 356 | + return crypto_rfc4106_check_authsize(authsize); |
| | 357 | +} |
| | 358 | + |
| | 359 | +static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) |
| | 360 | +{ |
| | 361 | + struct aead_request *req = container_of(areq, struct aead_request, |
| | 362 | + base); |
| | 363 | + struct omap_aes_reqctx *rctx = aead_request_ctx(req); |
| | 364 | + struct omap_aes_dev *dd = rctx->dd; |
| | 365 | + int ret = 0; |
| | 366 | + |
| | 367 | + if (!dd) |
| | 368 | + return -ENODEV; |
| | 369 | + |
| | 370 | + if (dd->in_sg_len) |
| | 371 | + ret = omap_aes_crypt_dma_start(dd); |
| | 372 | + else |
| | 373 | + omap_aes_gcm_dma_out_callback(dd); |
| | 374 | + |
| | 375 | + return ret; |
| | 376 | +} |
| | 377 | + |
| | 378 | +int omap_aes_gcm_cra_init(struct crypto_aead *tfm) |
| | 379 | +{ |
| | 380 | + struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); |
| | 381 | + |
| | 382 | + ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req; |
| | 383 | + ctx->enginectx.op.unprepare_request = NULL; |
| | 384 | + ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req; |
| | 385 | + |
| | 386 | + crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); |
| 407 | 387 | |
| 408 | 388 | return 0; |
| 409 | 389 | } |
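For reference, the crypto_engine wiring this patch adopts follows the framework's generic pattern: the driver allocates and starts an engine (in its probe path, outside this diff), transfers incoming AEAD requests to it, lets the engine call the registered `prepare_request`/`do_one_request` hooks, and reports completion through `crypto_finalize_aead_request()`. The sketch below is illustrative only and not part of the patch: every `my_*` identifier is invented, and it assumes the crypto_engine API generation targeted here, where a per-tfm `struct crypto_engine_ctx` sits at the start of the transform context.

```c
/*
 * Minimal sketch of the crypto_engine flow used by this patch.
 * All my_* names are hypothetical; only the crypto_engine and AEAD
 * helpers are real kernel APIs of this era.
 */
#include <linux/device.h>
#include <crypto/aead.h>
#include <crypto/engine.h>
#include <crypto/internal/aead.h>

struct my_dev {
	struct device *dev;
	struct crypto_engine *engine;
};

struct my_req_ctx {
	struct my_dev *dd;	/* device that will handle this request */
};

struct my_tfm_ctx {
	/* Kept first so the engine can find it through the tfm context. */
	struct crypto_engine_ctx enginectx;
	/* ... key material ... */
};

/* Engine callback: map buffers and program key/IV/control for one request. */
static int my_prepare(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct my_req_ctx *rctx = aead_request_ctx(req);

	/* ... set up scatterlists, write hardware registers ... */
	return rctx->dd ? 0 : -ENODEV;
}

/* Engine callback: start the asynchronous operation, e.g. kick DMA. */
static int my_do_one(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct my_req_ctx *rctx = aead_request_ctx(req);

	if (!rctx->dd)
		return -ENODEV;
	/* ... start DMA; the IRQ/DMA callback finishes the request ... */
	return 0;
}

/* Completion path (IRQ or DMA callback): hand the result back to the engine. */
static void my_complete(struct my_dev *dd, struct aead_request *req, int err)
{
	crypto_finalize_aead_request(dd->engine, req, err);
}

/* AEAD .encrypt/.decrypt entry point: just queue the request on the engine. */
static int my_queue(struct my_dev *dd, struct aead_request *req)
{
	return crypto_transfer_aead_request_to_engine(dd->engine, req);
}

/* Per-tfm init: register the engine ops, as omap_aes_gcm_cra_init() does. */
static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_tfm_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->enginectx.op.prepare_request = my_prepare;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = my_do_one;
	crypto_aead_set_reqsize(tfm, sizeof(struct my_req_ctx));
	return 0;
}

/* Probe-time setup (not in this diff): one engine per device instance;
 * the second argument selects a realtime kthread for the engine queue. */
static int my_engine_setup(struct my_dev *dd)
{
	dd->engine = crypto_engine_alloc_init(dd->dev, true);
	if (!dd->engine)
		return -ENOMEM;
	return crypto_engine_start(dd->engine);
}
```

With this split, the old private queue, `FLAGS_BUSY` bookkeeping, and backlog handling removed above become redundant: serialization and backlog support are provided by the engine itself.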