@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Freescale i.MX23/i.MX28 Data Co-Processor driver
  *
  * Copyright (C) 2013 Marek Vasut <marex@denx.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
 */

 #include <linux/dma-mapping.h>
@@ -20,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/stmp_device.h>
+#include <linux/clk.h>

 #include <crypto/aes.h>
 #include <crypto/sha.h>
@@ -83,6 +78,7 @@
 	spinlock_t lock[DCP_MAX_CHANS];
 	struct task_struct *thread[DCP_MAX_CHANS];
 	struct crypto_queue queue[DCP_MAX_CHANS];
+	struct clk *dcp_clk;
 };

 enum dcp_chan {
@@ -109,11 +105,17 @@
 struct dcp_aes_req_ctx {
 	unsigned int	enc:1;
 	unsigned int	ecb:1;
+	struct skcipher_request fallback_req;	// keep at the end
 };

 struct dcp_sha_req_ctx {
 	unsigned int	init:1;
 	unsigned int	fini:1;
+};
+
+struct dcp_export_state {
+	struct dcp_sha_req_ctx req_ctx;
+	struct dcp_async_ctx async_ctx;
 };

 /*
@@ -215,12 +217,12 @@
  * Encryption (AES128)
  */
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
-			   struct ablkcipher_request *req, int init)
+			   struct skcipher_request *req, int init)
 {
 	dma_addr_t key_phys, src_phys, dst_phys;
 	struct dcp *sdcp = global_sdcp;
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;

 	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -291,9 +293,9 @@
 {
 	struct dcp *sdcp = global_sdcp;

-	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+	struct skcipher_request *req = skcipher_request_cast(arq);
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

 	struct scatterlist *dst = req->dst;
 	struct scatterlist *src = req->src;
@@ -321,21 +323,21 @@

 	if (!rctx->ecb) {
 		/* Copy the CBC IV just past the key. */
-		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 		/* CBC needs the INIT set. */
 		init = 1;
 	} else {
 		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 	}

-	for_each_sg(req->src, src, sg_nents(src), i) {
+	for_each_sg(req->src, src, sg_nents(req->src), i) {
 		src_buf = sg_virt(src);
 		len = sg_dma_len(src);
 		tlen += len;
-		limit_hit = tlen > req->nbytes;
+		limit_hit = tlen > req->cryptlen;

 		if (limit_hit)
-			len = req->nbytes - (tlen - len);
+			len = req->cryptlen - (tlen - len);

 		do {
 			if (actx->fill + len > out_off)
@@ -374,10 +376,10 @@
 	/* Copy the IV for CBC for chaining */
 	if (!rctx->ecb) {
 		if (rctx->enc)
-			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 		else
-			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 	}

@@ -421,34 +423,33 @@
 	return 0;
 }

-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;

-	skcipher_request_set_tfm(subreq, ctx->fallback);
-	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   req->nbytes, req->info);
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, req->iv);

 	if (enc)
-		ret = crypto_skcipher_encrypt(subreq);
+		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 	else
-		ret = crypto_skcipher_decrypt(subreq);
-
-	skcipher_request_zero(subreq);
+		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

 	return ret;
 }

-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 {
 	struct dcp *sdcp = global_sdcp;
 	struct crypto_async_request *arq = &req->base;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;

 	if (unlikely(actx->key_len != AES_KEYSIZE_128))
@@ -464,34 +465,33 @@

 	wake_up_process(sdcp->thread[actx->chan]);

-	return -EINPROGRESS;
+	return ret;
 }

-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 1);
 }

-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 1);
 }

-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 0);
 }

-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 0);
 }

-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			      unsigned int len)
 {
-	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
-	unsigned int ret;
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

 	/*
 	 * AES 128 is supposed by the hardware, store key into temporary
@@ -512,37 +512,28 @@
 	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
-	ret = crypto_skcipher_setkey(actx->fallback, key, len);
-	if (!ret)
-		return 0;
-
-	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
-			       CRYPTO_TFM_RES_MASK;
-
-	return ret;
+	return crypto_skcipher_setkey(actx->fallback, key, len);
 }

-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 {
-	const char *name = crypto_tfm_alg_name(tfm);
-	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *blk;

-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);

 	actx->fallback = blk;
-	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+					 crypto_skcipher_reqsize(blk));
 	return 0;
 }

-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

 	crypto_free_skcipher(actx->fallback);
 }
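For context (not part of the patch): after this conversion the driver is reached through the generic skcipher API, and the embedded `fallback_req` plus `crypto_skcipher_set_reqsize()` is the standard way to reserve room for the fallback's request inside the driver's own request context. Below is a minimal, illustrative caller-side sketch of driving "cbc(aes)" through that API; the `example_*` name and buffer handling are hypothetical, not from the patch.

```c
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Illustrative only: synchronous wrapper around the async skcipher API.
 * "buf" must be DMA-able memory (e.g. kmalloc'd), not stack memory. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* May resolve to "cbc-aes-dcp" (priority 400) or another provider. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_req;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a blocking wait. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out_req:
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
```

Returning `ret` from mxs_dcp_aes_enqueue() instead of a hard-coded -EINPROGRESS (above) matters for exactly this completion path: crypto_enqueue_request() may return -EBUSY for backlogged requests, and callers like crypto_wait_req() distinguish the two.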
@@ -699,11 +690,7 @@

 	struct crypto_async_request *backlog;
 	struct crypto_async_request *arq;
-
-	struct dcp_sha_req_ctx *rctx;
-
-	struct ahash_request *req;
-	int ret, fini;
+	int ret;

 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -724,11 +711,7 @@
 			backlog->complete(backlog, -EINPROGRESS);

 		if (arq) {
-			req = ahash_request_cast(arq);
-			rctx = ahash_request_ctx(req);
-
 			ret = dcp_sha_req_to_buf(arq);
-			fini = rctx->fini;
 			arq->complete(arq, ret);
 		}
 	}
@@ -796,7 +779,7 @@
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);

-	return -EINPROGRESS;
+	return ret;
 }

 static int dcp_sha_update(struct ahash_request *req)
@@ -827,14 +810,32 @@
 	return dcp_sha_finup(req);
 }

-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	const struct dcp_export_state *export = in;
+
+	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+	memset(actx, 0, sizeof(struct dcp_async_ctx));
+	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }

-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+	struct dcp_export_state *export = out;

+	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }

 static int dcp_sha_cra_init(struct crypto_tfm *tfm)
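For context (not part of the patch): replacing the -ENOSYS stubs with real export/import lets a partially hashed request be checkpointed and resumed, which users such as algif_hash rely on; the `statesize` fields added below must cover the exported `dcp_export_state` blob. A hedged caller-side sketch, with hypothetical names:

```c
#include <crypto/hash.h>
#include <linux/slab.h>

/* Illustrative sketch: snapshot the partial state of req1 and resume it
 * on req2 (both requests must use the same ahash algorithm, e.g. "sha256"). */
static int example_hash_checkpoint(struct crypto_ahash *tfm,
				   struct ahash_request *req1,
				   struct ahash_request *req2)
{
	u8 *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req1, state);		/* save the partial state */
	if (!ret)
		ret = crypto_ahash_import(req2, state);	/* restore it elsewhere */

	kfree(state);
	return ret;
}
```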
@@ -849,54 +850,44 @@
 }

 /* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
 	{
-		.cra_name		= "ecb(aes)",
-		.cra_driver_name	= "ecb-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "ecb(aes)",
+		.base.cra_driver_name	= "ecb-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u	= {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_ecb_encrypt,
-				.decrypt	= mxs_dcp_aes_ecb_decrypt
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_ecb_encrypt,
+		.decrypt		= mxs_dcp_aes_ecb_decrypt,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	}, {
-		.cra_name		= "cbc(aes)",
-		.cra_driver_name	= "cbc-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "cbc(aes)",
+		.base.cra_driver_name	= "cbc-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u = {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_cbc_encrypt,
-				.decrypt	= mxs_dcp_aes_cbc_decrypt,
-				.ivsize		= AES_BLOCK_SIZE,
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_cbc_encrypt,
+		.decrypt		= mxs_dcp_aes_cbc_decrypt,
+		.ivsize			= AES_BLOCK_SIZE,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	},
 };

@@ -907,10 +898,11 @@
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import = dcp_sha_noimport,
-	.export = dcp_sha_noexport,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha1",
 			.cra_driver_name	= "sha1-dcp",
@@ -933,10 +925,11 @@
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import = dcp_sha_noimport,
-	.export = dcp_sha_noexport,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha256",
 			.cra_driver_name	= "sha256-dcp",
@@ -979,8 +972,6 @@
 	struct device *dev = &pdev->dev;
 	struct dcp *sdcp = NULL;
 	int i, ret;
-
-	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;

 	if (global_sdcp) {
@@ -988,25 +979,20 @@
 		return -ENODEV;
 	}

-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
-	if (dcp_vmi_irq < 0) {
-		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
+	if (dcp_vmi_irq < 0)
 		return dcp_vmi_irq;
-	}

 	dcp_irq = platform_get_irq(pdev, 1);
-	if (dcp_irq < 0) {
-		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
+	if (dcp_irq < 0)
 		return dcp_irq;
-	}

 	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 	if (!sdcp)
 		return -ENOMEM;

 	sdcp->dev = dev;
-	sdcp->base = devm_ioremap_resource(dev, iores);
+	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sdcp->base))
 		return PTR_ERR(sdcp->base);

@@ -1034,10 +1020,23 @@
 	/* Re-align the structure so it fits the DCP constraints. */
 	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

-	/* Restart the DCP block. */
-	ret = stmp_reset_block(sdcp->base);
+	/* DCP clock is optional, only used on some SOCs */
+	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
+	if (IS_ERR(sdcp->dcp_clk)) {
+		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
+			return PTR_ERR(sdcp->dcp_clk);
+		sdcp->dcp_clk = NULL;
+	}
+	ret = clk_prepare_enable(sdcp->dcp_clk);
 	if (ret)
 		return ret;
+
+	/* Restart the DCP block. */
+	ret = stmp_reset_block(sdcp->base);
+	if (ret) {
+		dev_err(dev, "Failed reset\n");
+		goto err_disable_unprepare_clk;
+	}

 	/* Initialize control register. */
 	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
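Aside (not part of the patch): the -ENOENT special case above works because the clk framework treats a NULL clk as a no-op in clk_prepare_enable() and clk_disable_unprepare(). On kernels that provide devm_clk_get_optional(), the same intent can be expressed more directly; a hedged sketch:

```c
	/* Hypothetical alternative: devm_clk_get_optional() returns NULL
	 * (rather than ERR_PTR(-ENOENT)) when the DT omits the "dcp" clock,
	 * so no manual special-casing is needed. */
	sdcp->dcp_clk = devm_clk_get_optional(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;
```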
@@ -1075,7 +1074,8 @@
 					    NULL, "mxs_dcp_chan/sha");
 	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 		dev_err(dev, "Error starting SHA thread!\n");
-		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		goto err_disable_unprepare_clk;
 	}

 	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1090,8 +1090,8 @@
 	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
-		ret = crypto_register_algs(dcp_aes_algs,
-					   ARRAY_SIZE(dcp_aes_algs));
+		ret = crypto_register_skciphers(dcp_aes_algs,
+						ARRAY_SIZE(dcp_aes_algs));
 		if (ret) {
 			/* Failed to register algorithm. */
 			dev_err(dev, "Failed to register AES crypto!\n");
@@ -1125,13 +1125,17 @@

 err_unregister_aes:
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

 err_destroy_aes_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

 err_destroy_sha_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_disable_unprepare_clk:
+	clk_disable_unprepare(sdcp->dcp_clk);
+
 	return ret;
 }

@@ -1146,11 +1150,13 @@
 		crypto_unregister_ahash(&dcp_sha1_alg);

 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

+	clk_disable_unprepare(sdcp->dcp_clk);
+
 	platform_set_drvdata(pdev, NULL);

 	global_sdcp = NULL;
|---|