2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/mxs-dcp.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Freescale i.MX23/i.MX28 Data Co-Processor driver
  *
  * Copyright (C) 2013 Marek Vasut <marex@denx.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
  */
 
 #include <linux/dma-mapping.h>
@@ -20,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/stmp_device.h>
+#include <linux/clk.h>
 
 #include <crypto/aes.h>
 #include <crypto/sha.h>
@@ -83,6 +78,7 @@
 	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
+	struct clk			*dcp_clk;
 };
 
 enum dcp_chan {
@@ -109,11 +105,17 @@
 struct dcp_aes_req_ctx {
 	unsigned int			enc:1;
 	unsigned int			ecb:1;
+	struct skcipher_request		fallback_req;	// keep at the end
 };
 
 struct dcp_sha_req_ctx {
 	unsigned int			init:1;
 	unsigned int			fini:1;
+};
+
+struct dcp_export_state {
+	struct dcp_sha_req_ctx		req_ctx;
+	struct dcp_async_ctx		async_ctx;
 };
 
 /*
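
Annotation (not part of the patch): the "keep at the end" comment on
fallback_req is load-bearing, because the init_tfm hook added later in this
patch reserves the fallback cipher's own request context directly behind it.
A sketch of the resulting layout, as a C comment:

/*
 * The crypto core allocates reqsize bytes of context behind every
 * skcipher_request. mxs_dcp_aes_fallback_init_tfm() below sets
 *
 *	reqsize = sizeof(struct dcp_aes_req_ctx)
 *		+ crypto_skcipher_reqsize(blk);
 *
 * so memory looks like:
 *
 *	struct skcipher_request			(caller's request)
 *	struct dcp_aes_req_ctx			<- skcipher_request_ctx(req)
 *		enc:1, ecb:1
 *		struct skcipher_request fallback_req	<- must stay last
 *	fallback's own request context		<- lives behind fallback_req
 *
 * dcp_export_state, meanwhile, bundles the per-request and per-transform
 * halves of a partial SHA state so both survive an export/import round
 * trip (see dcp_sha_export()/dcp_sha_import() below).
 */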
@@ -215,12 +217,12 @@
  * Encryption (AES128)
  */
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
-			   struct ablkcipher_request *req, int init)
+			   struct skcipher_request *req, int init)
 {
 	dma_addr_t key_phys, src_phys, dst_phys;
 	struct dcp *sdcp = global_sdcp;
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;
 
 	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -291,9 +293,9 @@
 {
 	struct dcp *sdcp = global_sdcp;
 
-	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+	struct skcipher_request *req = skcipher_request_cast(arq);
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 
 	struct scatterlist *dst = req->dst;
 	struct scatterlist *src = req->src;
@@ -321,21 +323,21 @@
 
 	if (!rctx->ecb) {
 		/* Copy the CBC IV just past the key. */
-		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 		/* CBC needs the INIT set. */
 		init = 1;
 	} else {
 		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 	}
 
-	for_each_sg(req->src, src, sg_nents(src), i) {
+	for_each_sg(req->src, src, sg_nents(req->src), i) {
 		src_buf = sg_virt(src);
 		len = sg_dma_len(src);
 		tlen += len;
-		limit_hit = tlen > req->nbytes;
+		limit_hit = tlen > req->cryptlen;
 
 		if (limit_hit)
-			len = req->nbytes - (tlen - len);
+			len = req->cryptlen - (tlen - len);
 
 		do {
 			if (actx->fill + len > out_off)
@@ -374,10 +376,10 @@
 	/* Copy the IV for CBC for chaining */
 	if (!rctx->ecb) {
 		if (rctx->enc)
-			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 		else
-			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 			       AES_BLOCK_SIZE);
 	}
 
@@ -421,34 +423,33 @@
 	return 0;
 }
 
-static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	skcipher_request_set_tfm(subreq, ctx->fallback);
-	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   req->nbytes, req->info);
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, req->iv);
 
 	if (enc)
-		ret = crypto_skcipher_encrypt(subreq);
+		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 	else
-		ret = crypto_skcipher_decrypt(subreq);
-
-	skcipher_request_zero(subreq);
+		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 
 	return ret;
 }
 
-static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 {
 	struct dcp *sdcp = global_sdcp;
 	struct crypto_async_request *arq = &req->base;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
-	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	int ret;
 
 	if (unlikely(actx->key_len != AES_KEYSIZE_128))
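
Annotation: the SKCIPHER_REQUEST_ON_STACK subrequest only worked while the
fallback was forced synchronous (callback NULL/NULL). Now that the original
request's completion callback and context are forwarded, the subrequest can
outlive this function, so it moves into the request context, and zeroing
stack memory via skcipher_request_zero() becomes moot. A hypothetical
caller-side completion, to show what the forwarding buys (sketch, names mine):

/* Fires exactly once, whether the DCP hardware path or the software
 * fallback did the work, because fallback_req reuses the original
 * req->base.complete and req->base.data. */
static void my_crypt_done(struct crypto_async_request *areq, int err)
{
	struct completion *done = areq->data;	/* set up by the caller */

	if (err == -EINPROGRESS)	/* a backlogged request just started */
		return;
	complete(done);
}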
@@ -464,34 +465,33 @@
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
-static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 1);
 }
 
-static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 1);
 }
 
-static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 0, 0);
 }
 
-static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 {
 	return mxs_dcp_aes_enqueue(req, 1, 0);
 }
 
-static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			      unsigned int len)
 {
-	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
-	unsigned int ret;
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 	/*
 	 * AES 128 is supposed by the hardware, store key into temporary
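
Annotation: returning ret instead of a hard-coded -EINPROGRESS propagates what
crypto_enqueue_request() actually reported: -EINPROGRESS when the request was
queued, or -EBUSY when it was parked on the backlog of a full queue (callers
that set CRYPTO_TFM_REQ_MAY_BACKLOG must treat -EBUSY as "accepted"). The
value comes from the enqueue step earlier in this function, roughly
(condensed from the unchanged driver code):

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

The SHA enqueue path receives the same fix further down.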
@@ -512,37 +512,28 @@
 	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
-	ret = crypto_skcipher_setkey(actx->fallback, key, len);
-	if (!ret)
-		return 0;
-
-	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
-			       CRYPTO_TFM_RES_MASK;
-
-	return ret;
+	return crypto_skcipher_setkey(actx->fallback, key, len);
 }
 
-static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 {
-	const char *name = crypto_tfm_alg_name(tfm);
-	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *blk;
 
-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
 	actx->fallback = blk;
-	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+					 crypto_skcipher_reqsize(blk));
 	return 0;
 }
 
-static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(actx->fallback);
 }
@@ -699,11 +690,7 @@
 
 	struct crypto_async_request *backlog;
 	struct crypto_async_request *arq;
-
-	struct dcp_sha_req_ctx *rctx;
-
-	struct ahash_request *req;
-	int ret, fini;
+	int ret;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -724,11 +711,7 @@
 		backlog->complete(backlog, -EINPROGRESS);
 
 		if (arq) {
-			req = ahash_request_cast(arq);
-			rctx = ahash_request_ctx(req);
-
 			ret = dcp_sha_req_to_buf(arq);
-			fini = rctx->fini;
 			arq->complete(arq, ret);
 		}
 	}
@@ -796,7 +779,7 @@
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
 static int dcp_sha_update(struct ahash_request *req)
@@ -827,14 +810,32 @@
 	return dcp_sha_finup(req);
 }
 
-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	const struct dcp_export_state *export = in;
+
+	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+	memset(actx, 0, sizeof(struct dcp_async_ctx));
+	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }
 
-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+	struct dcp_export_state *export = out;
+
+	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }
 
 static int dcp_sha_cra_init(struct crypto_tfm *tfm)
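
Annotation: with real import/export in place of the -ENOSYS stubs, a partial
hash can be snapshotted and resumed, which the crypto self-tests exercise once
statesize is nonzero (it is added to both halg definitions below). A minimal
hypothetical in-kernel consumer (sketch only; HASH_MAX_STATESIZE is the core's
upper bound on any ahash statesize, so it covers struct dcp_export_state):

#include <crypto/hash.h>

static int snapshot_and_resume(struct ahash_request *req)
{
	u8 state[HASH_MAX_STATESIZE];
	int ret;

	ret = crypto_ahash_export(req, state);	/* lands in dcp_sha_export() */
	if (ret)
		return ret;
	/* ...later, possibly after other requests on the same tfm... */
	return crypto_ahash_import(req, state);	/* dcp_sha_import() */
}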
@@ -849,54 +850,44 @@
 }
 
 /* AES 128 ECB and AES 128 CBC */
-static struct crypto_alg dcp_aes_algs[] = {
+static struct skcipher_alg dcp_aes_algs[] = {
 	{
-		.cra_name		= "ecb(aes)",
-		.cra_driver_name	= "ecb-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "ecb(aes)",
+		.base.cra_driver_name	= "ecb-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u	= {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_ecb_encrypt,
-				.decrypt	= mxs_dcp_aes_ecb_decrypt
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_ecb_encrypt,
+		.decrypt		= mxs_dcp_aes_ecb_decrypt,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	}, {
-		.cra_name		= "cbc(aes)",
-		.cra_driver_name	= "cbc-aes-dcp",
-		.cra_priority		= 400,
-		.cra_alignmask		= 15,
-		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-					  CRYPTO_ALG_ASYNC |
+		.base.cra_name		= "cbc(aes)",
+		.base.cra_driver_name	= "cbc-aes-dcp",
+		.base.cra_priority	= 400,
+		.base.cra_alignmask	= 15,
+		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
-		.cra_init		= mxs_dcp_aes_fallback_init,
-		.cra_exit		= mxs_dcp_aes_fallback_exit,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
-		.cra_type		= &crypto_ablkcipher_type,
-		.cra_module		= THIS_MODULE,
-		.cra_u = {
-			.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.setkey		= mxs_dcp_aes_setkey,
-				.encrypt	= mxs_dcp_aes_cbc_encrypt,
-				.decrypt	= mxs_dcp_aes_cbc_decrypt,
-				.ivsize		= AES_BLOCK_SIZE,
-			},
-		},
+		.base.cra_blocksize	= AES_BLOCK_SIZE,
+		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= AES_MIN_KEY_SIZE,
+		.max_keysize		= AES_MAX_KEY_SIZE,
+		.setkey			= mxs_dcp_aes_setkey,
+		.encrypt		= mxs_dcp_aes_cbc_encrypt,
+		.decrypt		= mxs_dcp_aes_cbc_decrypt,
+		.ivsize			= AES_BLOCK_SIZE,
+		.init			= mxs_dcp_aes_fallback_init_tfm,
+		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 	},
 };
 
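
Annotation: this is the mechanical half of the ablkcipher-to-skcipher
conversion. struct skcipher_alg carries the cipher ops directly, so cra_type,
CRYPTO_ALG_TYPE_ABLKCIPHER and the cra_u.ablkcipher nesting disappear, and the
init/exit hooks move from cra_init/cra_exit to the skcipher level. Consumers
are unaffected either way; a minimal synchronous user of the "cbc(aes)"
implementation registered above might look like this (hypothetical helper,
error handling trimmed to essentials):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(struct scatterlist *src,
			       struct scatterlist *dst, unsigned int len,
			       const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* Wait synchronously; crypto_wait_req() handles -EINPROGRESS/-EBUSY. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}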
@@ -907,10 +898,11 @@
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import = dcp_sha_noimport,
-	.export = dcp_sha_noexport,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha1",
 			.cra_driver_name	= "sha1-dcp",
@@ -933,10 +925,11 @@
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import = dcp_sha_noimport,
-	.export = dcp_sha_noexport,
+	.import = dcp_sha_import,
+	.export = dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha256",
 			.cra_driver_name	= "sha256-dcp",
@@ -979,8 +972,6 @@
 	struct device *dev = &pdev->dev;
 	struct dcp *sdcp = NULL;
 	int i, ret;
-
-	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;
 
 	if (global_sdcp) {
@@ -988,25 +979,20 @@
 		return -ENODEV;
 	}
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
-	if (dcp_vmi_irq < 0) {
-		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
+	if (dcp_vmi_irq < 0)
 		return dcp_vmi_irq;
-	}
 
 	dcp_irq = platform_get_irq(pdev, 1);
-	if (dcp_irq < 0) {
-		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
+	if (dcp_irq < 0)
 		return dcp_irq;
-	}
 
 	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 	if (!sdcp)
 		return -ENOMEM;
 
 	sdcp->dev = dev;
-	sdcp->base = devm_ioremap_resource(dev, iores);
+	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sdcp->base))
 		return PTR_ERR(sdcp->base);
 
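
Annotation: devm_platform_ioremap_resource(pdev, 0) folds the removed
platform_get_resource()/devm_ioremap_resource() pair into one call; it is
effectively (sketch):

	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

The deleted dev_err() calls after platform_get_irq() were redundant, since
platform_get_irq() already logs its own failures.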
@@ -1034,10 +1020,23 @@
 	/* Re-align the structure so it fits the DCP constraints. */
 	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
 
-	/* Restart the DCP block. */
-	ret = stmp_reset_block(sdcp->base);
+	/* DCP clock is optional, only used on some SOCs */
+	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
+	if (IS_ERR(sdcp->dcp_clk)) {
+		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
+			return PTR_ERR(sdcp->dcp_clk);
+		sdcp->dcp_clk = NULL;
+	}
+	ret = clk_prepare_enable(sdcp->dcp_clk);
 	if (ret)
 		return ret;
+
+	/* Restart the DCP block. */
+	ret = stmp_reset_block(sdcp->base);
+	if (ret) {
+		dev_err(dev, "Failed reset\n");
+		goto err_disable_unprepare_clk;
+	}
 
 	/* Initialize control register. */
 	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
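
Annotation: the ERR_PTR(-ENOENT) carve-out makes the "dcp" clock optional, and
clk_prepare_enable(NULL) is a documented no-op returning 0, so the rest of
probe runs unconditionally on SoCs whose device tree provides no such clock.
The open-coded check is equivalent to the dedicated helper (sketch of a
possible simplification, not what the patch uses):

	sdcp->dcp_clk = devm_clk_get_optional(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);
	/* NULL simply means "no clock"; the clk_*() calls accept it. */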
@@ -1075,7 +1074,8 @@
 						      NULL, "mxs_dcp_chan/sha");
 	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 		dev_err(dev, "Error starting SHA thread!\n");
-		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+		goto err_disable_unprepare_clk;
 	}
 
 	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1090,8 +1090,8 @@
 	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
-		ret = crypto_register_algs(dcp_aes_algs,
-					   ARRAY_SIZE(dcp_aes_algs));
+		ret = crypto_register_skciphers(dcp_aes_algs,
+						ARRAY_SIZE(dcp_aes_algs));
 		if (ret) {
 			/* Failed to register algorithm. */
 			dev_err(dev, "Failed to register AES crypto!\n");
@@ -1125,13 +1125,17 @@
 
 err_unregister_aes:
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 err_destroy_aes_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
 
 err_destroy_sha_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_disable_unprepare_clk:
+	clk_disable_unprepare(sdcp->dcp_clk);
+
 	return ret;
 }
 
@@ -1146,11 +1150,13 @@
 	crypto_unregister_ahash(&dcp_sha1_alg);
 
 	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
-		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
 
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
 
+	clk_disable_unprepare(sdcp->dcp_clk);
+
 	platform_set_drvdata(pdev, NULL);
 
 	global_sdcp = NULL;
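
Annotation: teardown now mirrors probe. The new err_disable_unprepare_clk
label closes probe's error ladder, and the remove path gates the clock off
after stopping both worker threads. Because clk_disable_unprepare(NULL) is
likewise a no-op, neither path needs a conditional; condensed as a
hypothetical helper (not in the patch):

static inline void mxs_dcp_clk_unwind(struct dcp *sdcp)
{
	clk_disable_unprepare(sdcp->dcp_clk);	/* safe when dcp_clk == NULL */
}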