From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:44:59 +0000
Subject: [PATCH] gmac get mac from eeprom

---
 kernel/drivers/crypto/omap-aes.c | 399 ++++++++++++++++++++++++++------------------------
 1 file changed, 187 insertions(+), 212 deletions(-)

diff --git a/kernel/drivers/crypto/omap-aes.c b/kernel/drivers/crypto/omap-aes.c
index a5d6e1a..a196bb8 100644
--- a/kernel/drivers/crypto/omap-aes.c
+++ b/kernel/drivers/crypto/omap-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Cryptographic API.
  *
@@ -6,11 +7,6 @@
  * Copyright (c) 2010 Nokia Corporation
  * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
  * Copyright (c) 2011 Texas Instruments Incorporated
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
  */
 
 #define pr_fmt(fmt) "%20s: " fmt, __func__
@@ -107,7 +103,7 @@
 		dd->err = 0;
 	}
 
-	err = pm_runtime_get_sync(dd->dev);
+	err = pm_runtime_resume_and_get(dd->dev);
 	if (err < 0) {
 		dev_err(dd->dev, "failed to get sync: %d\n", err);
 		return err;
@@ -143,11 +139,11 @@
 	for (i = 0; i < key32; i++) {
 		omap_aes_write(dd, AES_REG_KEY(dd, i),
-			__le32_to_cpu(dd->ctx->key[i]));
+			(__force u32)cpu_to_le32(dd->ctx->key[i]));
 	}
 
-	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
-		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
+		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
 
 	if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
 		rctx = aead_request_ctx(dd->aead_req);
@@ -273,13 +269,14 @@
 			      struct scatterlist *out_sg,
 			      int in_sg_len, int out_sg_len)
 {
-	struct dma_async_tx_descriptor *tx_in, *tx_out;
+	struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
 	struct dma_slave_config cfg;
 	int ret;
 
 	if (dd->pio_only) {
 		scatterwalk_start(&dd->in_walk, dd->in_sg);
-		scatterwalk_start(&dd->out_walk, dd->out_sg);
+		if (out_sg_len)
+			scatterwalk_start(&dd->out_walk, dd->out_sg);
 
 		/* Enable DATAIN interrupt and let it take care of the rest */
@@ -316,34 +313,45 @@
 
 	/* No callback necessary */
 	tx_in->callback_param = dd;
+	tx_in->callback = NULL;
 
 	/* OUT */
-	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
-	if (ret) {
-		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
-			ret);
-		return ret;
-	}
+	if (out_sg_len) {
+		ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+		if (ret) {
+			dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+				ret);
+			return ret;
+		}
 
-	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
-					 DMA_DEV_TO_MEM,
-					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!tx_out) {
-		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
-		return -EINVAL;
+		tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+						 out_sg_len,
+						 DMA_DEV_TO_MEM,
+						 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx_out) {
+			dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+			return -EINVAL;
+		}
+
+		cb_desc = tx_out;
+	} else {
+		cb_desc = tx_in;
 	}
 
 	if (dd->flags & FLAGS_GCM)
-		tx_out->callback = omap_aes_gcm_dma_out_callback;
+		cb_desc->callback = omap_aes_gcm_dma_out_callback;
 	else
-		tx_out->callback = omap_aes_dma_out_callback;
-	tx_out->callback_param = dd;
+		cb_desc->callback = omap_aes_dma_out_callback;
+	cb_desc->callback_param = dd;
+
 
 	dmaengine_submit(tx_in);
-	dmaengine_submit(tx_out);
+	if (tx_out)
+		dmaengine_submit(tx_out);
 
 	dma_async_issue_pending(dd->dma_lch_in);
-	dma_async_issue_pending(dd->dma_lch_out);
+	if (out_sg_len)
+		dma_async_issue_pending(dd->dma_lch_out);
 
 	/* start DMA */
 	dd->pdata->trigger(dd, dd->total);
@@ -355,7 +363,7 @@
 {
 	int err;
 
-	pr_debug("total: %d\n", dd->total);
+	pr_debug("total: %zu\n", dd->total);
 
 	if (!dd->pio_only) {
 		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
@@ -365,11 +373,13 @@
 			return -EINVAL;
 		}
 
-		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
-				 DMA_FROM_DEVICE);
-		if (!err) {
-			dev_err(dd->dev, "dma_map_sg() error\n");
-			return -EINVAL;
+		if (dd->out_sg_len) {
+			err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+					 DMA_FROM_DEVICE);
+			if (!err) {
+				dev_err(dd->dev, "dma_map_sg() error\n");
+				return -EINVAL;
+			}
 		}
 	}
 
@@ -377,8 +387,9 @@
 				 dd->out_sg_len);
 	if (err && !dd->pio_only) {
 		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
-			     DMA_FROM_DEVICE);
+		if (dd->out_sg_len)
+			dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+				     DMA_FROM_DEVICE);
 	}
 
 	return err;
@@ -386,11 +397,11 @@
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
-	struct ablkcipher_request *req = dd->req;
+	struct skcipher_request *req = dd->req;
 
 	pr_debug("err: %d\n", err);
 
-	crypto_finalize_ablkcipher_request(dd->engine, req, err);
+	crypto_finalize_skcipher_request(dd->engine, req, err);
 
 	pm_runtime_mark_last_busy(dd->dev);
 	pm_runtime_put_autosuspend(dd->dev);
@@ -398,7 +409,7 @@
 
 int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
-	pr_debug("total: %d\n", dd->total);
+	pr_debug("total: %zu\n", dd->total);
 
 	omap_aes_dma_stop(dd);
 
@@ -407,10 +418,10 @@
 }
 
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
-				 struct ablkcipher_request *req)
+				 struct skcipher_request *req)
 {
 	if (req)
-		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
 
 	return 0;
 }
@@ -418,10 +429,10 @@
 static int omap_aes_prepare_req(struct crypto_engine *engine,
 				void *areq)
 {
-	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
-	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
-			crypto_ablkcipher_reqtfm(req));
-	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+			crypto_skcipher_reqtfm(req));
+	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct omap_aes_dev *dd = rctx->dd;
 	int ret;
 	u16 flags;
@@ -431,8 +442,8 @@
 
 	/* assign new request to device */
 	dd->req = req;
-	dd->total = req->nbytes;
-	dd->total_save = req->nbytes;
+	dd->total = req->cryptlen;
+	dd->total_save = req->cryptlen;
 	dd->in_sg = req->src;
 	dd->out_sg = req->dst;
 	dd->orig_out = req->dst;
@@ -473,14 +484,22 @@
 static int omap_aes_crypt_req(struct crypto_engine *engine,
 			      void *areq)
 {
-	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
-	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct omap_aes_dev *dd = rctx->dd;
 
 	if (!dd)
 		return -ENODEV;
 
 	return omap_aes_crypt_dma_start(dd);
+}
+
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
 }
 
 static void omap_aes_done_task(unsigned long data)
@@ -498,44 +517,49 @@
 		omap_aes_crypt_dma_stop(dd);
 	}
 
-	omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+	omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
 			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
 
-	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+	omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
 			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+
+	/* Update IV output */
+	if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+		omap_aes_copy_ivout(dd, dd->req->iv);
 
 	omap_aes_finish_req(dd, 0);
 
 	pr_debug("exit\n");
 }
 
-static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
 {
-	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
-			crypto_ablkcipher_reqtfm(req));
-	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+			crypto_skcipher_reqtfm(req));
+	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct omap_aes_dev *dd;
 	int ret;
 
-	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+	if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+		return -EINVAL;
+
+	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
 		 !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 
-	if (req->nbytes < aes_fallback_sz) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
-		skcipher_request_set_tfm(subreq, ctx->fallback);
-		skcipher_request_set_callback(subreq, req->base.flags, NULL,
-					      NULL);
-		skcipher_request_set_crypt(subreq, req->src, req->dst,
-					   req->nbytes, req->info);
+	if (req->cryptlen < aes_fallback_sz) {
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
 
 		if (mode & FLAGS_ENCRYPT)
-			ret = crypto_skcipher_encrypt(subreq);
+			ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 		else
-			ret = crypto_skcipher_decrypt(subreq);
-
-		skcipher_request_zero(subreq);
+			ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 		return ret;
 	}
 	dd = omap_aes_find_dev(rctx);
@@ -549,10 +573,10 @@
 
 /* ********************** ALG API ************************************ */
 
-static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
 	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -575,32 +599,32 @@
 	return 0;
 }
 
-static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_encrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, FLAGS_ENCRYPT);
 }
 
-static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_decrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, 0);
 }
 
-static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_encrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 }
 
-static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_decrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, FLAGS_CBC);
}
 
-static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_encrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
 }
 
-static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_decrypt(struct skcipher_request *req)
 {
 	return omap_aes_crypt(req, FLAGS_CTR);
 }
@@ -610,20 +634,20 @@
 static int omap_aes_crypt_req(struct crypto_engine *engine,
 			      void *req);
 
-static int omap_aes_cra_init(struct crypto_tfm *tfm)
+static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
 {
-	const char *name = crypto_tfm_alg_name(tfm);
-	const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
-	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	const char *name = crypto_tfm_alg_name(&tfm->base);
+	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *blk;
 
-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
 	ctx->fallback = blk;
 
-	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
+					 crypto_skcipher_reqsize(blk));
 
 	ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
 	ctx->enginectx.op.unprepare_request = NULL;
@@ -632,39 +656,9 @@
 	return 0;
 }
 
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct omap_aes_dev *dd = NULL;
-	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-	int err;
-
-	/* Find AES device, currently picks the first device */
-	spin_lock_bh(&list_lock);
-	list_for_each_entry(dd, &dev_list, list) {
-		break;
-	}
-	spin_unlock_bh(&list_lock);
-
-	err = pm_runtime_get_sync(dd->dev);
-	if (err < 0) {
-		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
-			__func__, err);
-		return err;
-	}
-
-	tfm->reqsize = sizeof(struct omap_aes_reqctx);
-	ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-	if (IS_ERR(ctx->ctr)) {
-		pr_warn("could not load aes driver for encrypting IV\n");
-		return PTR_ERR(ctx->ctr);
-	}
-
-	return 0;
-}
-
-static void omap_aes_cra_exit(struct crypto_tfm *tfm)
-{
-	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	if (ctx->fallback)
 		crypto_free_skcipher(ctx->fallback);
@@ -672,91 +666,71 @@
 	ctx->fallback = NULL;
 }
 
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
-	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
-	omap_aes_cra_exit(crypto_aead_tfm(tfm));
-
-	if (ctx->ctr)
-		crypto_free_skcipher(ctx->ctr);
-}
-
 /* ********************** ALGS ************************************ */
 
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
 {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-omap",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= omap_aes_cra_init,
-	.cra_exit		= omap_aes_cra_exit,
-	.cra_u.ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.setkey		= omap_aes_setkey,
-		.encrypt	= omap_aes_ecb_encrypt,
-		.decrypt	= omap_aes_ecb_decrypt,
-	}
+	.base.cra_name		= "ecb(aes)",
+	.base.cra_driver_name	= "ecb-aes-omap",
+	.base.cra_priority	= 300,
+	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
+	.base.cra_module	= THIS_MODULE,
+
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.setkey			= omap_aes_setkey,
+	.encrypt		= omap_aes_ecb_encrypt,
+	.decrypt		= omap_aes_ecb_decrypt,
+	.init			= omap_aes_init_tfm,
+	.exit			= omap_aes_exit_tfm,
 },
 {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-omap",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= omap_aes_cra_init,
-	.cra_exit		= omap_aes_cra_exit,
-	.cra_u.ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= omap_aes_setkey,
-		.encrypt	= omap_aes_cbc_encrypt,
-		.decrypt	= omap_aes_cbc_decrypt,
-	}
+	.base.cra_name		= "cbc(aes)",
+	.base.cra_driver_name	= "cbc-aes-omap",
+	.base.cra_priority	= 300,
+	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
+	.base.cra_module	= THIS_MODULE,
+
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.ivsize			= AES_BLOCK_SIZE,
+	.setkey			= omap_aes_setkey,
+	.encrypt		= omap_aes_cbc_encrypt,
+	.decrypt		= omap_aes_cbc_decrypt,
+	.init			= omap_aes_init_tfm,
+	.exit			= omap_aes_exit_tfm,
 }
 };
 
-static struct crypto_alg algs_ctr[] = {
+static struct skcipher_alg algs_ctr[] = {
 {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-omap",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= omap_aes_cra_init,
-	.cra_exit		= omap_aes_cra_exit,
-	.cra_u.ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.geniv		= "eseqiv",
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= omap_aes_setkey,
-		.encrypt	= omap_aes_ctr_encrypt,
-		.decrypt	= omap_aes_ctr_decrypt,
-	}
-} ,
+	.base.cra_name		= "ctr(aes)",
+	.base.cra_driver_name	= "ctr-aes-omap",
+	.base.cra_priority	= 300,
+	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_blocksize	= 1,
+	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
+	.base.cra_module	= THIS_MODULE,
+
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.ivsize			= AES_BLOCK_SIZE,
+	.setkey			= omap_aes_setkey,
+	.encrypt		= omap_aes_ctr_encrypt,
+	.decrypt		= omap_aes_ctr_decrypt,
+	.init			= omap_aes_init_tfm,
+	.exit			= omap_aes_exit_tfm,
+}
 };
 
 static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -775,15 +749,15 @@
 		.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
 		.cra_alignmask		= 0xf,
 		.cra_module		= THIS_MODULE,
 	},
 	.init		= omap_aes_gcm_cra_init,
-	.exit		= omap_aes_gcm_cra_exit,
 	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 	.setkey		= omap_aes_gcm_setkey,
+	.setauthsize	= omap_aes_gcm_setauthsize,
 	.encrypt	= omap_aes_gcm_encrypt,
 	.decrypt	= omap_aes_gcm_decrypt,
 },
@@ -795,15 +769,15 @@
 		.cra_flags		= CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
 		.cra_alignmask		= 0xf,
 		.cra_module		= THIS_MODULE,
 	},
 	.init		= omap_aes_gcm_cra_init,
-	.exit		= omap_aes_gcm_cra_exit,
 	.maxauthsize	= AES_BLOCK_SIZE,
 	.ivsize		= GCM_RFC4106_IV_SIZE,
 	.setkey		= omap_aes_4106gcm_setkey,
+	.setauthsize	= omap_aes_4106gcm_setauthsize,
 	.encrypt	= omap_aes_4106gcm_encrypt,
 	.decrypt	= omap_aes_4106gcm_decrypt,
 },
@@ -1127,7 +1101,7 @@
 {
 	struct device *dev = &pdev->dev;
 	struct omap_aes_dev *dd;
-	struct crypto_alg *algp;
+	struct skcipher_alg *algp;
 	struct aead_alg *aalg;
 	struct resource res;
 	int err = -ENOMEM, i, j, irq = -1;
@@ -1159,7 +1133,7 @@
 	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
 
 	pm_runtime_enable(dev);
-	err = pm_runtime_get_sync(dev);
+	err = pm_runtime_resume_and_get(dev);
 	if (err < 0) {
 		dev_err(dev, "%s: failed to get_sync(%d)\n",
 			__func__, err);
@@ -1186,7 +1160,6 @@
 
 		irq = platform_get_irq(pdev, 0);
 		if (irq < 0) {
-			dev_err(dev, "can't get IRQ resource\n");
 			err = irq;
 			goto err_irq;
 		}
@@ -1202,9 +1175,9 @@
 	spin_lock_init(&dd->lock);
 
 	INIT_LIST_HEAD(&dd->list);
-	spin_lock(&list_lock);
+	spin_lock_bh(&list_lock);
 	list_add_tail(&dd->list, &dev_list);
-	spin_unlock(&list_lock);
+	spin_unlock_bh(&list_lock);
 
 	/* Initialize crypto engine */
 	dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1222,10 +1195,9 @@
 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
 			algp = &dd->pdata->algs_info[i].algs_list[j];
 
-			pr_debug("reg alg: %s\n", algp->cra_name);
-			INIT_LIST_HEAD(&algp->cra_list);
+			pr_debug("reg alg: %s\n", algp->base.cra_name);
 
-			err = crypto_register_alg(algp);
+			err = crypto_register_skcipher(algp);
 			if (err)
 				goto err_algs;
@@ -1238,10 +1210,8 @@
 	    !dd->pdata->aead_algs_info->registered) {
 		for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
 			aalg = &dd->pdata->aead_algs_info->algs_list[i];
-			algp = &aalg->base;
 
-			pr_debug("reg alg: %s\n", algp->cra_name);
-			INIT_LIST_HEAD(&algp->cra_list);
+			pr_debug("reg alg: %s\n", aalg->base.cra_name);
 
 			err = crypto_register_aead(aalg);
 			if (err)
@@ -1266,7 +1236,7 @@
 err_algs:
 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-			crypto_unregister_alg(
+			crypto_unregister_skcipher(
 					&dd->pdata->algs_info[i].algs_list[j]);
 
 err_engine:
@@ -1294,18 +1264,22 @@
 	if (!dd)
 		return -ENODEV;
 
-	spin_lock(&list_lock);
+	spin_lock_bh(&list_lock);
 	list_del(&dd->list);
-	spin_unlock(&list_lock);
+	spin_unlock_bh(&list_lock);
 
 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
-		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-			crypto_unregister_alg(
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
+			crypto_unregister_skcipher(
 					&dd->pdata->algs_info[i].algs_list[j]);
+			dd->pdata->algs_info[i].registered--;
+		}
 
-	for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
 		aalg = &dd->pdata->aead_algs_info->algs_list[i];
 		crypto_unregister_aead(aalg);
+		dd->pdata->aead_algs_info->registered--;
+	}
 
 	crypto_engine_exit(dd->engine);
@@ -1313,7 +1287,8 @@
 	tasklet_kill(&dd->done_task);
 	omap_aes_dma_cleanup(dd);
 	pm_runtime_disable(dd->dev);
-	dd = NULL;
+
+	sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
 
 	return 0;
 }
-- 
Gitblit v1.6.2