From 072de836f53be56a70cecf70b43ae43b7ce17376 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 10:08:36 +0000
Subject: [PATCH] crypto: sahara - migrate from ablkcipher to skcipher API

Convert the AES paths of this driver from the legacy ablkcipher
interface to the skcipher interface:

- register struct skcipher_alg instances with
  crypto_register_skcipher() instead of struct crypto_alg with
  crypto_register_alg();
- embed the fallback skcipher_request at the end of the request
  context, sized via crypto_skcipher_set_reqsize(), instead of
  allocating it on the stack with SKCIPHER_REQUEST_ON_STACK;
- forward the caller's completion callback to the fallback instead of
  running it with NULL callbacks;
- drop the obsolete CRYPTO_TFM_RES_MASK handling from setkey.

Additionally, replace the request queue mutex with a BH-safe spinlock,
switch to devm_platform_ioremap_resource(), drop the redundant error
message after platform_get_irq() (the core already prints one), and
replace the GPL license boilerplate with an SPDX identifier.
---
kernel/drivers/crypto/sahara.c | 280 +++++++++++++++++++++++++------------------------------
1 file changed, 126 insertions(+), 154 deletions(-)
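
Note for reviewers (this text sits before the first diff header, so it
is ignored by `git am`): the core of the conversion is the per-request
fallback pattern. Below is a minimal sketch of that pattern, assuming
ctx->fallback was allocated with CRYPTO_ALG_NEED_FALLBACK in the
init_tfm hook; the example_* names are hypothetical and not part of
this driver.

#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

struct example_reqctx {
	unsigned long mode;
	struct skcipher_request fallback_req;	/* must stay last */
};

static int example_init_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* Reserve room for our state plus the fallback's own request
	 * state; this is why fallback_req must be the last member. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct example_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int example_fallback_encrypt(struct skcipher_request *req)
{
	struct example_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

	/* Hand the request to the software implementation and let it
	 * complete through the caller's own callback. */
	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
	return crypto_skcipher_encrypt(&rctx->fallback_req);
}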
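
Note on the locking change (also ignored by `git am`): the queue mutex
becomes a spinlock, presumably so requests can be enqueued from
contexts that must not sleep, while dequeueing stays in the driver
kthread. A sketch of both halves under that assumption, using the
dev->queue, dev->queue_spinlock and dev->kthread fields declared in
this file (the example_* names are hypothetical):

#include <crypto/algapi.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static int example_enqueue(struct sahara_dev *dev,
			   struct skcipher_request *req)
{
	int err;

	/* _bh variant: the queue may also be touched from softirq. */
	spin_lock_bh(&dev->queue_spinlock);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_bh(&dev->queue_spinlock);

	wake_up_process(dev->kthread);	/* kick the dequeue thread */
	return err;
}

static struct crypto_async_request *
example_dequeue(struct sahara_dev *dev,
		struct crypto_async_request **backlog)
{
	struct crypto_async_request *async_req;

	spin_lock_bh(&dev->queue_spinlock);
	*backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_bh(&dev->queue_spinlock);

	return async_req;	/* NULL when the queue is empty */
}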
diff --git a/kernel/drivers/crypto/sahara.c b/kernel/drivers/crypto/sahara.c
index e7540a5..2043dd0 100644
--- a/kernel/drivers/crypto/sahara.c
+++ b/kernel/drivers/crypto/sahara.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
* Copyright (c) 2013 Vista Silicon S.L.
* Author: Javier Martin <javier.martin@vista-silicon.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*
* Based on omap-aes.c and tegra-aes.c
*/
@@ -21,17 +18,17 @@
#include <crypto/sha.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -154,6 +151,7 @@
struct sahara_aes_reqctx {
unsigned long mode;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -197,7 +195,7 @@
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -354,7 +352,7 @@
{
u8 state;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +404,7 @@
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +425,7 @@
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
@@ -550,7 +548,7 @@
return -EINVAL;
}
-static int sahara_aes_process(struct ablkcipher_request *req)
+static int sahara_aes_process(struct skcipher_request *req)
{
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
@@ -561,20 +559,20 @@
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- req->nbytes, req->src, req->dst);
+ req->cryptlen, req->src, req->dst);
/* assign new request to device */
- dev->total = req->nbytes;
+ dev->total = req->cryptlen;
dev->in_sg = req->src;
dev->out_sg = req->dst;
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx = skcipher_request_ctx(req);
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- if ((dev->flags & FLAGS_CBC) && req->info)
- memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
/* assign new context to device */
dev->ctx = ctx;
@@ -600,11 +598,10 @@
return 0;
}
-static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- int ret;
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->keylen = keylen;
@@ -624,25 +621,19 @@
crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
-static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
- req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
dev_err(dev->device,
"request size is not exact amount of AES blocks\n");
return -EINVAL;
@@ -650,124 +641,116 @@
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- mutex_unlock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
+ err = crypto_enqueue_request(&dev->queue, &req->base);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
return err;
}
-static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- int err;
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- int err;
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, 0);
}
-static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- int err;
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- int err;
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_CBC);
}
-static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
return PTR_ERR(ctx->fallback);
}
- tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback));
return 0;
}
-static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->fallback);
}
@@ -1059,10 +1042,10 @@
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1075,8 +1058,8 @@
ret = sahara_sha_process(req);
} else {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
+ struct skcipher_request *req =
+ skcipher_request_cast(async_req);
ret = sahara_aes_process(req);
}
@@ -1108,9 +1091,9 @@
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1193,48 +1176,42 @@
return 0;
}
-static struct crypto_alg aes_algs[] = {
+static struct skcipher_alg aes_algs[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "sahara-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "sahara-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0x0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_aes_cra_init,
- .cra_exit = sahara_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
}
};
@@ -1322,8 +1299,7 @@
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- INIT_LIST_HEAD(&aes_algs[i].cra_list);
- err = crypto_register_alg(&aes_algs[i]);
+ err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
@@ -1353,7 +1329,7 @@
err_aes_algs:
for (j = 0; j < i; j++)
- crypto_unregister_alg(&aes_algs[j]);
+ crypto_unregister_skcipher(&aes_algs[j]);
return err;
}
@@ -1363,7 +1339,7 @@
unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_alg(&aes_algs[i]);
+ crypto_unregister_skcipher(&aes_algs[i]);
for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
@@ -1389,7 +1365,6 @@
static int sahara_probe(struct platform_device *pdev)
{
struct sahara_dev *dev;
- struct resource *res;
u32 version;
int irq;
int err;
@@ -1403,17 +1378,14 @@
platform_set_drvdata(pdev, dev);
/* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
/* Get the IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
+ if (irq < 0)
return irq;
- }
err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
0, dev_name(&pdev->dev), dev);
@@ -1482,7 +1454,7 @@
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
--
Gitblit v1.6.2