From 748e4f3d702def1a4bff191e0cf93b6a05340f01 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:41:34 +0000
Subject: [PATCH] add gpio led uart

---
 kernel/arch/arm/crypto/aes-neonbs-glue.c | 237 +++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 183 insertions(+), 54 deletions(-)

diff --git a/kernel/arch/arm/crypto/aes-neonbs-glue.c b/kernel/arch/arm/crypto/aes-neonbs-glue.c
index 617c2c9..5c6cd3c 100644
--- a/kernel/arch/arm/crypto/aes-neonbs-glue.c
+++ b/kernel/arch/arm/crypto/aes-neonbs-glue.c
@@ -1,18 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Bit sliced AES using NEON instructions
  *
  * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <crypto/aes.h>
-#include <crypto/cbc.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/cipher.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/module.h>
 
@@ -20,9 +20,11 @@
 MODULE_LICENSE("GPL v2");
 
 MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)-all");
 MODULE_ALIAS_CRYPTO("ctr(aes)");
 MODULE_ALIAS_CRYPTO("xts(aes)");
 
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+
 asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
 
@@ -38,9 +40,9 @@
 				  int rounds, int blocks, u8 ctr[], u8 final[]);
 
 asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
-				  int rounds, int blocks, u8 iv[]);
+				  int rounds, int blocks, u8 iv[], int);
 asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
-				  int rounds, int blocks, u8 iv[]);
+				  int rounds, int blocks, u8 iv[], int);
 
 struct aesbs_ctx {
 	int	rounds;
@@ -49,12 +51,18 @@
 
 struct aesbs_cbc_ctx {
 	struct aesbs_ctx	key;
-	struct crypto_cipher	*enc_tfm;
+	struct crypto_skcipher	*enc_tfm;
 };
 
 struct aesbs_xts_ctx {
 	struct aesbs_ctx	key;
+	struct crypto_cipher	*cts_tfm;
 	struct crypto_cipher	*tweak_tfm;
+};
+
+struct aesbs_ctr_ctx {
+	struct aesbs_ctx	key;		/* must be first member */
+	struct crypto_aes_ctx	fallback;
 };
 
 static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -64,7 +72,7 @@
 	struct crypto_aes_ctx rk;
 	int err;
 
-	err = crypto_aes_expand_key(&rk, in_key, key_len);
+	err = aes_expandkey(&rk, in_key, key_len);
 	if (err)
 		return err;
 
@@ -86,9 +94,8 @@
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -96,12 +103,13 @@
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
 
+		kernel_neon_begin();
 		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
 		   ctx->rounds, blocks);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
 }
@@ -123,7 +131,7 @@
 	struct crypto_aes_ctx rk;
 	int err;
 
-	err = crypto_aes_expand_key(&rk, in_key, key_len);
+	err = aes_expandkey(&rk, in_key, key_len);
 	if (err)
 		return err;
 
@@ -132,20 +140,25 @@
 	kernel_neon_begin();
 	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
 	kernel_neon_end();
+	memzero_explicit(&rk, sizeof(rk));
 
-	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
-}
-
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
-	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
+	return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
 }
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+	struct skcipher_request *subreq = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	skcipher_request_set_tfm(subreq, ctx->enc_tfm);
+	skcipher_request_set_callback(subreq,
+				      skcipher_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->cryptlen, req->iv);
+
+	return crypto_skcipher_encrypt(subreq);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -155,9 +168,8 @@
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -165,31 +177,59 @@
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
 
+		kernel_neon_begin();
 		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				  ctx->key.rk, ctx->key.rounds, blocks,
 				  walk.iv);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
 }
 
-static int cbc_init(struct crypto_tfm *tfm)
+static int cbc_init(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned int reqsize;
 
-	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+	ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->enc_tfm))
+		return PTR_ERR(ctx->enc_tfm);
 
-	return PTR_ERR_OR_ZERO(ctx->enc_tfm);
+	reqsize = sizeof(struct skcipher_request);
+	reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
+	crypto_skcipher_set_reqsize(tfm, reqsize);
+
+	return 0;
 }
 
-static void cbc_exit(struct crypto_tfm *tfm)
+static void cbc_exit(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_cipher(ctx->enc_tfm);
+	crypto_free_skcipher(ctx->enc_tfm);
+}
+
+static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
+				 unsigned int key_len)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err;
+
+	err = aes_expandkey(&ctx->fallback, in_key, key_len);
+	if (err)
+		return err;
+
+	ctx->key.rounds = 6 + key_len / 4;
+
+	kernel_neon_begin();
+	aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
+	kernel_neon_end();
+
+	return 0;
 }
 
 static int ctr_encrypt(struct skcipher_request *req)
@@ -200,9 +240,8 @@
 	u8 buf[AES_BLOCK_SIZE];
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes > 0) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 		u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -213,8 +252,10 @@
 			final = NULL;
 		}
 
+		kernel_neon_begin();
 		aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				  ctx->rk, ctx->rounds, blocks, walk.iv, final);
+		kernel_neon_end();
 
 		if (final) {
 			u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -229,9 +270,31 @@
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
+}
+
+static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned long flags;
+
+	/*
+	 * Temporarily disable interrupts to avoid races where
+	 * cachelines are evicted when the CPU is interrupted
+	 * to do something else.
+	 */
+	local_irq_save(flags);
+	aes_encrypt(&ctx->fallback, dst, src);
+	local_irq_restore(flags);
+}
+
+static int ctr_encrypt_sync(struct skcipher_request *req)
+{
+	if (!crypto_simd_usable())
+		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
+
+	return ctr_encrypt(req);
 }
 
 static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -245,6 +308,9 @@
 		return err;
 
 	key_len /= 2;
+	err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
+	if (err)
+		return err;
 	err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
 	if (err)
 		return err;
@@ -252,30 +318,53 @@
 	return aesbs_setkey(tfm, in_key, key_len);
 }
 
-static int xts_init(struct crypto_tfm *tfm)
+static int xts_init(struct crypto_skcipher *tfm)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->cts_tfm))
+		return PTR_ERR(ctx->cts_tfm);
 
 	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->tweak_tfm))
+		crypto_free_cipher(ctx->cts_tfm);
 
 	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
 }
 
-static void xts_exit(struct crypto_tfm *tfm)
+static void xts_exit(struct crypto_skcipher *tfm)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_cipher(ctx->tweak_tfm);
+	crypto_free_cipher(ctx->cts_tfm);
 }
 
-static int __xts_crypt(struct skcipher_request *req,
+static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
-				  int rounds, int blocks, u8 iv[]))
+				  int rounds, int blocks, u8 iv[], int))
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 buf[2 * AES_BLOCK_SIZE];
 	struct skcipher_walk walk;
 	int err;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (unlikely(tail)) {
+		skcipher_request_set_tfm(&subreq, tfm);
+		skcipher_request_set_callback(&subreq,
+					      skcipher_request_flags(req),
+					      NULL, NULL);
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   req->cryptlen - tail, req->iv);
+		req = &subreq;
+	}
 
 	err = skcipher_walk_virt(&walk, req, true);
 	if (err)
@@ -283,32 +372,55 @@
 
 	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
+		int reorder_last_tweak = !encrypt && tail > 0;
 
-		if (walk.nbytes < walk.total)
+		if (walk.nbytes < walk.total) {
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
+			reorder_last_tweak = 0;
+		}
 
+		kernel_neon_begin();
 		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
-		   ctx->key.rounds, blocks, walk.iv);
+		   ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
-	return err;
+	if (err || likely(!tail))
+		return err;
+
+	/* handle ciphertext stealing */
+	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
+				 AES_BLOCK_SIZE, 0);
+	memcpy(buf + AES_BLOCK_SIZE, buf, tail);
+	scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);
+
+	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
+
+	if (encrypt)
+		crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
+	else
+		crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
+
+	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
+
+	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
+				 AES_BLOCK_SIZE + tail, 1);
+	return 0;
 }
 
 static int xts_encrypt(struct skcipher_request *req)
 {
-	return __xts_crypt(req, aesbs_xts_encrypt);
+	return __xts_crypt(req, true, aesbs_xts_encrypt);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
 {
-	return __xts_crypt(req, aesbs_xts_decrypt);
+	return __xts_crypt(req, false, aesbs_xts_decrypt);
 }
 
 static struct skcipher_alg aes_algs[] = { {
@@ -333,9 +445,8 @@
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
 	.base.cra_module	= THIS_MODULE,
-	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_init		= cbc_init,
-	.base.cra_exit		= cbc_exit,
+	.base.cra_flags		= CRYPTO_ALG_INTERNAL |
+				  CRYPTO_ALG_NEED_FALLBACK,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
@@ -344,6 +455,8 @@
 	.setkey			= aesbs_cbc_setkey,
 	.encrypt		= cbc_encrypt,
 	.decrypt		= cbc_decrypt,
+	.init			= cbc_init,
+	.exit			= cbc_exit,
 }, {
 	.base.cra_name		= "__ctr(aes)",
 	.base.cra_driver_name	= "__ctr-aes-neonbs",
@@ -362,6 +475,22 @@
 	.encrypt		= ctr_encrypt,
 	.decrypt		= ctr_encrypt,
 }, {
+	.base.cra_name		= "ctr(aes)",
+	.base.cra_driver_name	= "ctr-aes-neonbs-sync",
+	.base.cra_priority	= 250 - 1,
+	.base.cra_blocksize	= 1,
+	.base.cra_ctxsize	= sizeof(struct aesbs_ctr_ctx),
+	.base.cra_module	= THIS_MODULE,
+
+	.min_keysize		= AES_MIN_KEY_SIZE,
+	.max_keysize		= AES_MAX_KEY_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.walksize		= 8 * AES_BLOCK_SIZE,
+	.ivsize			= AES_BLOCK_SIZE,
+	.setkey			= aesbs_ctr_setkey_sync,
+	.encrypt		= ctr_encrypt_sync,
+	.decrypt		= ctr_encrypt_sync,
+}, {
 	.base.cra_name		= "__xts(aes)",
 	.base.cra_driver_name	= "__xts-aes-neonbs",
 	.base.cra_priority	= 250,
@@ -369,8 +498,6 @@
 	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_init		= xts_init,
-	.base.cra_exit		= xts_exit,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
@@ -379,6 +506,8 @@
 	.setkey			= aesbs_xts_setkey,
 	.encrypt		= xts_encrypt,
 	.decrypt		= xts_decrypt,
+	.init			= xts_init,
+	.exit			= xts_exit,
 } };
 
 static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-- 
Gitblit v1.6.2
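
Annotation (appended below the patch, not part of it): the most intricate
addition above is the ciphertext-stealing tail in __xts_crypt(). After the
bulk NEON pass has processed every full block, the last complete ciphertext
block donates its leading 'tail' bytes to become the final partial block,
and the recombined block goes once more through the single-block AES cipher
(cts_tfm) wrapped in the usual XTS tweak XORs. The standalone userspace
sketch below mirrors only that buffer choreography for the encrypt
direction (decryption additionally swaps the last two tweaks, which the
patch handles via reorder_last_tweak). The names cts_tail and
toy_block_cipher are hypothetical, and a byte reversal stands in for AES,
so this illustrates the data movement only, not real cryptography.

#include <stdio.h>
#include <string.h>

#define BLK 16

/* Hypothetical stand-in for the single-block AES cipher (NOT AES). */
static void toy_block_cipher(unsigned char b[BLK])
{
	for (int i = 0; i < BLK / 2; i++) {
		unsigned char t = b[i];
		b[i] = b[BLK - 1 - i];
		b[BLK - 1 - i] = t;
	}
}

static void xor_block(unsigned char *d, const unsigned char *s)
{
	for (int i = 0; i < BLK; i++)
		d[i] ^= s[i];
}

/*
 * dst already holds ciphertext for all full blocks of the message, src is
 * the plaintext, and len leaves a partial tail (0 < len % BLK).  tweak is
 * the last tweak value left over from the bulk pass.
 */
static void cts_tail(unsigned char *dst, const unsigned char *src,
		     size_t len, const unsigned char tweak[BLK])
{
	size_t tail = len % BLK;
	unsigned char buf[2 * BLK];

	/* The last full ciphertext block ... */
	memcpy(buf, dst + len - tail - BLK, BLK);
	/* ... donates its first 'tail' bytes as the final partial block. */
	memcpy(buf + BLK, buf, tail);
	/* The trailing plaintext bytes overwrite the stolen prefix. */
	memcpy(buf, src + len - tail, tail);

	xor_block(buf, tweak);		/* pre-whitening XOR */
	toy_block_cipher(buf);		/* crypto_cipher_encrypt_one() in the patch */
	xor_block(buf, tweak);		/* post-whitening XOR */

	/* Write back one full plus one partial block over the message end. */
	memcpy(dst + len - tail - BLK, buf, BLK + tail);
}

int main(void)
{
	unsigned char src[21] = "ABCDEFGHIJKLMNOPQRST";
	unsigned char dst[21], tweak[BLK] = { 0x42 };

	memcpy(dst, src, sizeof(src));	/* pretend the bulk pass already ran */
	cts_tail(dst, src, sizeof(src), tweak);
	for (size_t i = 0; i < sizeof(dst); i++)
		printf("%02x", dst[i]);
	printf("\n");
	return 0;
}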