forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/arm64/crypto/sha256-glue.c
@@ -1,22 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
  *
  * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
-#include <linux/cryptohash.h>
 #include <linux/types.h>
 #include <linux/string.h>
 
@@ -31,39 +26,50 @@
                                         unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+                                      int blocks)
+{
+        sha256_block_data_order(sst->state, src, blocks);
+}
+
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                   unsigned int num_blks);
 
-static int sha256_update(struct shash_desc *desc, const u8 *data,
-                         unsigned int len)
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+                                int blocks)
 {
-        return sha256_base_do_update(desc, data, len,
-                        (sha256_block_fn *)sha256_block_data_order);
+        sha256_block_neon(sst->state, src, blocks);
 }
 
-static int sha256_finup(struct shash_desc *desc, const u8 *data,
-                        unsigned int len, u8 *out)
+static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
+                                      unsigned int len)
+{
+        return sha256_base_do_update(desc, data, len,
+                                     __sha256_block_data_order);
+}
+
+static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
+                                     unsigned int len, u8 *out)
 {
         if (len)
                 sha256_base_do_update(desc, data, len,
-                        (sha256_block_fn *)sha256_block_data_order);
-        sha256_base_do_finalize(desc,
-                        (sha256_block_fn *)sha256_block_data_order);
+                                      __sha256_block_data_order);
+        sha256_base_do_finalize(desc, __sha256_block_data_order);
 
         return sha256_base_finish(desc, out);
 }
 
-static int sha256_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
 {
-        return sha256_finup(desc, NULL, 0, out);
+        return crypto_sha256_arm64_finup(desc, NULL, 0, out);
 }
 
 static struct shash_alg algs[] = { {
         .digestsize = SHA256_DIGEST_SIZE,
         .init = sha256_base_init,
-        .update = sha256_update,
-        .final = sha256_final,
-        .finup = sha256_finup,
+        .update = crypto_sha256_arm64_update,
+        .final = crypto_sha256_arm64_final,
+        .finup = crypto_sha256_arm64_finup,
         .descsize = sizeof(struct sha256_state),
         .base.cra_name = "sha256",
         .base.cra_driver_name = "sha256-arm64",
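
Review note: the new __sha256_block_data_order()/__sha256_block_neon() wrappers exist because the assembly entry points take a raw u32 digest pointer, while the sha256_base_do_*() helpers expect a callback operating on struct sha256_state; the old (sha256_block_fn *) casts therefore made an indirect call through a mismatched prototype. For reference only (a sketch of the declarations involved, not part of this patch), the callback type in include/crypto/sha256_base.h looks roughly like this:

/* Sketch only, not part of the diff above. */
struct sha256_state;

/* Callback type expected by sha256_base_do_update()/sha256_base_do_finalize()
 * (declared in include/crypto/sha256_base.h around this kernel version). */
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
                               int blocks);

/* The OpenSSL-derived assembly routine takes the raw state words instead,
 * so the thin C wrappers above adapt one signature to the other. */
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);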
@@ -73,9 +79,9 @@
 }, {
         .digestsize = SHA224_DIGEST_SIZE,
         .init = sha224_base_init,
-        .update = sha256_update,
-        .final = sha256_final,
-        .finup = sha256_finup,
+        .update = crypto_sha256_arm64_update,
+        .final = crypto_sha256_arm64_final,
+        .finup = crypto_sha256_arm64_finup,
         .descsize = sizeof(struct sha256_state),
         .base.cra_name = "sha224",
         .base.cra_driver_name = "sha224-arm64",
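
For context, callers do not use these functions directly; they go through the generic shash API, which may select the algorithms registered here. A minimal, hypothetical in-kernel sketch (names and error handling are illustrative, assuming process context):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

/* Hypothetical example, not part of this driver: hash a buffer with whatever
 * "sha256" implementation the crypto API picks, possibly "sha256-arm64". */
static int example_sha256_digest(const u8 *data, unsigned int len,
                                 u8 out[SHA256_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                err = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return err;
}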
@@ -89,9 +95,9 @@
 {
         struct sha256_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return sha256_base_do_update(desc, data, len,
-                                (sha256_block_fn *)sha256_block_data_order);
+                                __sha256_block_data_order);
 
         while (len > 0) {
                 unsigned int chunk = len;
@@ -101,14 +107,13 @@
                  * input when running on a preemptible kernel, but process the
                  * data block by block instead.
                  */
-                if (IS_ENABLED(CONFIG_PREEMPT) &&
+                if (IS_ENABLED(CONFIG_PREEMPTION) &&
                     chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                         chunk = SHA256_BLOCK_SIZE -
                                 sctx->count % SHA256_BLOCK_SIZE;
 
                 kernel_neon_begin();
-                sha256_base_do_update(desc, data, chunk,
-                                (sha256_block_fn *)sha256_block_neon);
+                sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
                 kernel_neon_end();
                 data += chunk;
                 len -= chunk;
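
Worked example of the clamp above (illustrative numbers, not from the patch): on a preemptible kernel the loop keeps each kernel_neon_begin()/kernel_neon_end() section down to at most one SHA-256 block.

/* Mirrors the clamp in sha256_update_neon(); SHA256_BLOCK_SIZE is 64
 * (from <crypto/sha.h>). With 40 bytes already buffered and len == 100:
 *   pass 1: len = 100, buffered 40 -> chunk clamped to 64 - 40 = 24 (one block)
 *   pass 2: len =  76, buffered  0 -> chunk clamped to 64 -  0 = 64 (one block)
 *   pass 3: len =  12, buffered  0 -> 12 + 0 <= 64, no clamp, data only buffered
 * so NEON is never held across more than one block at a time. */
static unsigned int clamp_to_block(unsigned int chunk, unsigned int count)
{
        if (chunk + count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                chunk = SHA256_BLOCK_SIZE - count % SHA256_BLOCK_SIZE;
        return chunk;
}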
@@ -119,18 +124,16 @@
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                              unsigned int len, u8 *out)
 {
-        if (!may_use_simd()) {
+        if (!crypto_simd_usable()) {
                 if (len)
                         sha256_base_do_update(desc, data, len,
-                                (sha256_block_fn *)sha256_block_data_order);
-                sha256_base_do_finalize(desc,
-                                (sha256_block_fn *)sha256_block_data_order);
+                                              __sha256_block_data_order);
+                sha256_base_do_finalize(desc, __sha256_block_data_order);
         } else {
                 if (len)
                         sha256_update_neon(desc, data, len);
                 kernel_neon_begin();
-                sha256_base_do_finalize(desc,
-                                (sha256_block_fn *)sha256_block_neon);
+                sha256_base_do_finalize(desc, __sha256_block_neon);
                 kernel_neon_end();
         }
         return sha256_base_finish(desc, out);
@@ -173,7 +176,7 @@
         if (ret)
                 return ret;
 
-        if (elf_hwcap & HWCAP_ASIMD) {
+        if (cpu_have_named_feature(ASIMD)) {
                 ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
                 if (ret)
                         crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
@@ -183,7 +186,7 @@
 
 static void __exit sha256_mod_fini(void)
 {
-        if (elf_hwcap & HWCAP_ASIMD)
+        if (cpu_have_named_feature(ASIMD))
                 crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
         crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
 }
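
To sanity-check the registered drivers on a running target, /proc/crypto should list the "sha256-arm64" (and, where ASIMD is present, the NEON) entries, and a hypothetical userspace test can exercise the hash through AF_ALG. A minimal sketch, not part of the patch, with error handling trimmed:

/* Hashes "abc" with whatever sha256 implementation the kernel selects,
 * possibly the driver registered above. Requires AF_ALG support. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha256",
        };
        unsigned char digest[32];
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;
        opfd = accept(tfmfd, NULL, 0);
        if (opfd < 0)
                return 1;

        if (write(opfd, "abc", 3) != 3 ||
            read(opfd, digest, sizeof(digest)) != (ssize_t)sizeof(digest))
                return 1;

        for (i = 0; i < (int)sizeof(digest); i++)
                printf("%02x", digest[i]);
        putchar('\n');

        close(opfd);
        close(tfmfd);
        return 0;
}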