forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/x86/crypto/aesni-intel_glue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Support for Intel AES-NI instructions. This file contains glue
  * code, the real AES implementation is in intel-aes_asm.S.
@@ -12,11 +13,6 @@
  * Tadeusz Struk (tadeusz.struk@intel.com)
  * Aidan O'Mahony (aidan.o.mahony@intel.com)
  * Copyright (c) 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/hardirq.h>
@@ -25,14 +21,12 @@
 #include <linux/err.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
-#include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
 #include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/aes.h>
+#include <asm/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/simd.h>
@@ -84,15 +78,13 @@
 	u8 current_counter[GCM_BLOCK_LEN];
 	u64 partial_block_len;
 	u64 unused;
-	u8 hash_keys[GCM_BLOCK_LEN * 8];
+	u8 hash_keys[GCM_BLOCK_LEN * 16];
 };
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
-			  const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len);
 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -102,11 +94,14 @@
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-int crypto_fpu_init(void);
-void crypto_fpu_exit(void);
-
 #define AVX_GEN2_OPTSIZE 640
 #define AVX_GEN4_OPTSIZE 4096
+
+asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+				  const u8 *in, unsigned int len, u8 *iv);
+
+asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+				  const u8 *in, unsigned int len, u8 *iv);
 
 #ifdef CONFIG_X86_64
 
@@ -114,9 +109,6 @@
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
-
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
-				 const u8 *in, bool enc, u8 *iv);
 
 /* asmlinkage void aesni_gcm_enc()
  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
@@ -178,7 +170,24 @@
 		struct gcm_context_data *gdata,
 		u8 *auth_tag, unsigned long auth_tag_len);
 
-#ifdef CONFIG_AS_AVX
+static const struct aesni_gcm_tfm_s {
+	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
+		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
+	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+			   const u8 *in, unsigned long plaintext_len);
+	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+			   const u8 *in, unsigned long ciphertext_len);
+	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
+			 u8 *auth_tag, unsigned long auth_tag_len);
+} *aesni_gcm_tfm;
+
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
+	.init = &aesni_gcm_init,
+	.enc_update = &aesni_gcm_enc_update,
+	.dec_update = &aesni_gcm_dec_update,
+	.finalize = &aesni_gcm_finalize,
+};
+
 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 		void *keys, u8 *out, unsigned int num_bytes);
 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -186,135 +195,88 @@
 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 		void *keys, u8 *out, unsigned int num_bytes);
 /*
- * asmlinkage void aesni_gcm_precomp_avx_gen2()
+ * asmlinkage void aesni_gcm_init_avx_gen2()
  * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
-asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
+					struct gcm_context_data *gdata,
+					u8 *iv,
+					u8 *hash_subkey,
+					const u8 *aad,
+					unsigned long aad_len);
 
-asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
+				const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
+				const u8 *in,
+				unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
+				struct gcm_context_data *gdata,
+				u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
 			const u8 *in, unsigned long plaintext_len, u8 *iv,
 			const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len);
 
-asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 			const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len);
 
-static void aesni_gcm_enc_avx(void *ctx,
-			struct gcm_context_data *data, u8 *out,
-			const u8 *in, unsigned long plaintext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len)
-{
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
-		aesni_gcm_enc(ctx, data, out, in,
-			      plaintext_len, iv, hash_subkey, aad,
-			      aad_len, auth_tag, auth_tag_len);
-	} else {
-		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
-		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	}
-}
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
+	.init = &aesni_gcm_init_avx_gen2,
+	.enc_update = &aesni_gcm_enc_update_avx_gen2,
+	.dec_update = &aesni_gcm_dec_update_avx_gen2,
+	.finalize = &aesni_gcm_finalize_avx_gen2,
+};
 
-static void aesni_gcm_dec_avx(void *ctx,
-			struct gcm_context_data *data, u8 *out,
-			const u8 *in, unsigned long ciphertext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len)
-{
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
-		aesni_gcm_dec(ctx, data, out, in,
-			      ciphertext_len, iv, hash_subkey, aad,
-			      aad_len, auth_tag, auth_tag_len);
-	} else {
-		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
-		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	}
-}
-#endif
-
-#ifdef CONFIG_AS_AVX2
 /*
- * asmlinkage void aesni_gcm_precomp_avx_gen4()
+ * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
-asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
+					struct gcm_context_data *gdata,
+					u8 *iv,
+					u8 *hash_subkey,
+					const u8 *aad,
+					unsigned long aad_len);
 
-asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
+				const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
+				const u8 *in,
+				unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
+				struct gcm_context_data *gdata,
+				u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
 			const u8 *in, unsigned long plaintext_len, u8 *iv,
 			const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len);
 
-asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
+				struct gcm_context_data *gdata, u8 *out,
 			const u8 *in, unsigned long ciphertext_len, u8 *iv,
 			const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len);
 
-static void aesni_gcm_enc_avx2(void *ctx,
-			struct gcm_context_data *data, u8 *out,
-			const u8 *in, unsigned long plaintext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len)
-{
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
-		aesni_gcm_enc(ctx, data, out, in,
-			      plaintext_len, iv, hash_subkey, aad,
-			      aad_len, auth_tag, auth_tag_len);
-	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
-		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
-		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	} else {
-		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
-		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	}
-}
-
-static void aesni_gcm_dec_avx2(void *ctx,
-			struct gcm_context_data *data, u8 *out,
-			const u8 *in, unsigned long ciphertext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len)
-{
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
-		aesni_gcm_dec(ctx, data, out, in,
-			      ciphertext_len, iv, hash_subkey,
-			      aad, aad_len, auth_tag, auth_tag_len);
-	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
-		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
-		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	} else {
-		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
-		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
-					aad_len, auth_tag, auth_tag_len);
-	}
-}
-#endif
-
-static void (*aesni_gcm_enc_tfm)(void *ctx,
-				 struct gcm_context_data *data, u8 *out,
-				 const u8 *in, unsigned long plaintext_len,
-				 u8 *iv, u8 *hash_subkey, const u8 *aad,
-				 unsigned long aad_len, u8 *auth_tag,
-				 unsigned long auth_tag_len);
-
-static void (*aesni_gcm_dec_tfm)(void *ctx,
-				 struct gcm_context_data *data, u8 *out,
-				 const u8 *in, unsigned long ciphertext_len,
-				 u8 *iv, u8 *hash_subkey, const u8 *aad,
-				 unsigned long aad_len, u8 *auth_tag,
-				 unsigned long auth_tag_len);
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
+	.init = &aesni_gcm_init_avx_gen4,
+	.enc_update = &aesni_gcm_enc_update_avx_gen4,
+	.dec_update = &aesni_gcm_dec_update_avx_gen4,
+	.finalize = &aesni_gcm_finalize_avx_gen4,
+};
 
 static inline struct
 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
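Note: the per-call wrappers that used to choose between the SSE/AVX/AVX2 GCM entry points (and re-derived the hash subkey on every request) are replaced above by one `struct aesni_gcm_tfm_s` ops table per implementation plus a module-wide `aesni_gcm_tfm` pointer that is set once at init. Below is a minimal standalone sketch of that dispatch shape only; every name in it (`gcm_ops`, `pick_gcm_ops`, the stand-in routines) is illustrative and not a kernel symbol.

```c
#include <stdbool.h>
#include <stdio.h>

/* One ops table per implementation, mirroring init/enc_update/finalize. */
struct gcm_ops {
	const char *name;
	void (*init)(void *ctx);
	void (*enc_update)(void *ctx, unsigned char *out,
			   const unsigned char *in, unsigned long len);
	void (*finalize)(void *ctx, unsigned char *tag, unsigned long tag_len);
};

static void toy_init(void *ctx) { (void)ctx; }
static void toy_enc_update(void *ctx, unsigned char *out,
			   const unsigned char *in, unsigned long len)
{ (void)ctx; while (len--) *out++ = *in++; }	/* stand-in "encryption" */
static void toy_finalize(void *ctx, unsigned char *tag, unsigned long tag_len)
{ (void)ctx; while (tag_len--) *tag++ = 0; }	/* stand-in tag */

static const struct gcm_ops gcm_ops_sse = {
	.name = "sse", .init = toy_init,
	.enc_update = toy_enc_update, .finalize = toy_finalize,
};
static const struct gcm_ops gcm_ops_avx2 = {
	.name = "avx2", .init = toy_init,
	.enc_update = toy_enc_update, .finalize = toy_finalize,
};

/* Chosen once, e.g. from CPU feature bits, then used for every request. */
static const struct gcm_ops *gcm_ops_active;

static void pick_gcm_ops(bool have_avx2)
{
	gcm_ops_active = have_avx2 ? &gcm_ops_avx2 : &gcm_ops_sse;
}

int main(void)
{
	unsigned char out[4], tag[16];

	pick_gcm_ops(false);
	gcm_ops_active->init(NULL);
	gcm_ops_active->enc_update(NULL, out, (const unsigned char *)"data", 4);
	gcm_ops_active->finalize(NULL, tag, sizeof(tag));
	printf("used %s ops\n", gcm_ops_active->name);
	return 0;
}
```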
@@ -351,17 +313,14 @@
 			      const u8 *in_key, unsigned int key_len)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
-	u32 *flags = &tfm->crt_flags;
 	int err;
 
 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	    key_len != AES_KEYSIZE_256)
 		return -EINVAL;
-	}
 
-	if (!irq_fpu_usable())
-		err = crypto_aes_expand_key(ctx, in_key, key_len);
+	if (!crypto_simd_usable())
+		err = aes_expandkey(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
 		err = aesni_set_key(ctx, in_key, key_len);
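Note: setkey (and the single-block encrypt/decrypt helpers in the following hunk) now branch on crypto_simd_usable() and fall back to the generic AES library routines instead of the removed crypto_aes_*_x86 helpers; only the AES-NI branch runs between kernel_fpu_begin() and kernel_fpu_end(). A compilable toy of that shape, where simd_usable()/fpu_begin()/fpu_end() and both key-expansion routines are illustrative stand-ins rather than kernel symbols:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_aes_ctx { unsigned char round_keys[240]; unsigned int key_len; };

static bool simd_usable(void) { return true; }	/* pretend SIMD is allowed */
static void fpu_begin(void) { }
static void fpu_end(void) { }

static int generic_expand_key(struct toy_aes_ctx *ctx,
			      const unsigned char *key, unsigned int len)
{
	memcpy(ctx->round_keys, key, len);	/* placeholder for real expansion */
	ctx->key_len = len;
	return 0;
}

static int accel_expand_key(struct toy_aes_ctx *ctx,
			    const unsigned char *key, unsigned int len)
{
	return generic_expand_key(ctx, key, len);	/* placeholder for the AES-NI path */
}

/* Shape of the new setkey path: generic fallback when the FPU/SIMD state
 * must not be touched, otherwise the accelerated routine in an FPU section. */
static int set_key(struct toy_aes_ctx *ctx, const unsigned char *key,
		   unsigned int len)
{
	int err;

	if (!simd_usable()) {
		err = generic_expand_key(ctx, key, len);
	} else {
		fpu_begin();
		err = accel_expand_key(ctx, key, len);
		fpu_end();
	}
	return err;
}

int main(void)
{
	struct toy_aes_ctx ctx;
	unsigned char key[16] = { 0 };

	printf("set_key -> %d\n", set_key(&ctx, key, sizeof(key)));
	return 0;
}
```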
@@ -377,44 +336,30 @@
 	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 }
 
-static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
-		crypto_aes_encrypt_x86(ctx, dst, src);
-	else {
+	if (!crypto_simd_usable()) {
+		aes_encrypt(ctx, dst, src);
+	} else {
 		kernel_fpu_begin();
 		aesni_enc(ctx, dst, src);
 		kernel_fpu_end();
 	}
 }
 
-static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
-		crypto_aes_decrypt_x86(ctx, dst, src);
-	else {
+	if (!crypto_simd_usable()) {
+		aes_decrypt(ctx, dst, src);
+	} else {
 		kernel_fpu_begin();
 		aesni_dec(ctx, dst, src);
 		kernel_fpu_end();
 	}
-}
-
-static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
-	aesni_enc(ctx, dst, src);
-}
-
-static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
-	aesni_dec(ctx, dst, src);
 }
 
 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -434,14 +379,14 @@
 
 	err = skcipher_walk_virt(&walk, req, true);
 
+	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
-		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
-		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
+	kernel_fpu_end();
 
 	return err;
 }
@@ -456,14 +401,14 @@
 
 	err = skcipher_walk_virt(&walk, req, true);
 
+	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
-		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
-		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
+	kernel_fpu_end();
 
 	return err;
 }
@@ -478,14 +423,14 @@
 
 	err = skcipher_walk_virt(&walk, req, true);
 
+	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
-		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
-		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
+	kernel_fpu_end();
 
 	return err;
 }
@@ -500,14 +445,14 @@
 
 	err = skcipher_walk_virt(&walk, req, true);
 
+	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
-		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
-		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
+	kernel_fpu_end();
 
 	return err;
 }
@@ -528,7 +473,6 @@
 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv)
 {
@@ -545,7 +489,6 @@
 	else
 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 }
-#endif
 
 static int ctr_crypt(struct skcipher_request *req)
 {
@@ -557,20 +500,18 @@
 
 	err = skcipher_walk_virt(&walk, req, true);
 
+	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-		kernel_fpu_begin();
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			          nbytes & AES_BLOCK_MASK, walk.iv);
-		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = skcipher_walk_done(&walk, nbytes);
 	}
 	if (walk.nbytes) {
-		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
-		kernel_fpu_end();
 		err = skcipher_walk_done(&walk, 0);
 	}
+	kernel_fpu_end();
 
 	return err;
 }
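Note: in the ECB, CBC and CTR paths above, the kernel_fpu_begin()/kernel_fpu_end() pair is hoisted out of the walk loop, so a request is processed in one FPU section instead of one section per walk step. This appears safe here because the walk is started with skcipher_walk_virt(..., true) (atomic), which should keep the loop body from sleeping inside the section. A toy of the before/after loop shape, with stand-in begin/end/process functions:

```c
#include <stdio.h>

/* fpu_begin(), fpu_end() and process_chunk() are illustrative stand-ins. */
static int begin_calls;
static void fpu_begin(void) { begin_calls++; }
static void fpu_end(void) { }
static void process_chunk(int chunk) { (void)chunk; }

/* Old shape: one FPU section per chunk. */
static void per_chunk(int nchunks)
{
	for (int i = 0; i < nchunks; i++) {
		fpu_begin();
		process_chunk(i);
		fpu_end();
	}
}

/* New shape: one section around the whole loop (valid only when the loop
 * body never sleeps, which the atomic walk is meant to guarantee). */
static void hoisted(int nchunks)
{
	fpu_begin();
	for (int i = 0; i < nchunks; i++)
		process_chunk(i);
	fpu_end();
}

int main(void)
{
	begin_calls = 0; per_chunk(8); printf("per-chunk: %d begin() calls\n", begin_calls);
	begin_calls = 0; hoisted(8);   printf("hoisted:   %d begin() call\n", begin_calls);
	return 0;
}
```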
@@ -599,29 +540,24 @@
 }
 
 
-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_enc(ctx, out, in);
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
 }
 
-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }
 
-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+	aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }
 
-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
-}
-
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
-{
-	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+	aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }
 
 static const struct common_glue_ctx aesni_enc_xts = {
@@ -629,11 +565,11 @@
 	.fpu_blocks_limit = 1,
 
 	.funcs = { {
-		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+		.num_blocks = 32,
+		.fn_u = { .xts = aesni_xts_enc32 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+		.fn_u = { .xts = aesni_xts_enc }
 	} }
 };
 
@@ -642,11 +578,11 @@
 	.fpu_blocks_limit = 1,
 
 	.funcs = { {
-		.num_blocks = 8,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+		.num_blocks = 32,
+		.fn_u = { .xts = aesni_xts_dec32 }
 	}, {
 		.num_blocks = 1,
-		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+		.fn_u = { .xts = aesni_xts_dec }
 	} }
 };
 
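Note: with aesni_xts_crypt8() gone, the common_glue_ctx tables advertise a 32-block bulk routine backed by the new aesni_xts_encrypt()/aesni_xts_decrypt() assembly, with the 1-block helpers handling the tail. A standalone toy of how a descending block-count table like this can be walked (a simplified sketch; the names and the walking loop are stand-ins, not the kernel's glue helper):

```c
#include <stdio.h>

/* Each entry: "if at least num_blocks remain, use this routine". The table is
 * ordered from the largest stride down to 1, like aesni_enc_xts.funcs[]. */
struct xts_func {
	unsigned int num_blocks;
	void (*crypt)(unsigned int nblocks);
};

static void crypt32(unsigned int n) { printf("bulk: %u blocks\n", n); }
static void crypt1(unsigned int n)  { printf("tail: %u block\n", n); }

static const struct xts_func funcs[] = {
	{ .num_blocks = 32, .crypt = crypt32 },
	{ .num_blocks = 1,  .crypt = crypt1  },
};

static void crypt_request(unsigned int nblocks)
{
	while (nblocks) {
		for (unsigned int i = 0; i < sizeof(funcs) / sizeof(funcs[0]); i++) {
			if (nblocks >= funcs[i].num_blocks) {
				unsigned int run = funcs[i].num_blocks;

				funcs[i].crypt(run);
				nblocks -= run;
				break;
			}
		}
	}
}

int main(void)
{
	crypt_request(70);	/* two 32-block runs, then six single blocks */
	return 0;
}
```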
@@ -655,10 +591,10 @@
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_enc_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
-				   aes_ctx(ctx->raw_crypt_ctx));
+				   aes_ctx(ctx->raw_crypt_ctx),
+				   false);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
@@ -666,58 +602,30 @@
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_req_128bit(&aesni_dec_xts, req,
-				   XTS_TWEAK_CAST(aesni_xts_tweak),
+	return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
 				   aes_ctx(ctx->raw_tweak_ctx),
-				   aes_ctx(ctx->raw_crypt_ctx));
-}
-
-static int rfc4106_init(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
-				       CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-
-static void rfc4106_exit(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
+				   aes_ctx(ctx->raw_crypt_ctx),
+				   true);
 }
 
 static int
 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 {
-	struct crypto_cipher *tfm;
+	struct crypto_aes_ctx ctx;
 	int ret;
 
-	tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	ret = crypto_cipher_setkey(tfm, key, key_len);
+	ret = aes_expandkey(&ctx, key, key_len);
 	if (ret)
-		goto out_free_cipher;
+		return ret;
 
 	/* Clear the data in the hash sub key container to zero.*/
 	/* We want to cipher all zeros to create the hash sub key. */
 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 
-	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
+	aes_encrypt(&ctx, hash_subkey, hash_subkey);
 
-out_free_cipher:
-	crypto_free_cipher(tfm);
-	return ret;
+	memzero_explicit(&ctx, sizeof(ctx));
+	return 0;
 }
 
 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
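Note: rfc4106_set_hash_subkey() no longer allocates a throwaway "aes" crypto_cipher just to derive the GHASH key; it expands the key on the stack with the AES library and encrypts an all-zero block directly, then wipes the expanded key. The underlying relation is simply H = AES_K(0^128). A standalone toy of the same flow; the "cipher" below is a placeholder XOR, standing in for the kernel's aes_expandkey()/aes_encrypt() pair:

```c
#include <stdio.h>
#include <string.h>

#define BLOCK_LEN 16

struct toy_ctx { unsigned char k[BLOCK_LEN]; };

static int toy_expandkey(struct toy_ctx *ctx, const unsigned char *key,
			 unsigned int key_len)
{
	if (key_len != BLOCK_LEN)
		return -1;
	memcpy(ctx->k, key, key_len);
	return 0;
}

static void toy_encrypt_block(const struct toy_ctx *ctx, unsigned char *out,
			      const unsigned char *in)
{
	for (int i = 0; i < BLOCK_LEN; i++)
		out[i] = in[i] ^ ctx->k[i];	/* placeholder, not AES */
}

/* H = E_K(0^128): encrypt an all-zero block in place, then wipe the key
 * schedule (the kernel uses memzero_explicit() for the wipe). */
static int set_hash_subkey(unsigned char *hash_subkey,
			   const unsigned char *key, unsigned int key_len)
{
	struct toy_ctx ctx;

	if (toy_expandkey(&ctx, key, key_len))
		return -1;
	memset(hash_subkey, 0, BLOCK_LEN);
	toy_encrypt_block(&ctx, hash_subkey, hash_subkey);
	memset(&ctx, 0, sizeof(ctx));
	return 0;
}

int main(void)
{
	unsigned char key[BLOCK_LEN] = { 0x2b, 0x7e, 0x15, 0x16 };
	unsigned char h[BLOCK_LEN];

	if (!set_hash_subkey(h, key, sizeof(key)))
		printf("H[0] = 0x%02x\n", h[0]);
	return 0;
}
```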
@@ -725,10 +633,9 @@
 {
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
 
-	if (key_len < 4) {
-		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (key_len < 4)
 		return -EINVAL;
-	}
+
 	/*Account for 4 byte nonce at the end.*/
 	key_len -= 4;
 
@@ -739,15 +646,8 @@
 	rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 }
 
-static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
-				  unsigned int key_len)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
-}
-
+/* This is the Integrity Check Value (aka the authentication tag) length and can
+ * be 8, 12 or 16 bytes long. */
 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 				       unsigned int authsize)
 {
@@ -761,17 +661,6 @@
 	}
 
 	return 0;
-}
-
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
-				       unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
 }
 
 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
@@ -799,7 +688,9 @@
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
-	struct gcm_context_data data AESNI_ALIGN_ATTR;
+	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
+	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 	struct scatter_walk dst_sg_walk = {};
 	unsigned long left = req->cryptlen;
 	unsigned long len, srclen, dstlen;
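Note: the gcm_context_data block (and, in later hunks, the 16-byte IVs) moves from a stack variable with an alignment attribute to an 8-byte-aligned buffer that is over-allocated by AESNI_ALIGN - 8 bytes and then aligned by hand with PTR_ALIGN(), rather than relying on the compiler to over-align a stack slot. A small userspace demonstration of the same trick (ptr_align() here is a local helper mirroring what the kernel's PTR_ALIGN() macro does, not the macro itself):

```c
#include <stdint.h>
#include <stdio.h>

#define WANT_ALIGN 16	/* stand-in for AESNI_ALIGN */

/* Round a pointer up to the next multiple of 'align' (a power of two). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

struct gcm_data { unsigned char aad_hash[16]; unsigned long aad_length; };

int main(void)
{
	/* Over-allocate by WANT_ALIGN - 8 so a 16-byte-aligned object always
	 * fits, even if the buffer itself is only 8-byte aligned. */
	unsigned char databuf[sizeof(struct gcm_data) + (WANT_ALIGN - 8)];
	struct gcm_data *data = ptr_align(databuf, WANT_ALIGN);

	data->aad_length = 0;
	printf("buf %p -> aligned %p (mod 16 = %lu)\n",
	       (void *)databuf, (void *)data,
	       (unsigned long)((uintptr_t)data % WANT_ALIGN));
	return 0;
}
```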
@@ -815,6 +706,11 @@
 
 	if (!enc)
 		left -= auth_tag_len;
+
+	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
+		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
+		gcm_tfm = &aesni_gcm_tfm_sse;
 
 	/* Linearize assoc, if not already linear */
 	if (req->src->length >= assoclen && req->src->length &&
@@ -843,8 +739,7 @@
 	}
 
 	kernel_fpu_begin();
-	aesni_gcm_init(aes_ctx, &data, iv,
-		       hash_subkey, assoc, assoclen);
+	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 	if (req->src != req->dst) {
 		while (left) {
 			src = scatterwalk_map(&src_sg_walk);
@@ -854,10 +749,10 @@
 			len = min(srclen, dstlen);
 			if (len) {
 				if (enc)
-					aesni_gcm_enc_update(aes_ctx, &data,
+					gcm_tfm->enc_update(aes_ctx, data,
 							     dst, src, len);
 				else
-					aesni_gcm_dec_update(aes_ctx, &data,
+					gcm_tfm->dec_update(aes_ctx, data,
 							     dst, src, len);
 			}
 			left -= len;
@@ -875,10 +770,10 @@
 			len = scatterwalk_clamp(&src_sg_walk, left);
 			if (len) {
 				if (enc)
-					aesni_gcm_enc_update(aes_ctx, &data,
+					gcm_tfm->enc_update(aes_ctx, data,
 							     src, src, len);
 				else
-					aesni_gcm_dec_update(aes_ctx, &data,
+					gcm_tfm->dec_update(aes_ctx, data,
 							     src, src, len);
 			}
 			left -= len;
@@ -887,7 +782,7 @@
 			scatterwalk_done(&src_sg_walk, 1, left);
 		}
 	}
-	aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
+	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
 	kernel_fpu_end();
 
 	if (!assocmem)
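Note: because the AVX and AVX2 GCM routines only pay off above AVX_GEN2_OPTSIZE (640 bytes) and AVX_GEN4_OPTSIZE (4096 bytes), gcmaes_crypt_by_sg() may step the chosen ops table down per request before issuing init/update/finalize. A compilable toy of that downgrade logic, with the thresholds copied from the #defines above and stand-in markers in place of the real ops tables:

```c
#include <stdio.h>

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

/* Stand-ins for the three ops tables; only their identity matters here. */
static const int tfm_sse = 0, tfm_avx_gen2 = 1, tfm_avx_gen4 = 2;

static const int *pick_for_len(const int *preferred, unsigned long len)
{
	const int *tfm = preferred;

	if (len < AVX_GEN4_OPTSIZE && tfm == &tfm_avx_gen4)
		tfm = &tfm_avx_gen2;
	if (len < AVX_GEN2_OPTSIZE && tfm == &tfm_avx_gen2)
		tfm = &tfm_sse;
	return tfm;
}

static const char *name(const int *tfm)
{
	return tfm == &tfm_sse ? "sse" : tfm == &tfm_avx_gen2 ? "avx" : "avx2";
}

int main(void)
{
	printf("64 B  -> %s\n", name(pick_for_len(&tfm_avx_gen4, 64)));
	printf("1 KiB -> %s\n", name(pick_for_len(&tfm_avx_gen4, 1024)));
	printf("8 KiB -> %s\n", name(pick_for_len(&tfm_avx_gen4, 8192)));
	return 0;
}
```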
@@ -920,147 +815,15 @@
 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
-	u8 one_entry_in_sg = 0;
-	u8 *src, *dst, *assoc;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
-	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk = {};
-	struct gcm_context_data data AESNI_ALIGN_ATTR;
-
-	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
-	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
-	    req->cryptlen < AVX_GEN2_OPTSIZE) {
-		return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
-					  aes_ctx);
-	}
-	if (sg_is_last(req->src) &&
-	    (!PageHighMem(sg_page(req->src)) ||
-	    req->src->offset + req->src->length <= PAGE_SIZE) &&
-	    sg_is_last(req->dst) &&
-	    (!PageHighMem(sg_page(req->dst)) ||
-	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
-		one_entry_in_sg = 1;
-		scatterwalk_start(&src_sg_walk, req->src);
-		assoc = scatterwalk_map(&src_sg_walk);
-		src = assoc + req->assoclen;
-		dst = src;
-		if (unlikely(req->src != req->dst)) {
-			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
-		}
-	} else {
-		/* Allocate memory for src, dst, assoc */
-		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
-			GFP_ATOMIC);
-		if (unlikely(!assoc))
-			return -ENOMEM;
-		scatterwalk_map_and_copy(assoc, req->src, 0,
-					 req->assoclen + req->cryptlen, 0);
-		src = assoc + req->assoclen;
-		dst = src;
-	}
-
-	kernel_fpu_begin();
-	aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
-			  hash_subkey, assoc, assoclen,
-			  dst + req->cryptlen, auth_tag_len);
-	kernel_fpu_end();
-
-	/* The authTag (aka the Integrity Check Value) needs to be written
-	 * back to the packet. */
-	if (one_entry_in_sg) {
-		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst - req->assoclen);
-			scatterwalk_advance(&dst_sg_walk, req->dst->length);
-			scatterwalk_done(&dst_sg_walk, 1, 0);
-		}
-		scatterwalk_unmap(assoc);
-		scatterwalk_advance(&src_sg_walk, req->src->length);
-		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
-	} else {
-		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
-					 req->cryptlen + auth_tag_len, 1);
-		kfree(assoc);
-	}
-	return 0;
+	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
+				  aes_ctx);
 }
 
 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
-	u8 one_entry_in_sg = 0;
-	u8 *src, *dst, *assoc;
-	unsigned long tempCipherLen = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
-	u8 authTag[16];
-	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk = {};
-	struct gcm_context_data data AESNI_ALIGN_ATTR;
-	int retval = 0;
-
-	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
-	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
-	    req->cryptlen < AVX_GEN2_OPTSIZE) {
-		return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
-					  aes_ctx);
-	}
-	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
-
-	if (sg_is_last(req->src) &&
-	    (!PageHighMem(sg_page(req->src)) ||
-	    req->src->offset + req->src->length <= PAGE_SIZE) &&
-	    sg_is_last(req->dst) && req->dst->length &&
-	    (!PageHighMem(sg_page(req->dst)) ||
-	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
-		one_entry_in_sg = 1;
-		scatterwalk_start(&src_sg_walk, req->src);
-		assoc = scatterwalk_map(&src_sg_walk);
-		src = assoc + req->assoclen;
-		dst = src;
-		if (unlikely(req->src != req->dst)) {
-			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
-		}
-	} else {
-		/* Allocate memory for src, dst, assoc */
-		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
-		if (!assoc)
-			return -ENOMEM;
-		scatterwalk_map_and_copy(assoc, req->src, 0,
-					 req->assoclen + req->cryptlen, 0);
-		src = assoc + req->assoclen;
-		dst = src;
-	}
-
-
-	kernel_fpu_begin();
-	aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
-			  hash_subkey, assoc, assoclen,
-			  authTag, auth_tag_len);
-	kernel_fpu_end();
-
-	/* Compare generated tag with passed in tag. */
-	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-		-EBADMSG : 0;
-
-	if (one_entry_in_sg) {
-		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst - req->assoclen);
-			scatterwalk_advance(&dst_sg_walk, req->dst->length);
-			scatterwalk_done(&dst_sg_walk, 1, 0);
-		}
-		scatterwalk_unmap(assoc);
-		scatterwalk_advance(&src_sg_walk, req->src->length);
-		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
-	} else {
-		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
-					 tempCipherLen, 1);
-		kfree(assoc);
-	}
-	return retval;
-
+	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
+				  aes_ctx);
 }
 
 static int helper_rfc4106_encrypt(struct aead_request *req)
@@ -1068,7 +831,8 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	unsigned int i;
 	__be32 counter = cpu_to_be32(1);
 
@@ -1095,7 +859,8 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	unsigned int i;
 
 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -1115,41 +880,9 @@
 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 			      aes_ctx);
 }
-
-static int gcmaes_wrapper_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	tfm = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		tfm = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, tfm);
-
-	return crypto_aead_encrypt(req);
-}
-
-static int gcmaes_wrapper_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	tfm = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		tfm = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, tfm);
-
-	return crypto_aead_decrypt(req);
-}
 #endif
 
-static struct crypto_alg aesni_algs[] = { {
+static struct crypto_alg aesni_cipher_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-aesni",
 	.cra_priority		= 300,
@@ -1162,28 +895,11 @@
 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
 			.cia_setkey		= aes_set_key,
-			.cia_encrypt		= aes_encrypt,
-			.cia_decrypt		= aes_decrypt
+			.cia_encrypt		= aesni_encrypt,
+			.cia_decrypt		= aesni_decrypt
 		}
 	}
-}, {
-	.cra_name		= "__aes",
-	.cra_driver_name	= "__aes-aesni",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
-	.cra_module		= THIS_MODULE,
-	.cra_u	= {
-		.cipher	= {
-			.cia_min_keysize	= AES_MIN_KEY_SIZE,
-			.cia_max_keysize	= AES_MAX_KEY_SIZE,
-			.cia_setkey		= aes_set_key,
-			.cia_encrypt		= __aes_encrypt,
-			.cia_decrypt		= __aes_decrypt
-		}
-	}
-} };
+};
 
 static struct skcipher_alg aesni_skciphers[] = {
 	{
@@ -1258,22 +974,6 @@
 static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 
-static struct {
-	const char *algname;
-	const char *drvname;
-	const char *basename;
-	struct simd_skcipher_alg *simd;
-} aesni_simd_skciphers2[] = {
-#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
-    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
-	{
-		.algname	= "pcbc(aes)",
-		.drvname	= "pcbc-aes-aesni",
-		.basename	= "fpu(pcbc(__aes-aesni))",
-	},
-#endif
-};
-
 #ifdef CONFIG_X86_64
 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
 				  unsigned int key_len)
@@ -1290,7 +990,8 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	__be32 counter = cpu_to_be32(1);
 
 	memcpy(iv, req->iv, 12);
@@ -1306,7 +1007,8 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 
 	memcpy(iv, req->iv, 12);
 	*((__be32 *)(iv+12)) = counter;
@@ -1315,31 +1017,7 @@
 			      aes_ctx);
 }
 
-static int generic_gcmaes_init(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
-				       CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-
-	return 0;
-}
-
-static void generic_gcmaes_exit(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg aesni_aead_algs[] = { {
+static struct aead_alg aesni_aeads[] = { {
 	.setkey			= common_rfc4106_set_key,
 	.setauthsize		= common_rfc4106_set_authsize,
 	.encrypt		= helper_rfc4106_encrypt,
@@ -1347,30 +1025,13 @@
 	.ivsize			= GCM_RFC4106_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
-		.cra_name		= "__gcm-aes-aesni",
-		.cra_driver_name	= "__driver-gcm-aes-aesni",
+		.cra_name		= "__rfc4106(gcm(aes))",
+		.cra_driver_name	= "__rfc4106-gcm-aesni",
+		.cra_priority		= 400,
 		.cra_flags		= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
 		.cra_alignmask		= AESNI_ALIGN - 1,
-		.cra_module		= THIS_MODULE,
-	},
-}, {
-	.init			= rfc4106_init,
-	.exit			= rfc4106_exit,
-	.setkey			= gcmaes_wrapper_set_key,
-	.setauthsize		= gcmaes_wrapper_set_authsize,
-	.encrypt		= gcmaes_wrapper_encrypt,
-	.decrypt		= gcmaes_wrapper_decrypt,
-	.ivsize			= GCM_RFC4106_IV_SIZE,
-	.maxauthsize		= 16,
-	.base = {
-		.cra_name		= "rfc4106(gcm(aes))",
-		.cra_driver_name	= "rfc4106-gcm-aesni",
-		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
-		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct cryptd_aead *),
 		.cra_module		= THIS_MODULE,
 	},
 }, {
@@ -1381,165 +1042,86 @@
 	.ivsize			= GCM_AES_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
-		.cra_name		= "__generic-gcm-aes-aesni",
-		.cra_driver_name	= "__driver-generic-gcm-aes-aesni",
-		.cra_priority		= 0,
+		.cra_name		= "__gcm(aes)",
+		.cra_driver_name	= "__generic-gcm-aesni",
+		.cra_priority		= 400,
 		.cra_flags		= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
 		.cra_alignmask		= AESNI_ALIGN - 1,
 		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.init			= generic_gcmaes_init,
-	.exit			= generic_gcmaes_exit,
-	.setkey			= gcmaes_wrapper_set_key,
-	.setauthsize		= gcmaes_wrapper_set_authsize,
-	.encrypt		= gcmaes_wrapper_encrypt,
-	.decrypt		= gcmaes_wrapper_decrypt,
-	.ivsize			= GCM_AES_IV_SIZE,
-	.maxauthsize		= 16,
-	.base = {
-		.cra_name		= "gcm(aes)",
-		.cra_driver_name	= "generic-gcm-aesni",
-		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
-		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct cryptd_aead *),
-		.cra_module		= THIS_MODULE,
-	},
 } };
 #else
-static struct aead_alg aesni_aead_algs[0];
+static struct aead_alg aesni_aeads[0];
 #endif
 
+static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
 
 static const struct x86_cpu_id aesni_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_AES),
+	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 
-static void aesni_free_simds(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
-		    aesni_simd_skciphers[i]; i++)
-		simd_skcipher_free(aesni_simd_skciphers[i]);
-
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
-		if (aesni_simd_skciphers2[i].simd)
-			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
-}
-
 static int __init aesni_init(void)
 {
-	struct simd_skcipher_alg *simd;
-	const char *basename;
-	const char *algname;
-	const char *drvname;
 	int err;
-	int i;
 
 	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_AS_AVX2
 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
-		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
-		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
+		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
-#endif
-#ifdef CONFIG_AS_AVX
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		pr_info("AVX version of gcm_enc/dec engaged.\n");
-		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
-		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
-	} else
-#endif
-	{
+		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+	} else {
 		pr_info("SSE version of gcm_enc/dec engaged.\n");
-		aesni_gcm_enc_tfm = aesni_gcm_enc;
-		aesni_gcm_dec_tfm = aesni_gcm_dec;
+		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
 		pr_info("AES CTR mode by8 optimization enabled\n");
 	}
 #endif
-#endif
 
-	err = crypto_fpu_init();
+	err = crypto_register_alg(&aesni_cipher_alg);
 	if (err)
 		return err;
 
-	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+	err = simd_register_skciphers_compat(aesni_skciphers,
+					     ARRAY_SIZE(aesni_skciphers),
+					     aesni_simd_skciphers);
 	if (err)
-		goto fpu_exit;
+		goto unregister_cipher;
 
-	err = crypto_register_skciphers(aesni_skciphers,
-					ARRAY_SIZE(aesni_skciphers));
-	if (err)
-		goto unregister_algs;
-
-	err = crypto_register_aeads(aesni_aead_algs,
-				    ARRAY_SIZE(aesni_aead_algs));
+	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+					 aesni_simd_aeads);
 	if (err)
 		goto unregister_skciphers;
 
-	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
-		algname = aesni_skciphers[i].base.cra_name + 2;
-		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
-		basename = aesni_skciphers[i].base.cra_driver_name;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
-		err = PTR_ERR(simd);
-		if (IS_ERR(simd))
-			goto unregister_simds;
-
-		aesni_simd_skciphers[i] = simd;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
-		algname = aesni_simd_skciphers2[i].algname;
-		drvname = aesni_simd_skciphers2[i].drvname;
-		basename = aesni_simd_skciphers2[i].basename;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
-		err = PTR_ERR(simd);
-		if (IS_ERR(simd))
-			continue;
-
-		aesni_simd_skciphers2[i].simd = simd;
-	}
-
 	return 0;
 
-unregister_simds:
-	aesni_free_simds();
-	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
 unregister_skciphers:
-	crypto_unregister_skciphers(aesni_skciphers,
-				    ARRAY_SIZE(aesni_skciphers));
-unregister_algs:
-	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-fpu_exit:
-	crypto_fpu_exit();
+	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
+unregister_cipher:
+	crypto_unregister_alg(&aesni_cipher_alg);
 	return err;
 }
 
 static void __exit aesni_exit(void)
 {
-	aesni_free_simds();
-	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
-	crypto_unregister_skciphers(aesni_skciphers,
-				    ARRAY_SIZE(aesni_skciphers));
-	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-
-	crypto_fpu_exit();
+	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
+	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
+	crypto_unregister_alg(&aesni_cipher_alg);
 }
 
 late_initcall(aesni_init);
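Note: aesni_init() ends up with a much simpler error path: register the bare cipher, then the skcipher and AEAD families via the simd_register_*_compat() helpers, and on failure unwind whatever already succeeded in reverse order; aesni_exit() performs the same unwind unconditionally. A compilable toy of that register/goto-unwind shape (the register_*/unregister_* functions below are stand-ins, not the crypto API):

```c
#include <stdio.h>

static int register_cipher(void)    { puts("cipher registered");    return 0; }
static int register_skciphers(void) { puts("skciphers registered"); return 0; }
static int register_aeads(void)     { puts("aeads registered");     return -1; /* simulate failure */ }

static void unregister_cipher(void)    { puts("cipher unregistered"); }
static void unregister_skciphers(void) { puts("skciphers unregistered"); }

static int module_init_sketch(void)
{
	int err;

	err = register_cipher();
	if (err)
		return err;

	err = register_skciphers();
	if (err)
		goto out_unregister_cipher;

	err = register_aeads();
	if (err)
		goto out_unregister_skciphers;

	return 0;

out_unregister_skciphers:
	unregister_skciphers();
out_unregister_cipher:
	unregister_cipher();
	return err;
}

int main(void)
{
	printf("init -> %d\n", module_init_sketch());
	return 0;
}
```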