2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/arch/x86/crypto/aesni-intel_glue.c
+++ b/kernel/arch/x86/crypto/aesni-intel_glue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
@@ -12,11 +13,6 @@
 * Tadeusz Struk (tadeusz.struk@intel.com)
 * Aidan O'Mahony (aidan.o.mahony@intel.com)
 * Copyright (c) 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
 */

 #include <linux/hardirq.h>
@@ -25,14 +21,12 @@
 #include <linux/err.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
-#include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
 #include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/aes.h>
+#include <asm/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/simd.h>
@@ -84,15 +78,13 @@
 u8 current_counter[GCM_BLOCK_LEN];
 u64 partial_block_len;
 u64 unused;
- u8 hash_keys[GCM_BLOCK_LEN * 8];
+ u8 hash_keys[GCM_BLOCK_LEN * 16];
 };

 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
 const u8 *in, unsigned int len);
 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -102,11 +94,14 @@
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 const u8 *in, unsigned int len, u8 *iv);

-int crypto_fpu_init(void);
-void crypto_fpu_exit(void);
-
 #define AVX_GEN2_OPTSIZE 640
 #define AVX_GEN4_OPTSIZE 4096
+
+asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+
+asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);

 #ifdef CONFIG_X86_64

@@ -114,9 +109,6 @@
 const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 const u8 *in, unsigned int len, u8 *iv);
-
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, bool enc, u8 *iv);

 /* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
@@ -178,7 +170,24 @@
 struct gcm_context_data *gdata,
 u8 *auth_tag, unsigned long auth_tag_len);

-#ifdef CONFIG_AS_AVX
+static const struct aesni_gcm_tfm_s {
+ void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
+ void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+ void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long ciphertext_len);
+ void (*finalize)(void *ctx, struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
+} *aesni_gcm_tfm;
+
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
+ .init = &aesni_gcm_init,
+ .enc_update = &aesni_gcm_enc_update,
+ .dec_update = &aesni_gcm_dec_update,
+ .finalize = &aesni_gcm_finalize,
+};
+
 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 void *keys, u8 *out, unsigned int num_bytes);
 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -186,135 +195,88 @@
 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
 void *keys, u8 *out, unsigned int num_bytes);
 /*
- * asmlinkage void aesni_gcm_precomp_avx_gen2()
+ * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
-asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey,
+ const u8 *aad,
+ unsigned long aad_len);

-asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
 const u8 *in, unsigned long plaintext_len, u8 *iv,
 const u8 *aad, unsigned long aad_len,
 u8 *auth_tag, unsigned long auth_tag_len);

-asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
 const u8 *in, unsigned long ciphertext_len, u8 *iv,
 const u8 *aad, unsigned long aad_len,
 u8 *auth_tag, unsigned long auth_tag_len);

-static void aesni_gcm_enc_avx(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
- aesni_gcm_enc(ctx, data, out, in,
- plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
+ .init = &aesni_gcm_init_avx_gen2,
+ .enc_update = &aesni_gcm_enc_update_avx_gen2,
+ .dec_update = &aesni_gcm_dec_update_avx_gen2,
+ .finalize = &aesni_gcm_finalize_avx_gen2,
+};

-static void aesni_gcm_dec_avx(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, data, out, in,
- ciphertext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
-#endif
-
-#ifdef CONFIG_AS_AVX2
 /*
- * asmlinkage void aesni_gcm_precomp_avx_gen4()
+ * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
-asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey,
+ const u8 *aad,
+ unsigned long aad_len);

-asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
 const u8 *in, unsigned long plaintext_len, u8 *iv,
 const u8 *aad, unsigned long aad_len,
 u8 *auth_tag, unsigned long auth_tag_len);

-asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
 const u8 *in, unsigned long ciphertext_len, u8 *iv,
 const u8 *aad, unsigned long aad_len,
 u8 *auth_tag, unsigned long auth_tag_len);

-static void aesni_gcm_enc_avx2(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_enc(ctx, data, out, in,
- plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
-
-static void aesni_gcm_dec_avx2(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, data, out, in,
- ciphertext_len, iv, hash_subkey,
- aad, aad_len, auth_tag, auth_tag_len);
- } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
-#endif
-
-static void (*aesni_gcm_enc_tfm)(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len,
- u8 *iv, u8 *hash_subkey, const u8 *aad,
- unsigned long aad_len, u8 *auth_tag,
- unsigned long auth_tag_len);
-
-static void (*aesni_gcm_dec_tfm)(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len,
- u8 *iv, u8 *hash_subkey, const u8 *aad,
- unsigned long aad_len, u8 *auth_tag,
- unsigned long auth_tag_len);
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
+ .init = &aesni_gcm_init_avx_gen4,
+ .enc_update = &aesni_gcm_enc_update_avx_gen4,
+ .dec_update = &aesni_gcm_dec_update_avx_gen4,
+ .finalize = &aesni_gcm_finalize_avx_gen4,
+};

 static inline struct
 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
@@ -351,17 +313,14 @@
 const u8 *in_key, unsigned int key_len)
 {
 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
- u32 *flags = &tfm->crt_flags;
 int err;

 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ key_len != AES_KEYSIZE_256)
 return -EINVAL;
- }

- if (!irq_fpu_usable())
- err = crypto_aes_expand_key(ctx, in_key, key_len);
+ if (!crypto_simd_usable())
+ err = aes_expandkey(ctx, in_key, key_len);
 else {
 kernel_fpu_begin();
 err = aesni_set_key(ctx, in_key, key_len);
@@ -377,44 +336,30 @@
 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
 }

-static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

- if (!irq_fpu_usable())
- crypto_aes_encrypt_x86(ctx, dst, src);
- else {
+ if (!crypto_simd_usable()) {
+ aes_encrypt(ctx, dst, src);
+ } else {
 kernel_fpu_begin();
 aesni_enc(ctx, dst, src);
 kernel_fpu_end();
 }
 }

-static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

- if (!irq_fpu_usable())
- crypto_aes_decrypt_x86(ctx, dst, src);
- else {
+ if (!crypto_simd_usable()) {
+ aes_decrypt(ctx, dst, src);
+ } else {
 kernel_fpu_begin();
 aesni_dec(ctx, dst, src);
 kernel_fpu_end();
 }
-}
-
-static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
- aesni_enc(ctx, dst, src);
-}
-
-static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
- aesni_dec(ctx, dst, src);
 }

 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -434,14 +379,14 @@

 err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
 while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
 nbytes &= AES_BLOCK_SIZE - 1;
 err = skcipher_walk_done(&walk, nbytes);
 }
- kernel_fpu_end();

 return err;
 }
@@ -456,14 +401,14 @@

 err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
 while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
 nbytes &= AES_BLOCK_SIZE - 1;
 err = skcipher_walk_done(&walk, nbytes);
 }
- kernel_fpu_end();

 return err;
 }
@@ -478,14 +423,14 @@

 err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
 while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
 nbytes &= AES_BLOCK_SIZE - 1;
 err = skcipher_walk_done(&walk, nbytes);
 }
- kernel_fpu_end();

 return err;
 }
@@ -500,14 +445,14 @@

 err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
 while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
 nbytes &= AES_BLOCK_SIZE - 1;
 err = skcipher_walk_done(&walk, nbytes);
 }
- kernel_fpu_end();

 return err;
 }
@@ -528,7 +473,6 @@
 crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }

-#ifdef CONFIG_AS_AVX
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 const u8 *in, unsigned int len, u8 *iv)
 {
@@ -545,7 +489,6 @@
 else
 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 }
-#endif

 static int ctr_crypt(struct skcipher_request *req)
 {
@@ -557,18 +500,20 @@

 err = skcipher_walk_virt(&walk, req, true);

- kernel_fpu_begin();
 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ kernel_fpu_begin();
 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
 nbytes &= AES_BLOCK_SIZE - 1;
 err = skcipher_walk_done(&walk, nbytes);
 }
 if (walk.nbytes) {
+ kernel_fpu_begin();
 ctr_crypt_final(ctx, &walk);
+ kernel_fpu_end();
 err = skcipher_walk_done(&walk, 0);
 }
- kernel_fpu_end();

 return err;
 }
@@ -597,29 +542,24 @@
 }


-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
- aesni_enc(ctx, out, in);
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
 }

-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }

-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+ aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }

-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
- aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
-}
-
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
-{
- aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+ aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }

 static const struct common_glue_ctx aesni_enc_xts = {
@@ -627,11 +567,11 @@
 .fpu_blocks_limit = 1,

 .funcs = { {
- .num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+ .num_blocks = 32,
+ .fn_u = { .xts = aesni_xts_enc32 }
 }, {
 .num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+ .fn_u = { .xts = aesni_xts_enc }
 } }
 };

@@ -640,11 +580,11 @@
 .fpu_blocks_limit = 1,

 .funcs = { {
- .num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+ .num_blocks = 32,
+ .fn_u = { .xts = aesni_xts_dec32 }
 }, {
 .num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+ .fn_u = { .xts = aesni_xts_dec }
 } }
 };

@@ -653,10 +593,10 @@
 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

- return glue_xts_req_128bit(&aesni_enc_xts, req,
- XTS_TWEAK_CAST(aesni_xts_tweak),
+ return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
 aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx));
+ aes_ctx(ctx->raw_crypt_ctx),
+ false);
 }

 static int xts_decrypt(struct skcipher_request *req)
@@ -664,58 +604,30 @@
 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

- return glue_xts_req_128bit(&aesni_dec_xts, req,
- XTS_TWEAK_CAST(aesni_xts_tweak),
+ return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
 aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx));
-}
-
-static int rfc4106_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void rfc4106_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
+ aes_ctx(ctx->raw_crypt_ctx),
+ true);
 }

 static int
 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 {
- struct crypto_cipher *tfm;
+ struct crypto_aes_ctx ctx;
 int ret;

- tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = crypto_cipher_setkey(tfm, key, key_len);
+ ret = aes_expandkey(&ctx, key, key_len);
 if (ret)
- goto out_free_cipher;
+ return ret;

 /* Clear the data in the hash sub key container to zero.*/
 /* We want to cipher all zeros to create the hash sub key. */
 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

- crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
+ aes_encrypt(&ctx, hash_subkey, hash_subkey);

-out_free_cipher:
- crypto_free_cipher(tfm);
- return ret;
+ memzero_explicit(&ctx, sizeof(ctx));
+ return 0;
 }

 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
@@ -723,10 +635,9 @@
 {
 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

- if (key_len < 4) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (key_len < 4)
 return -EINVAL;
- }
+
 /*Account for 4 byte nonce at the end.*/
 key_len -= 4;

@@ -737,15 +648,8 @@
 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 }

-static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
-}
-
+/* This is the Integrity Check Value (aka the authentication tag) length and can
+ * be 8, 12 or 16 bytes long. */
 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 unsigned int authsize)
 {
@@ -759,17 +663,6 @@
 }

 return 0;
-}
-
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
 }

 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
@@ -797,7 +690,9 @@
 {
 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- struct gcm_context_data data AESNI_ALIGN_ATTR;
+ const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+ u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
+ struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 struct scatter_walk dst_sg_walk = {};
 unsigned long left = req->cryptlen;
 unsigned long len, srclen, dstlen;
@@ -813,6 +708,11 @@

 if (!enc)
 left -= auth_tag_len;
+
+ if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
+ gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+ if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
+ gcm_tfm = &aesni_gcm_tfm_sse;

 /* Linearize assoc, if not already linear */
 if (req->src->length >= assoclen && req->src->length &&
@@ -841,8 +741,7 @@
 }

 kernel_fpu_begin();
- aesni_gcm_init(aes_ctx, &data, iv,
- hash_subkey, assoc, assoclen);
+ gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 if (req->src != req->dst) {
 while (left) {
 src = scatterwalk_map(&src_sg_walk);
@@ -852,10 +751,10 @@
 len = min(srclen, dstlen);
 if (len) {
 if (enc)
- aesni_gcm_enc_update(aes_ctx, &data,
+ gcm_tfm->enc_update(aes_ctx, data,
 dst, src, len);
 else
- aesni_gcm_dec_update(aes_ctx, &data,
+ gcm_tfm->dec_update(aes_ctx, data,
 dst, src, len);
 }
 left -= len;
@@ -873,10 +772,10 @@
 len = scatterwalk_clamp(&src_sg_walk, left);
 if (len) {
 if (enc)
- aesni_gcm_enc_update(aes_ctx, &data,
+ gcm_tfm->enc_update(aes_ctx, data,
 src, src, len);
 else
- aesni_gcm_dec_update(aes_ctx, &data,
+ gcm_tfm->dec_update(aes_ctx, data,
 src, src, len);
 }
 left -= len;
@@ -885,7 +784,7 @@
 scatterwalk_done(&src_sg_walk, 1, left);
 }
 }
- aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
+ gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
 kernel_fpu_end();

 if (!assocmem)
@@ -918,147 +817,15 @@
 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
- struct gcm_context_data data AESNI_ALIGN_ATTR;
-
- if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
- aesni_gcm_enc_tfm == aesni_gcm_enc ||
- req->cryptlen < AVX_GEN2_OPTSIZE) {
- return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
- aes_ctx);
- }
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
- GFP_ATOMIC);
- if (unlikely(!assoc))
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
- kernel_fpu_begin();
- aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
- hash_subkey, assoc, assoclen,
- dst + req->cryptlen, auth_tag_len);
- kernel_fpu_end();
-
- /* The authTag (aka the Integrity Check Value) needs to be written
- * back to the packet. */
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- req->cryptlen + auth_tag_len, 1);
- kfree(assoc);
- }
- return 0;
+ return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
+ aes_ctx);
 }

 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- unsigned long tempCipherLen = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 authTag[16];
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
- struct gcm_context_data data AESNI_ALIGN_ATTR;
- int retval = 0;
-
- if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
- aesni_gcm_enc_tfm == aesni_gcm_enc ||
- req->cryptlen < AVX_GEN2_OPTSIZE) {
- return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
- aes_ctx);
- }
- tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
-
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) && req->dst->length &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
- if (!assoc)
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
-
- kernel_fpu_begin();
- aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
- hash_subkey, assoc, assoclen,
- authTag, auth_tag_len);
- kernel_fpu_end();
-
- /* Compare generated tag with passed in tag. */
- retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
- -EBADMSG : 0;
-
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- tempCipherLen, 1);
- kfree(assoc);
- }
- return retval;
-
+ return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
+ aes_ctx);
 }

 static int helper_rfc4106_encrypt(struct aead_request *req)
@@ -1066,7 +833,8 @@
 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 unsigned int i;
 __be32 counter = cpu_to_be32(1);

@@ -1093,7 +861,8 @@
 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 unsigned int i;

 if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -1113,41 +882,9 @@
 return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 aes_ctx);
 }
-
-static int gcmaes_wrapper_encrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_encrypt(req);
-}
-
-static int gcmaes_wrapper_decrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_decrypt(req);
-}
 #endif

-static struct crypto_alg aesni_algs[] = { {
+static struct crypto_alg aesni_cipher_alg = {
 .cra_name = "aes",
 .cra_driver_name = "aes-aesni",
 .cra_priority = 300,
@@ -1160,28 +897,11 @@
 .cia_min_keysize = AES_MIN_KEY_SIZE,
 .cia_max_keysize = AES_MAX_KEY_SIZE,
 .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt
+ .cia_encrypt = aesni_encrypt,
+ .cia_decrypt = aesni_decrypt
 }
 }
-}, {
- .cra_name = "__aes",
- .cra_driver_name = "__aes-aesni",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = __aes_encrypt,
- .cia_decrypt = __aes_decrypt
- }
- }
-} };
+};

 static struct skcipher_alg aesni_skciphers[] = {
 {
@@ -1256,22 +976,6 @@
 static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

-static struct {
- const char *algname;
- const char *drvname;
- const char *basename;
- struct simd_skcipher_alg *simd;
-} aesni_simd_skciphers2[] = {
-#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
- IS_BUILTIN(CONFIG_CRYPTO_PCBC)
- {
- .algname = "pcbc(aes)",
- .drvname = "pcbc-aes-aesni",
- .basename = "fpu(pcbc(__aes-aesni))",
- },
-#endif
-};
-
 #ifdef CONFIG_X86_64
 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
 unsigned int key_len)
@@ -1288,7 +992,8 @@
 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 __be32 counter = cpu_to_be32(1);

 memcpy(iv, req->iv, 12);
@@ -1304,7 +1009,8 @@
 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);

 memcpy(iv, req->iv, 12);
 *((__be32 *)(iv+12)) = counter;
@@ -1313,31 +1019,7 @@
 aes_ctx);
 }

-static int generic_gcmaes_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-
- return 0;
-}
-
-static void generic_gcmaes_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg aesni_aead_algs[] = { {
+static struct aead_alg aesni_aeads[] = { {
 .setkey = common_rfc4106_set_key,
 .setauthsize = common_rfc4106_set_authsize,
 .encrypt = helper_rfc4106_encrypt,
@@ -1345,30 +1027,13 @@
 .ivsize = GCM_RFC4106_IV_SIZE,
 .maxauthsize = 16,
 .base = {
- .cra_name = "__gcm-aes-aesni",
- .cra_driver_name = "__driver-gcm-aes-aesni",
+ .cra_name = "__rfc4106(gcm(aes))",
+ .cra_driver_name = "__rfc4106-gcm-aesni",
+ .cra_priority = 400,
 .cra_flags = CRYPTO_ALG_INTERNAL,
 .cra_blocksize = 1,
 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
 .cra_alignmask = AESNI_ALIGN - 1,
- .cra_module = THIS_MODULE,
- },
-}, {
- .init = rfc4106_init,
- .exit = rfc4106_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_RFC4106_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
 .cra_module = THIS_MODULE,
 },
 }, {
@@ -1379,165 +1044,86 @@
 .ivsize = GCM_AES_IV_SIZE,
 .maxauthsize = 16,
 .base = {
- .cra_name = "__generic-gcm-aes-aesni",
- .cra_driver_name = "__driver-generic-gcm-aes-aesni",
- .cra_priority = 0,
+ .cra_name = "__gcm(aes)",
+ .cra_driver_name = "__generic-gcm-aesni",
+ .cra_priority = 400,
 .cra_flags = CRYPTO_ALG_INTERNAL,
 .cra_blocksize = 1,
 .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
 .cra_alignmask = AESNI_ALIGN - 1,
 .cra_module = THIS_MODULE,
 },
-}, {
- .init = generic_gcmaes_init,
- .exit = generic_gcmaes_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "generic-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_module = THIS_MODULE,
- },
 } };
 #else
-static struct aead_alg aesni_aead_algs[0];
+static struct aead_alg aesni_aeads[0];
 #endif

+static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

 static const struct x86_cpu_id aesni_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_AES),
+ X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
 {}
 };
 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

-static void aesni_free_simds(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
- aesni_simd_skciphers[i]; i++)
- simd_skcipher_free(aesni_simd_skciphers[i]);
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
- if (aesni_simd_skciphers2[i].simd)
- simd_skcipher_free(aesni_simd_skciphers2[i].simd);
-}
-
 static int __init aesni_init(void)
 {
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
 int err;
- int i;

 if (!x86_match_cpu(aesni_cpu_id))
 return -ENODEV;
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_AS_AVX2
 if (boot_cpu_has(X86_FEATURE_AVX2)) {
 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
- aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
+ aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
 } else
-#endif
-#ifdef CONFIG_AS_AVX
 if (boot_cpu_has(X86_FEATURE_AVX)) {
 pr_info("AVX version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
- aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
- } else
-#endif
- {
+ aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+ } else {
 pr_info("SSE version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc;
- aesni_gcm_dec_tfm = aesni_gcm_dec;
+ aesni_gcm_tfm = &aesni_gcm_tfm_sse;
 }
 aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
 if (boot_cpu_has(X86_FEATURE_AVX)) {
 /* optimize performance of ctr mode encryption transform */
 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
 pr_info("AES CTR mode by8 optimization enabled\n");
 }
 #endif
-#endif

- err = crypto_fpu_init();
+ err = crypto_register_alg(&aesni_cipher_alg);
 if (err)
 return err;

- err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ err = simd_register_skciphers_compat(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
 if (err)
- goto fpu_exit;
+ goto unregister_cipher;

- err = crypto_register_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
- if (err)
- goto unregister_algs;
-
- err = crypto_register_aeads(aesni_aead_algs,
- ARRAY_SIZE(aesni_aead_algs));
+ err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
 if (err)
 goto unregister_skciphers;

- for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
- algname = aesni_skciphers[i].base.cra_name + 2;
- drvname = aesni_skciphers[i].base.cra_driver_name + 2;
- basename = aesni_skciphers[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aesni_simd_skciphers[i] = simd;
- }
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
- algname = aesni_simd_skciphers2[i].algname;
- drvname = aesni_simd_skciphers2[i].drvname;
- basename = aesni_simd_skciphers2[i].basename;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- continue;
-
- aesni_simd_skciphers2[i].simd = simd;
- }
-
 return 0;

-unregister_simds:
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
 unregister_skciphers:
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
-unregister_algs:
- crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-fpu_exit:
- crypto_fpu_exit();
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
+unregister_cipher:
+ crypto_unregister_alg(&aesni_cipher_alg);
 return err;
 }

 static void __exit aesni_exit(void)
 {
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
- crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-
- crypto_fpu_exit();
+ simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
+ crypto_unregister_alg(&aesni_cipher_alg);
 }

 late_initcall(aesni_init);