2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/powerpc/crypto/aes-spe-glue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Glue code for AES implementation for SPE instructions (PPC)
  *
@@ -5,12 +6,6 @@
  * about the SPE registers so it can run from interrupt context.
  *
  * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/aes.h>
@@ -22,7 +17,10 @@
 #include <asm/byteorder.h>
 #include <asm/switch_to.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
+#include <crypto/gf128mul.h>
+#include <crypto/scatterwalk.h>
 
 /*
  * MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -96,13 +94,6 @@
 {
 	struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (key_len != AES_KEYSIZE_128 &&
-	    key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
-
 	switch (key_len) {
 	case AES_KEYSIZE_128:
 		ctx->rounds = 4;
@@ -116,6 +107,8 @@
 		ctx->rounds = 6;
 		ppc_expand_key_256(ctx->key_enc, in_key);
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
@@ -123,24 +116,23 @@
 	return 0;
 }
 
-static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+				   const u8 *in_key, unsigned int key_len)
+{
+	return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			  unsigned int key_len)
 {
-	struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
 	key_len >>= 1;
-
-	if (key_len != AES_KEYSIZE_128 &&
-	    key_len != AES_KEYSIZE_192 &&
-	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
 
 	switch (key_len) {
 	case AES_KEYSIZE_128:
@@ -158,6 +150,8 @@
 		ppc_expand_key_256(ctx->key_enc, in_key);
 		ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256);
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
@@ -183,208 +177,229 @@
 	spe_end();
 }
 
-static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes);
+		if (enc)
+			ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes);
+		else
+			ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	return ppc_ecb_crypt(req, true);
+}
+
+static int ppc_ecb_decrypt(struct skcipher_request *req)
+{
+	return ppc_ecb_crypt(req, false);
+}
+
+static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes);
+		if (enc)
+			ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv);
+		else
+			ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
-
-	return err;
+	return ppc_cbc_crypt(req, true);
 }
 
-static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_decrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
-
-	return err;
+	return ppc_cbc_crypt(req, false);
 }
 
-static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			 struct scatterlist *src, unsigned int nbytes)
+static int ppc_ctr_crypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int pbytes, ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((pbytes = walk.nbytes)) {
-		pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
-		pbytes = pbytes == nbytes ?
-			 nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
-		ubytes = walk.nbytes - pbytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
 		ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
-			      ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+			      ctx->key_enc, ctx->rounds, nbytes, walk.iv);
 		spe_end();
 
-		nbytes -= pbytes;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 	twk = ctx->key_twk;
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+		if (enc)
+			ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv, twk);
+		else
+			ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv, twk);
 		spe_end();
 
 		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_encrypt(struct skcipher_request *req)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[2][AES_BLOCK_SIZE];
 	int err;
-	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	twk = ctx->key_twk;
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
-		spe_end();
-
-		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   req->cryptlen - tail, req->iv);
+		req = &subreq;
 	}
 
-	return err;
+	err = ppc_xts_crypt(req, true);
+	if (err || !tail)
+		return err;
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0);
+	memcpy(b[1], b[0], tail);
+	scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0);
+
+	spe_begin();
+	ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
+}
+
+static int ppc_xts_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[3][AES_BLOCK_SIZE];
+	le128 twk;
+	int err;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   offset, req->iv);
+		req = &subreq;
+	}
+
+	err = ppc_xts_crypt(req, false);
+	if (err || !tail)
+		return err;
+
+	scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0);
+
+	spe_begin();
+	if (!offset)
+		ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds,
+				AES_BLOCK_SIZE);
+
+	gf128mul_x_ble(&twk, (le128 *)req->iv);
+
+	ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			(u8 *)&twk, NULL);
+	memcpy(b[0], b[2], tail);
+	memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail);
+	ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
 }
 
 /*
@@ -393,9 +408,9 @@
  * This improves IPsec thoughput by another few percent. Additionally we assume
  * that AES context is always aligned to at least 8 bytes because it is created
  * with kmalloc() in the crypto infrastructure
- *
  */
-static struct crypto_alg aes_algs[] = { {
+
+static struct crypto_alg aes_cipher_alg = {
 	.cra_name = "aes",
 	.cra_driver_name = "aes-ppc-spe",
 	.cra_priority = 300,
@@ -413,96 +428,84 @@
 			.cia_decrypt = ppc_aes_decrypt
 		}
 	}
-}, {
-	.cra_name = "ecb(aes)",
-	.cra_driver_name = "ecb-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ecb_encrypt,
-			.decrypt = ppc_ecb_decrypt,
-		}
+};
+
+static struct skcipher_alg aes_skcipher_algs[] = {
+	{
+		.base.cra_name = "ecb(aes)",
+		.base.cra_driver_name = "ecb-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ecb_encrypt,
+		.decrypt = ppc_ecb_decrypt,
+	}, {
+		.base.cra_name = "cbc(aes)",
+		.base.cra_driver_name = "cbc-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_cbc_encrypt,
+		.decrypt = ppc_cbc_decrypt,
+	}, {
+		.base.cra_name = "ctr(aes)",
+		.base.cra_driver_name = "ctr-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = 1,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ctr_crypt,
+		.decrypt = ppc_ctr_crypt,
+		.chunksize = AES_BLOCK_SIZE,
+	}, {
+		.base.cra_name = "xts(aes)",
+		.base.cra_driver_name = "xts-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_xts_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE * 2,
+		.max_keysize = AES_MAX_KEY_SIZE * 2,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_xts_setkey,
+		.encrypt = ppc_xts_encrypt,
+		.decrypt = ppc_xts_decrypt,
 	}
-}, {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "cbc-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_cbc_encrypt,
-			.decrypt = ppc_cbc_decrypt,
-		}
-	}
-}, {
-	.cra_name = "ctr(aes)",
-	.cra_driver_name = "ctr-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ctr_crypt,
-			.decrypt = ppc_ctr_crypt,
-		}
-	}
-}, {
-	.cra_name = "xts(aes)",
-	.cra_driver_name = "xts-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_xts_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE * 2,
-			.max_keysize = AES_MAX_KEY_SIZE * 2,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_xts_setkey,
-			.encrypt = ppc_xts_encrypt,
-			.decrypt = ppc_xts_decrypt,
-		}
-	}
-} };
+};
 
 static int __init ppc_aes_mod_init(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	int err;
+
+	err = crypto_register_alg(&aes_cipher_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_skciphers(aes_skcipher_algs,
+					ARRAY_SIZE(aes_skcipher_algs));
+	if (err)
+		crypto_unregister_alg(&aes_cipher_alg);
+	return err;
 }
 
 static void __exit ppc_aes_mod_fini(void)
 {
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	crypto_unregister_alg(&aes_cipher_alg);
+	crypto_unregister_skciphers(aes_skcipher_algs,
+				    ARRAY_SIZE(aes_skcipher_algs));
 }
 
 module_init(ppc_aes_mod_init);
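
Context note, not part of the patch above: after this conversion the ECB/CBC/CTR/XTS modes are reached through the kernel's skcipher API rather than the removed blkcipher interface. The sketch below is a minimal, hypothetical in-kernel caller illustrating that path; the helper name demo_cbc_encrypt, the choice of "cbc(aes)", and the single flat buffer are illustrative assumptions, not code from this commit.

/*
 * Hypothetical caller sketch: encrypt a linear buffer in place with
 * whatever cbc(aes) implementation has the highest priority (on an SPE
 * system that would be cbc-ppc-spe once this driver is loaded).
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_cbc_encrypt(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen,
			    u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Ask the crypto API for a cbc(aes) transform. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* In-place operation: source and destination are the same buffer. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					   CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* len must be a multiple of AES_BLOCK_SIZE for cbc(aes). */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}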