2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/crypto/lrw.c
+++ b/kernel/crypto/lrw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* LRW: as defined by Cyril Guyot in
  *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
  *
@@ -5,15 +6,10 @@
  *
  * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
 /* This implementation is checked against the test vectors in the above
  * document and by a test vector provided by Ken Buchanan at
- * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
  *
  * The test vectors are included in the testing module tcrypt.[ch] */
 
@@ -29,11 +25,9 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define LRW_BUFFER_SIZE 128u
-
 #define LRW_BLOCK_SIZE 16
 
-struct priv {
+struct lrw_tfm_ctx {
 	struct crypto_skcipher *child;
 
 	/*
@@ -55,24 +49,12 @@
 	be128 mulinc[128];
 };
 
-struct rctx {
-	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
-
+struct lrw_request_ctx {
 	be128 t;
-
-	be128 *ext;
-
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
-
-	unsigned int left;
-
 	struct skcipher_request subreq;
 };
 
-static inline void setbit128_bbe(void *b, int bit)
+static inline void lrw_setbit128_bbe(void *b, int bit)
 {
 	__set_bit(bit ^ (0x80 -
 #ifdef __BIG_ENDIAN
@@ -83,10 +65,10 @@
 			), b);
 }
 
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-		  unsigned int keylen)
+static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
+		      unsigned int keylen)
 {
-	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_skcipher *child = ctx->child;
 	int err, bsize = LRW_BLOCK_SIZE;
 	const u8 *tweak = key + keylen - bsize;
@@ -97,8 +79,6 @@
 	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 					 CRYPTO_TFM_REQ_MASK);
 	err = crypto_skcipher_setkey(child, key, keylen - bsize);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
 	if (err)
 		return err;
 
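The key layout that lrw_setkey() consumes deserves spelling out: the last LRW_BLOCK_SIZE bytes of the supplied key are the tweak key (Key2) handed to gf128mul_init_64k_bbe(), and everything before them is forwarded to the child cipher unchanged. A minimal userspace sketch of that split, using a hypothetical lrw_split_key() helper that is not part of this patch:

#include <stddef.h>

#define LRW_BLOCK_SIZE 16

struct lrw_key_parts {
	const unsigned char *cipher_key;	/* forwarded to the child cipher */
	size_t cipher_keylen;
	const unsigned char *tweak_key;		/* Key2, LRW_BLOCK_SIZE bytes */
};

/* Split a combined LRW key: e.g. 48 bytes for lrw(aes) with AES-256
 * = 32 bytes of AES key followed by the 16-byte tweak key. */
int lrw_split_key(const unsigned char *key, size_t keylen,
		  struct lrw_key_parts *out)
{
	if (keylen <= LRW_BLOCK_SIZE)
		return -1;	/* no room for both Key1 and Key2 */
	out->cipher_keylen = keylen - LRW_BLOCK_SIZE;
	out->cipher_key = key;
	out->tweak_key = key + out->cipher_keylen;
	return 0;
}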
@@ -112,7 +92,7 @@
 
 	/* initialize optimization table */
 	for (i = 0; i < 128; i++) {
-		setbit128_bbe(&tmp, i);
+		lrw_setbit128_bbe(&tmp, i);
 		ctx->mulinc[i] = tmp;
 		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
 	}
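A subtlety in the loop above: tmp is never cleared between iterations (assuming, as in the full file, it is zero-initialized at declaration), so after iteration i it has bits 0 through i set and mulinc[i] ends up holding (x^0 + x^1 + ... + x^i) ⊗ K2. That is exactly the delta an increment needs: in GF(2^128), if the counter has k trailing one bits, adding one flips bits 0..k, so

	T_{i+1} = T_i \oplus \Bigl(\sum_{j=0}^{k} x^j\Bigr) \otimes K_2
	        = T_i \oplus \mathrm{mulinc}[k]

which is why the xor-tweak loop further down can update T with a single be128_xor() against mulinc[lrw_next_index(counter)].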
@@ -120,27 +100,27 @@
 	return 0;
 }
 
-static inline void inc(be128 *iv)
+/*
+ * Returns the number of trailing '1' bits in the words of the counter, which is
+ * represented by 4 32-bit words, arranged from least to most significant.
+ * At the same time, increments the counter by one.
+ *
+ * For example:
+ *
+ * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
+ * int i = lrw_next_index(&counter);
+ * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
+ */
+static int lrw_next_index(u32 *counter)
 {
-	be64_add_cpu(&iv->b, 1);
-	if (!iv->b)
-		be64_add_cpu(&iv->a, 1);
-}
+	int i, res = 0;
 
-/* this returns the number of consequative 1 bits starting
- * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
-static inline int get_index128(be128 *block)
-{
-	int x;
-	__be32 *p = (__be32 *) block;
+	for (i = 0; i < 4; i++) {
+		if (counter[i] + 1 != 0)
+			return res + ffz(counter[i]++);
 
-	for (p += 3, x = 0; x < 128; p--, x += 32) {
-		u32 val = be32_to_cpup(p);
-
-		if (!~val)
-			continue;
-
-		return x + ffz(val);
+		counter[i] = 0;
+		res += 32;
 	}
 
 	/*
@@ -151,86 +131,39 @@
 	return 127;
 }
 
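The worked example in the new comment translates directly into a runnable check. A standalone userspace sketch (ffz() is reimplemented here because the kernel helper is not available; everything else mirrors lrw_next_index() above):

#include <assert.h>
#include <stdint.h>

/* index of the first zero bit, counting from the LSB (kernel ffz()) */
static int ffz32(uint32_t v)
{
	int i;

	for (i = 0; i < 32; i++)
		if (!(v & (1u << i)))
			return i;
	return 32;
}

static int next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz32(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/* all 128 bits were ones: the counter wraps to zero */
	return 127;
}

int main(void)
{
	uint32_t counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };

	assert(next_index(counter) == 33);	/* 33 trailing '1' bits */
	assert(counter[0] == 0x0 && counter[1] == 0x2);
	return 0;
}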
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the lrw_next_index() calls again.
+ */
+static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
 	const int bs = LRW_BLOCK_SIZE;
-	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned offset;
-	int err;
-
-	subreq = &rctx->subreq;
-	err = skcipher_walk_virt(&w, subreq, false);
-
-	while (w.nbytes) {
-		unsigned int avail = w.nbytes;
-		be128 *wdst;
-
-		wdst = w.dst.virt.addr;
-
-		do {
-			be128_xor(wdst, buf++, wdst);
-			wdst++;
-		} while ((avail -= bs) >= bs);
-
-		err = skcipher_walk_done(&w, avail);
-	}
-
-	rctx->left -= subreq->cryptlen;
-
-	if (err || !rctx->left)
-		goto out;
-
-	rctx->dst = rctx->dstbuf;
-
-	scatterwalk_done(&w.out, 0, 1);
-	sg = w.out.sg;
-	offset = w.out.offset;
-
-	if (rctx->dst != sg) {
-		rctx->dst[0] = *sg;
-		sg_unmark_end(rctx->dst);
-		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-	}
-	rctx->dst[0].length -= offset - sg->offset;
-	rctx->dst[0].offset = offset;
-
-out:
-	return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
-	const int bs = LRW_BLOCK_SIZE;
+	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	be128 t = rctx->t;
 	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned cryptlen;
-	unsigned offset;
-	be128 *iv;
-	bool more;
+	__be32 *iv;
+	u32 counter[4];
 	int err;
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_tfm(subreq, tfm);
+	if (second_pass) {
+		req = &rctx->subreq;
+		/* set to our TFM to enforce correct alignment: */
+		skcipher_request_set_tfm(req, tfm);
+	}
 
-	cryptlen = subreq->cryptlen;
-	more = rctx->left > cryptlen;
-	if (!more)
-		cryptlen = rctx->left;
+	err = skcipher_walk_virt(&w, req, false);
+	if (err)
+		return err;
 
-	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-				   cryptlen, req->iv);
-
-	err = skcipher_walk_virt(&w, subreq, false);
-	iv = w.iv;
+	iv = (__be32 *)w.iv;
+	counter[0] = be32_to_cpu(iv[3]);
+	counter[1] = be32_to_cpu(iv[2]);
+	counter[2] = be32_to_cpu(iv[1]);
+	counter[3] = be32_to_cpu(iv[0]);
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
@@ -241,195 +174,99 @@
 		wdst = w.dst.virt.addr;
 
 		do {
-			*buf++ = rctx->t;
-			be128_xor(wdst++, &rctx->t, wsrc++);
+			be128_xor(wdst++, &t, wsrc++);
 
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
-			be128_xor(&rctx->t, &rctx->t,
-				  &ctx->mulinc[get_index128(iv)]);
-			inc(iv);
+			be128_xor(&t, &t,
+				  &ctx->mulinc[lrw_next_index(counter)]);
 		} while ((avail -= bs) >= bs);
+
+		if (second_pass && w.nbytes == w.total) {
+			iv[0] = cpu_to_be32(counter[3]);
+			iv[1] = cpu_to_be32(counter[2]);
+			iv[2] = cpu_to_be32(counter[1]);
+			iv[3] = cpu_to_be32(counter[0]);
+		}
 
 		err = skcipher_walk_done(&w, avail);
 	}
 
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-				   cryptlen, NULL);
-
-	if (err || !more)
-		goto out;
-
-	rctx->src = rctx->srcbuf;
-
-	scatterwalk_done(&w.in, 0, 1);
-	sg = w.in.sg;
-	offset = w.in.offset;
-
-	if (rctx->src != sg) {
-		rctx->src[0] = *sg;
-		sg_unmark_end(rctx->src);
-		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-	}
-	rctx->src[0].length -= offset - sg->offset;
-	rctx->src[0].offset = offset;
-
-out:
 	return err;
 }
 
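The two-pass design the comment above describes works because of LRW's symmetry: C = E_K1(P ^ T) ^ T, so the identical tweak sequence is XORed in before and after the child ECB step and can simply be recomputed instead of buffered. A toy single-block illustration (userspace; toy_ecb() is an arbitrary invertible stand-in, not the real child cipher):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t a, b; } be128_t;	/* stand-in for be128 */

static void xor128(be128_t *r, const be128_t *x, const be128_t *y)
{
	r->a = x->a ^ y->a;
	r->b = x->b ^ y->b;
}

/* arbitrary 128-bit bijection standing in for the ecb(..) child */
static void toy_ecb(be128_t *blk)
{
	blk->a = ~blk->a + 0x9e3779b97f4a7c15ULL;
	blk->b ^= blk->a;
}

static void toy_ecb_inv(be128_t *blk)
{
	blk->b ^= blk->a;
	blk->a = ~(blk->a - 0x9e3779b97f4a7c15ULL);
}

int main(void)
{
	const be128_t p = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
	const be128_t t = { 0x1111111111111111ULL, 0x2222222222222222ULL };
	be128_t c = p;

	xor128(&c, &c, &t);	/* first pass:  P ^ T             */
	toy_ecb(&c);		/* child ECB:   E(P ^ T)          */
	xor128(&c, &c, &t);	/* second pass: C = E(P ^ T) ^ T  */

	xor128(&c, &c, &t);	/* decryption runs the same two   */
	toy_ecb_inv(&c);	/* passes around the ECB inverse  */
	xor128(&c, &c, &t);	/* ...and recovers P              */

	assert(c.a == p.a && c.b == p.b);
	return 0;
}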
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int lrw_xor_tweak_pre(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-	gfp_t gfp;
+	return lrw_xor_tweak(req, false);
+}
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_callback(subreq, req->base.flags, done, req);
+static int lrw_xor_tweak_post(struct skcipher_request *req)
+{
+	return lrw_xor_tweak(req, true);
+}
 
-	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-							   GFP_ATOMIC;
-	rctx->ext = NULL;
+static void lrw_crypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
 
-	subreq->cryptlen = LRW_BUFFER_SIZE;
-	if (req->cryptlen > LRW_BUFFER_SIZE) {
-		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+	if (!err) {
+		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
 
-		rctx->ext = kmalloc(n, gfp);
-		if (rctx->ext)
-			subreq->cryptlen = n;
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		err = lrw_xor_tweak_post(req);
 	}
 
-	rctx->src = req->src;
-	rctx->dst = req->dst;
-	rctx->left = req->cryptlen;
+	skcipher_request_complete(req, err);
+}
+
+static void lrw_init_crypt(struct skcipher_request *req)
+{
+	const struct lrw_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
+				      req);
+	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+	skcipher_request_set_crypt(subreq, req->dst, req->dst,
+				   req->cryptlen, req->iv);
 
 	/* calculate first value of T */
 	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
 	/* T <- I*Key2 */
 	gf128mul_64k_bbe(&rctx->t, ctx->table);
-
-	return 0;
 }
 
-static void exit_crypt(struct skcipher_request *req)
+static int lrw_encrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	rctx->left = 0;
-
-	if (rctx->ext)
-		kzfree(rctx->ext);
+	lrw_init_crypt(req);
+	return lrw_xor_tweak_pre(req) ?:
+	       crypto_skcipher_encrypt(subreq) ?:
+	       lrw_xor_tweak_post(req);
 }
 
-static int do_encrypt(struct skcipher_request *req, int err)
+static int lrw_decrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
+	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_encrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
+	lrw_init_crypt(req);
+	return lrw_xor_tweak_pre(req) ?:
+	       crypto_skcipher_decrypt(subreq) ?:
+	       lrw_xor_tweak_post(req);
 }
 
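The `x ?: y` chains in lrw_encrypt()/lrw_decrypt() use the GNU "elvis" extension (evaluate to x when non-zero, otherwise y), so the first non-zero return, including -EINPROGRESS or -EBUSY from an async child, short-circuits the rest. The encrypt path is equivalent to this spelled-out sketch (a hypothetical helper, shown only for clarity):

static int lrw_encrypt_spelled_out(struct skcipher_request *req,
				   struct skcipher_request *subreq)
{
	int err;

	err = lrw_xor_tweak_pre(req);
	if (err)
		return err;
	err = crypto_skcipher_encrypt(subreq);
	if (err)
		return err;	/* may be -EINPROGRESS/-EBUSY */
	return lrw_xor_tweak_post(req);
}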
-static void encrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_encrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int encrypt(struct skcipher_request *req)
-{
-	return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_decrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
-}
-
-static void decrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_decrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int decrypt(struct skcipher_request *req)
-{
-	return do_decrypt(req, init_crypt(req, decrypt_done));
-}
-
-static int init_tfm(struct crypto_skcipher *tfm)
+static int lrw_init_tfm(struct crypto_skcipher *tfm)
 {
 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *cipher;
 
 	cipher = crypto_spawn_skcipher(spawn);
@@ -439,42 +276,39 @@
 	ctx->child = cipher;
 
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
-					 sizeof(struct rctx));
+					 sizeof(struct lrw_request_ctx));
 
 	return 0;
 }
 
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void lrw_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	if (ctx->table)
 		gf128mul_free_64k(ctx->table);
 	crypto_free_skcipher(ctx->child);
 }
 
-static void free_inst(struct skcipher_instance *inst)
+static void lrw_free_instance(struct skcipher_instance *inst)
 {
 	crypto_drop_skcipher(skcipher_instance_ctx(inst));
 	kfree(inst);
 }
 
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct crypto_skcipher_spawn *spawn;
 	struct skcipher_instance *inst;
-	struct crypto_attr_type *algt;
 	struct skcipher_alg *alg;
 	const char *cipher_name;
 	char ecb_name[CRYPTO_MAX_ALG_NAME];
+	u32 mask;
 	int err;
 
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return PTR_ERR(algt);
-
-	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
-		return -EINVAL;
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+	if (err)
+		return err;
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -486,19 +320,17 @@
 
 	spawn = skcipher_instance_ctx(inst);
 
-	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
-	err = crypto_grab_skcipher(spawn, cipher_name, 0,
-				   crypto_requires_sync(algt->type,
-							algt->mask));
+	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+				   cipher_name, 0, mask);
 	if (err == -ENOENT) {
 		err = -ENAMETOOLONG;
 		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
 			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 			goto err_free_inst;
 
-		err = crypto_grab_skcipher(spawn, ecb_name, 0,
-					   crypto_requires_sync(algt->type,
-								algt->mask));
+		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
+					   ecb_name, 0, mask);
 	}
 
 	if (err)
@@ -508,15 +340,15 @@
 
 	err = -EINVAL;
 	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	if (crypto_skcipher_alg_ivsize(alg))
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
 				  &alg->base);
 	if (err)
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	err = -EINVAL;
 	cipher_name = alg->base.cra_name;
@@ -525,30 +357,29 @@
 	 * cipher name.
 	 */
 	if (!strncmp(cipher_name, "ecb(", 4)) {
-		unsigned len;
+		int len;
 
-		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
-		if (len < 2 || len >= sizeof(ecb_name))
-			goto err_drop_spawn;
+		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+		if (len < 2)
+			goto err_free_inst;
 
 		if (ecb_name[len - 1] != ')')
-			goto err_drop_spawn;
+			goto err_free_inst;
 
 		ecb_name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
 			err = -ENAMETOOLONG;
-			goto err_drop_spawn;
+			goto err_free_inst;
 		}
 	} else
-		goto err_drop_spawn;
+		goto err_free_inst;
 
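The net effect of this branch: an instance built on a child named "ecb(aes)" is advertised under the cra_name "lrw(aes)". A small userspace sketch of the same string surgery (illustrative only, using snprintf()/strlen() in place of the kernel's strscpy()):

#include <stdio.h>
#include <string.h>

#define CRYPTO_MAX_ALG_NAME 128

int main(void)
{
	const char *cipher_name = "ecb(aes)";	/* child's cra_name */
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	char cra_name[CRYPTO_MAX_ALG_NAME];
	size_t len;

	if (strncmp(cipher_name, "ecb(", 4) != 0)
		return 1;			/* only ecb(..) children qualify */

	snprintf(ecb_name, sizeof(ecb_name), "%s", cipher_name + 4);
	len = strlen(ecb_name);			/* "aes)" -> 4 */
	if (len < 2 || ecb_name[len - 1] != ')')
		return 1;
	ecb_name[len - 1] = '\0';		/* "aes" */

	snprintf(cra_name, sizeof(cra_name), "lrw(%s)", ecb_name);
	puts(cra_name);				/* prints "lrw(aes)" */
	return 0;
}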
-	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
-				       (__alignof__(u64) - 1);
+				       (__alignof__(be128) - 1);
 
 	inst->alg.ivsize = LRW_BLOCK_SIZE;
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
@@ -556,49 +387,43 @@
 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
 				LRW_BLOCK_SIZE;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
 
-	inst->alg.init = init_tfm;
-	inst->alg.exit = exit_tfm;
+	inst->alg.init = lrw_init_tfm;
+	inst->alg.exit = lrw_exit_tfm;
 
-	inst->alg.setkey = setkey;
-	inst->alg.encrypt = encrypt;
-	inst->alg.decrypt = decrypt;
+	inst->alg.setkey = lrw_setkey;
+	inst->alg.encrypt = lrw_encrypt;
+	inst->alg.decrypt = lrw_decrypt;
 
-	inst->free = free_inst;
+	inst->free = lrw_free_instance;
 
 	err = skcipher_register_instance(tmpl, inst);
-	if (err)
-		goto err_drop_spawn;
-
-out:
-	return err;
-
-err_drop_spawn:
-	crypto_drop_skcipher(spawn);
+	if (err) {
 err_free_inst:
-	kfree(inst);
-	goto out;
+		lrw_free_instance(inst);
+	}
+	return err;
 }
 
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template lrw_tmpl = {
 	.name = "lrw",
-	.create = create,
+	.create = lrw_create,
 	.module = THIS_MODULE,
 };
 
-static int __init crypto_module_init(void)
+static int __init lrw_module_init(void)
 {
-	return crypto_register_template(&crypto_tmpl);
+	return crypto_register_template(&lrw_tmpl);
 }
 
-static void __exit crypto_module_exit(void)
+static void __exit lrw_module_exit(void)
 {
-	crypto_unregister_template(&crypto_tmpl);
+	crypto_unregister_template(&lrw_tmpl);
 }
 
-module_init(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(lrw_module_init);
+module_exit(lrw_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LRW block cipher mode");
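For context, a sketch of how a kernel-side user would instantiate the template this file registers. The skcipher calls below follow the standard crypto API (crypto_alloc_skcipher() plus the crypto_wait_req() completion helper); the 48-byte key (AES-256 Key1 followed by the 16-byte Key2), the block-index IV, and the buffer sizes are illustrative assumptions, not part of the patch:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int lrw_demo(void)
{
	u8 key[48] = { 0 };		/* Key1 (AES-256) || Key2 (tweak key) */
	u8 iv[16] = { [15] = 1 };	/* 128-bit block index, big-endian */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(64, GFP_KERNEL);	/* scatterlists need non-stack memory */
	if (!req || !buf) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* waits for completion even if the instance runs asynchronously */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}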