2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/crypto/xts.c
@@ -1,18 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* XTS: as defined in IEEE1619/D16
  * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
- * (sector sizes which are not a multiple of 16 bytes are,
- * however currently unsupported)
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
  * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
+#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
@@ -26,9 +21,7 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define XTS_BUFFER_SIZE 128u
-
-struct priv {
+struct xts_tfm_ctx {
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 };
@@ -38,27 +31,17 @@
 	char name[CRYPTO_MAX_ALG_NAME];
 };
 
-struct rctx {
-	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
-
+struct xts_request_ctx {
 	le128 t;
-
-	le128 *ext;
-
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
-
-	unsigned int left;
-
+	struct scatterlist *tail;
+	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };
 
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-		  unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+		      unsigned int keylen)
 {
-	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 	int err;
@@ -79,8 +62,6 @@
 	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
 	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
-	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
-					  CRYPTO_TFM_RES_MASK);
 	if (err)
 		return err;
 
@@ -89,88 +70,32 @@
 	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
-
-	return err;
+	return crypto_skcipher_setkey(child, key, keylen);
 }
 
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the gf128mul_x_ble() calls again.
+ */
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+			 bool enc)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	le128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned offset;
+	le128 t = rctx->t;
 	int err;
 
-	subreq = &rctx->subreq;
-	err = skcipher_walk_virt(&w, subreq, false);
-
-	while (w.nbytes) {
-		unsigned int avail = w.nbytes;
-		le128 *wdst;
-
-		wdst = w.dst.virt.addr;
-
-		do {
-			le128_xor(wdst, buf++, wdst);
-			wdst++;
-		} while ((avail -= bs) >= bs);
-
-		err = skcipher_walk_done(&w, avail);
+	if (second_pass) {
+		req = &rctx->subreq;
+		/* set to our TFM to enforce correct alignment: */
+		skcipher_request_set_tfm(req, tfm);
 	}
-
-	rctx->left -= subreq->cryptlen;
-
-	if (err || !rctx->left)
-		goto out;
-
-	rctx->dst = rctx->dstbuf;
-
-	scatterwalk_done(&w.out, 0, 1);
-	sg = w.out.sg;
-	offset = w.out.offset;
-
-	if (rctx->dst != sg) {
-		rctx->dst[0] = *sg;
-		sg_unmark_end(rctx->dst);
-		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-	}
-	rctx->dst[0].length -= offset - sg->offset;
-	rctx->dst[0].offset = offset;
-
-out:
-	return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-	le128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
-	const int bs = XTS_BLOCK_SIZE;
-	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned cryptlen;
-	unsigned offset;
-	bool more;
-	int err;
-
-	subreq = &rctx->subreq;
-	cryptlen = subreq->cryptlen;
-
-	more = rctx->left > cryptlen;
-	if (!more)
-		cryptlen = rctx->left;
-
-	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-				   cryptlen, NULL);
-
-	err = skcipher_walk_virt(&w, subreq, false);
+	err = skcipher_walk_virt(&w, req, false);
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
@@ -181,65 +106,151 @@
 		wdst = w.dst.virt.addr;
 
 		do {
-			*buf++ = rctx->t;
-			le128_xor(wdst++, &rctx->t, wsrc++);
-			gf128mul_x_ble(&rctx->t, &rctx->t);
+			if (unlikely(cts) &&
+			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
+				if (!enc) {
+					if (second_pass)
+						rctx->t = t;
+					gf128mul_x_ble(&t, &t);
+				}
+				le128_xor(wdst, &t, wsrc);
+				if (enc && second_pass)
+					gf128mul_x_ble(&rctx->t, &t);
+				skcipher_walk_done(&w, avail - bs);
+				return 0;
+			}
+
+			le128_xor(wdst++, &t, wsrc++);
+			gf128mul_x_ble(&t, &t);
 		} while ((avail -= bs) >= bs);
 
 		err = skcipher_walk_done(&w, avail);
 	}
 
-	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-				   cryptlen, NULL);
-
-	if (err || !more)
-		goto out;
-
-	rctx->src = rctx->srcbuf;
-
-	scatterwalk_done(&w.in, 0, 1);
-	sg = w.in.sg;
-	offset = w.in.offset;
-
-	if (rctx->src != sg) {
-		rctx->src[0] = *sg;
-		sg_unmark_end(rctx->src);
-		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-	}
-	rctx->src[0].length -= offset - sg->offset;
-	rctx->src[0].offset = offset;
-
-out:
 	return err;
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-	gfp_t gfp;
+	return xts_xor_tweak(req, false, enc);
+}
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, done, req);
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
+{
+	return xts_xor_tweak(req, true, enc);
+}
 
-	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-							   GFP_ATOMIC;
-	rctx->ext = NULL;
+static void xts_cts_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	le128 b;
 
-	subreq->cryptlen = XTS_BUFFER_SIZE;
-	if (req->cryptlen > XTS_BUFFER_SIZE) {
-		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+	if (!err) {
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
-		rctx->ext = kmalloc(n, gfp);
-		if (rctx->ext)
-			subreq->cryptlen = n;
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+		le128_xor(&b, &rctx->t, &b);
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
 	}
 
-	rctx->src = req->src;
-	rctx->dst = req->dst;
-	rctx->left = req->cryptlen;
+	skcipher_request_complete(req, err);
+}
+
+static int xts_cts_final(struct skcipher_request *req,
+			 int (*crypt)(struct skcipher_request *req))
+{
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int tail = req->cryptlen % XTS_BLOCK_SIZE;
+	le128 b[2];
+	int err;
+
+	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
+				      offset - XTS_BLOCK_SIZE);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	b[1] = b[0];
+	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+
+	le128_xor(b, &rctx->t, b);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+				      req);
+	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
+				   XTS_BLOCK_SIZE, NULL);
+
+	err = crypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	le128_xor(b, &rctx->t, b);
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+
+	return 0;
+}
+
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err) {
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+		err = xts_xor_tweak_post(req, true);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = xts_cts_final(req, crypto_skcipher_encrypt);
+			if (err == -EINPROGRESS || err == -EBUSY)
+				return;
+		}
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err) {
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+		err = xts_xor_tweak_post(req, false);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = xts_cts_final(req, crypto_skcipher_decrypt);
+			if (err == -EINPROGRESS || err == -EBUSY)
+				return;
+		}
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int xts_init_crypt(struct skcipher_request *req,
+			  crypto_completion_t compl)
+{
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
+	skcipher_request_set_crypt(subreq, req->dst, req->dst,
+				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
 
 	/* calculate first value of T */
 	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
@@ -247,121 +258,45 @@
 	return 0;
 }
 
-static void exit_crypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	rctx->left = 0;
+	err = xts_init_crypt(req, xts_encrypt_done) ?:
+	      xts_xor_tweak_pre(req, true) ?:
+	      crypto_skcipher_encrypt(subreq) ?:
+	      xts_xor_tweak_post(req, true);
 
-	if (rctx->ext)
-		kzfree(rctx->ext);
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
+
+	return xts_cts_final(req, crypto_skcipher_encrypt);
 }
 
-static int do_encrypt(struct skcipher_request *req, int err)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	subreq = &rctx->subreq;
+	err = xts_init_crypt(req, xts_decrypt_done) ?:
+	      xts_xor_tweak_pre(req, false) ?:
+	      crypto_skcipher_decrypt(subreq) ?:
+	      xts_xor_tweak_post(req, false);
 
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_encrypt(subreq) ?:
-		      post_crypt(req);
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
 
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
+	return xts_cts_final(req, crypto_skcipher_decrypt);
 }
 
-static void encrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_encrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int encrypt(struct skcipher_request *req)
-{
-	return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_decrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
-}
-
-static void decrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_decrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int decrypt(struct skcipher_request *req)
-{
-	return do_decrypt(req, init_crypt(req, decrypt_done));
-}
-
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
 {
 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 
@@ -380,41 +315,39 @@
 	ctx->tweak = tweak;
 
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
-					 sizeof(struct rctx));
+					 sizeof(struct xts_request_ctx));
 
 	return 0;
 }
 
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(ctx->child);
 	crypto_free_cipher(ctx->tweak);
 }
 
-static void free_inst(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
 {
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ictx->spawn);
 	kfree(inst);
 }
 
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct skcipher_instance *inst;
-	struct crypto_attr_type *algt;
 	struct xts_instance_ctx *ctx;
 	struct skcipher_alg *alg;
 	const char *cipher_name;
 	u32 mask;
 	int err;
 
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return PTR_ERR(algt);
-
-	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
-		return -EINVAL;
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+	if (err)
+		return err;
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -426,20 +359,17 @@
 
 	ctx = skcipher_instance_ctx(inst);
 
-	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
-
-	mask = crypto_requires_off(algt->type, algt->mask,
-				   CRYPTO_ALG_NEED_FALLBACK |
-				   CRYPTO_ALG_ASYNC);
-
-	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
+	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+				   cipher_name, 0, mask);
 	if (err == -ENOENT) {
 		err = -ENAMETOOLONG;
 		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
 			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 			goto err_free_inst;
 
-		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
+		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
 	}
 
 	if (err)
@@ -449,15 +379,15 @@
 
 	err = -EINVAL;
 	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	if (crypto_skcipher_alg_ivsize(alg))
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
 	if (err)
-		goto err_drop_spawn;
+		goto err_free_inst;
 
 	err = -EINVAL;
 	cipher_name = alg->base.cra_name;
@@ -466,26 +396,25 @@
	 * cipher name.
	 */
 	if (!strncmp(cipher_name, "ecb(", 4)) {
-		unsigned len;
+		int len;
 
-		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
-		if (len < 2 || len >= sizeof(ctx->name))
-			goto err_drop_spawn;
+		len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+		if (len < 2)
+			goto err_free_inst;
 
 		if (ctx->name[len - 1] != ')')
-			goto err_drop_spawn;
+			goto err_free_inst;
 
 		ctx->name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
 			err = -ENAMETOOLONG;
-			goto err_drop_spawn;
+			goto err_free_inst;
 		}
 	} else
-		goto err_drop_spawn;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
@@ -495,50 +424,45 @@
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
 
-	inst->alg.init = init_tfm;
-	inst->alg.exit = exit_tfm;
+	inst->alg.init = xts_init_tfm;
+	inst->alg.exit = xts_exit_tfm;
 
-	inst->alg.setkey = setkey;
-	inst->alg.encrypt = encrypt;
-	inst->alg.decrypt = decrypt;
+	inst->alg.setkey = xts_setkey;
+	inst->alg.encrypt = xts_encrypt;
+	inst->alg.decrypt = xts_decrypt;
 
-	inst->free = free_inst;
+	inst->free = xts_free_instance;
 
 	err = skcipher_register_instance(tmpl, inst);
-	if (err)
-		goto err_drop_spawn;
-
-out:
-	return err;
-
-err_drop_spawn:
-	crypto_drop_skcipher(&ctx->spawn);
+	if (err) {
 err_free_inst:
-	kfree(inst);
-	goto out;
+		xts_free_instance(inst);
+	}
+	return err;
 }
 
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
 	.name = "xts",
-	.create = create,
+	.create = xts_create,
 	.module = THIS_MODULE,
 };
 
-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
 {
-	return crypto_register_template(&crypto_tmpl);
+	return crypto_register_template(&xts_tmpl);
 }
 
-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
 {
-	crypto_unregister_template(&crypto_tmpl);
+	crypto_unregister_template(&xts_tmpl);
 }
 
-module_init(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
 MODULE_ALIAS_CRYPTO("xts");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
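
Note on the tweak arithmetic in xts_xor_tweak(): each block's tweak is advanced with gf128mul_x_ble(), a multiplication by x in GF(2^128) using the little-endian block convention that XTS specifies. The following is a minimal standalone sketch of that doubling step, for illustration only; it is not the kernel's gf128mul implementation, and the helper name xts_tweak_double() is made up here.

#include <stdint.h>

/*
 * Illustrative sketch: multiply a 128-bit XTS tweak by x in GF(2^128),
 * little-endian ("ble") convention. 'lo' holds tweak bytes 0-7 and 'hi'
 * holds bytes 8-15; the reduction polynomial is
 * x^128 + x^7 + x^2 + x + 1, i.e. the constant 0x87.
 */
static void xts_tweak_double(uint64_t *hi, uint64_t *lo)
{
	uint64_t carry = *hi >> 63;		/* bit 127, about to shift out */

	*hi = (*hi << 1) | (*lo >> 63);		/* shift the 128-bit value left by one */
	*lo = (*lo << 1) ^ (carry ? 0x87 : 0);	/* reduce modulo the XTS polynomial */
}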
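
For reference, a hedged sketch of how an in-kernel caller might drive the "xts(aes)" instance registered by this template through the skcipher API. The function xts_demo_encrypt() and its parameters are hypothetical and not part of the patch; 'buf' is assumed to be a kmalloc()'ed buffer of at least one XTS_BLOCK_SIZE (16-byte) block, and 'key' carries two cipher keys back to back (e.g. 64 bytes for AES-256-XTS), matching the doubled min/max keysize set up in xts_create().

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int xts_demo_encrypt(u8 *buf, unsigned int len,
			    const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 iv[16] = { };	/* per-sector tweak/IV, XTS_BLOCK_SIZE bytes */
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* XTS keys are double length: data key and tweak key concatenated. */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Encrypt in place; crypto_wait_req() handles async completion. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}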