.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
1 | 2 | /* LRW: as defined by Cyril Guyot in |
2 | 3 | * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf |
3 | 4 | * |
.. | .. |
5 | 6 | * |
6 | 7 | * Based on ecb.c |
7 | 8 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
8 | | - * |
9 | | - * This program is free software; you can redistribute it and/or modify it |
10 | | - * under the terms of the GNU General Public License as published by the Free |
11 | | - * Software Foundation; either version 2 of the License, or (at your option) |
12 | | - * any later version. |
13 | 9 | */ |
14 | 10 | /* This implementation is checked against the test vectors in the above |
15 | 11 | * document and by a test vector provided by Ken Buchanan at |
16 | | - * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html |
| 12 | + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html |
17 | 13 | * |
18 | 14 | * The test vectors are included in the testing module tcrypt.[ch] */ |
19 | 15 | |
.. | .. |
29 | 25 | #include <crypto/b128ops.h> |
30 | 26 | #include <crypto/gf128mul.h> |
31 | 27 | |
32 | | -#define LRW_BUFFER_SIZE 128u |
33 | | - |
34 | 28 | #define LRW_BLOCK_SIZE 16 |
35 | 29 | |
36 | | -struct priv { |
| 30 | +struct lrw_tfm_ctx { |
37 | 31 | struct crypto_skcipher *child; |
38 | 32 | |
39 | 33 | /* |
.. | .. |
55 | 49 | be128 mulinc[128]; |
56 | 50 | }; |
57 | 51 | |
58 | | -struct rctx { |
59 | | - be128 buf[LRW_BUFFER_SIZE / sizeof(be128)]; |
60 | | - |
| 52 | +struct lrw_request_ctx { |
61 | 53 | be128 t; |
62 | | - |
63 | | - be128 *ext; |
64 | | - |
65 | | - struct scatterlist srcbuf[2]; |
66 | | - struct scatterlist dstbuf[2]; |
67 | | - struct scatterlist *src; |
68 | | - struct scatterlist *dst; |
69 | | - |
70 | | - unsigned int left; |
71 | | - |
72 | 54 | struct skcipher_request subreq; |
73 | 55 | }; |
74 | 56 | |
75 | | -static inline void setbit128_bbe(void *b, int bit) |
| 57 | +static inline void lrw_setbit128_bbe(void *b, int bit) |
76 | 58 | { |
77 | 59 | __set_bit(bit ^ (0x80 - |
78 | 60 | #ifdef __BIG_ENDIAN |
.. | .. |
83 | 65 | ), b); |
84 | 66 | } |
85 | 67 | |
86 | | -static int setkey(struct crypto_skcipher *parent, const u8 *key, |
87 | | - unsigned int keylen) |
| 68 | +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, |
| 69 | + unsigned int keylen) |
88 | 70 | { |
89 | | - struct priv *ctx = crypto_skcipher_ctx(parent); |
| 71 | + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); |
90 | 72 | struct crypto_skcipher *child = ctx->child; |
91 | 73 | int err, bsize = LRW_BLOCK_SIZE; |
92 | 74 | const u8 *tweak = key + keylen - bsize; |
.. | .. |
97 | 79 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
98 | 80 | CRYPTO_TFM_REQ_MASK); |
99 | 81 | err = crypto_skcipher_setkey(child, key, keylen - bsize); |
100 | | - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & |
101 | | - CRYPTO_TFM_RES_MASK); |
102 | 82 | if (err) |
103 | 83 | return err; |
104 | 84 | |
.. | .. |
112 | 92 | |
113 | 93 | /* initialize optimization table */ |
114 | 94 | for (i = 0; i < 128; i++) { |
115 | | - setbit128_bbe(&tmp, i); |
| 95 | + lrw_setbit128_bbe(&tmp, i); |
116 | 96 | ctx->mulinc[i] = tmp; |
117 | 97 | gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); |
118 | 98 | } |
.. | .. |
120 | 100 | return 0; |
121 | 101 | } |
122 | 102 | |
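A note on the optimization table initialized above: `tmp` is never cleared between loop iterations, so it accumulates set bits, and `mulinc[k]` ends up caching Key2 multiplied (in GF(2^128)) by the value with bits 0..k set. Since incrementing a block index I that has k trailing one bits flips exactly bits 0..k and nothing else, that cached value equals (I XOR (I+1)) * Key2, which is why a single XOR per block keeps T = I * Key2 current. A minimal userspace sketch of the underlying bit identity (not part of the patch):

```c
/* Sketch: (i + 1) ^ i is always the mask with bits 0..k set, where k is
 * the number of trailing one bits of i. */
#include <assert.h>
#include <stdint.h>

static unsigned trailing_ones(uint32_t v)
{
	unsigned n = 0;

	while (v & 1) {
		v >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	for (uint32_t i = 0; i < (1u << 20); i++)
		assert(((i + 1) ^ i) == (1u << (trailing_ones(i) + 1)) - 1);
	return 0;
}
```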
123 | | -static inline void inc(be128 *iv) |
| 103 | +/* |
| 104 | + * Returns the number of trailing '1' bits in the words of the counter, which is |
| 105 | + * represented by 4 32-bit words, arranged from least to most significant. |
| 106 | + * At the same time, increments the counter by one. |
| 107 | + * |
| 108 | + * For example: |
| 109 | + * |
| 110 | + * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; |
| 111 | + * int i = lrw_next_index(&counter); |
| 112 | + * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } |
| 113 | + */ |
| 114 | +static int lrw_next_index(u32 *counter) |
124 | 115 | { |
125 | | - be64_add_cpu(&iv->b, 1); |
126 | | - if (!iv->b) |
127 | | - be64_add_cpu(&iv->a, 1); |
128 | | -} |
| 116 | + int i, res = 0; |
129 | 117 | |
130 | | -/* this returns the number of consequative 1 bits starting |
131 | | - * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ |
132 | | -static inline int get_index128(be128 *block) |
133 | | -{ |
134 | | - int x; |
135 | | - __be32 *p = (__be32 *) block; |
| 118 | + for (i = 0; i < 4; i++) { |
| 119 | + if (counter[i] + 1 != 0) |
| 120 | + return res + ffz(counter[i]++); |
136 | 121 | |
137 | | - for (p += 3, x = 0; x < 128; p--, x += 32) { |
138 | | - u32 val = be32_to_cpup(p); |
139 | | - |
140 | | - if (!~val) |
141 | | - continue; |
142 | | - |
143 | | - return x + ffz(val); |
| 122 | + counter[i] = 0; |
| 123 | + res += 32; |
144 | 124 | } |
145 | 125 | |
146 | 126 | /* |
.. | .. |
151 | 131 | return 127; |
152 | 132 | } |
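The worked example in the new comment can be reproduced with a small userspace model of lrw_next_index(). `ffz32()` below is a hypothetical portable stand-in for the kernel's ffz() helper (index of the lowest clear bit, undefined for an all-ones word, which the caller rules out):

```c
/* Hedged sketch of lrw_next_index() outside the kernel. */
#include <stdio.h>
#include <stdint.h>

static int ffz32(uint32_t x)		/* lowest clear bit, x != ~0u */
{
	int i = 0;

	while (x & 1) {
		x >>= 1;
		i++;
	}
	return i;
}

static int next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz32(counter[i]++);
		counter[i] = 0;
		res += 32;
	}
	return 127;	/* counter wrapped: all 128 bits were set */
}

int main(void)
{
	uint32_t counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
	int idx = next_index(counter);

	/* prints "33 0 2 0 0", matching the comment's example */
	printf("%d %x %x %x %x\n", idx,
	       counter[0], counter[1], counter[2], counter[3]);
	return 0;
}
```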
153 | 133 | |
154 | | -static int post_crypt(struct skcipher_request *req) |
| 134 | +/* |
| 135 | + * We compute the tweak masks twice (both before and after the ECB encryption or |
| 136 | + * decryption) to avoid having to allocate a temporary buffer and/or make |
| 137 | + * multiple calls to the 'ecb(..)' instance, which usually would be slower than |
| 138 | + * just doing the lrw_next_index() calls again. |
| 139 | + */ |
| 140 | +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) |
155 | 141 | { |
156 | | - struct rctx *rctx = skcipher_request_ctx(req); |
157 | | - be128 *buf = rctx->ext ?: rctx->buf; |
158 | | - struct skcipher_request *subreq; |
159 | 142 | const int bs = LRW_BLOCK_SIZE; |
160 | | - struct skcipher_walk w; |
161 | | - struct scatterlist *sg; |
162 | | - unsigned offset; |
163 | | - int err; |
164 | | - |
165 | | - subreq = &rctx->subreq; |
166 | | - err = skcipher_walk_virt(&w, subreq, false); |
167 | | - |
168 | | - while (w.nbytes) { |
169 | | - unsigned int avail = w.nbytes; |
170 | | - be128 *wdst; |
171 | | - |
172 | | - wdst = w.dst.virt.addr; |
173 | | - |
174 | | - do { |
175 | | - be128_xor(wdst, buf++, wdst); |
176 | | - wdst++; |
177 | | - } while ((avail -= bs) >= bs); |
178 | | - |
179 | | - err = skcipher_walk_done(&w, avail); |
180 | | - } |
181 | | - |
182 | | - rctx->left -= subreq->cryptlen; |
183 | | - |
184 | | - if (err || !rctx->left) |
185 | | - goto out; |
186 | | - |
187 | | - rctx->dst = rctx->dstbuf; |
188 | | - |
189 | | - scatterwalk_done(&w.out, 0, 1); |
190 | | - sg = w.out.sg; |
191 | | - offset = w.out.offset; |
192 | | - |
193 | | - if (rctx->dst != sg) { |
194 | | - rctx->dst[0] = *sg; |
195 | | - sg_unmark_end(rctx->dst); |
196 | | - scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2); |
197 | | - } |
198 | | - rctx->dst[0].length -= offset - sg->offset; |
199 | | - rctx->dst[0].offset = offset; |
200 | | - |
201 | | -out: |
202 | | - return err; |
203 | | -} |
204 | | - |
205 | | -static int pre_crypt(struct skcipher_request *req) |
206 | | -{ |
207 | 143 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
208 | | - struct rctx *rctx = skcipher_request_ctx(req); |
209 | | - struct priv *ctx = crypto_skcipher_ctx(tfm); |
210 | | - be128 *buf = rctx->ext ?: rctx->buf; |
211 | | - struct skcipher_request *subreq; |
212 | | - const int bs = LRW_BLOCK_SIZE; |
| 144 | + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 145 | + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); |
| 146 | + be128 t = rctx->t; |
213 | 147 | struct skcipher_walk w; |
214 | | - struct scatterlist *sg; |
215 | | - unsigned cryptlen; |
216 | | - unsigned offset; |
217 | | - be128 *iv; |
218 | | - bool more; |
| 148 | + __be32 *iv; |
| 149 | + u32 counter[4]; |
219 | 150 | int err; |
220 | 151 | |
221 | | - subreq = &rctx->subreq; |
222 | | - skcipher_request_set_tfm(subreq, tfm); |
| 152 | + if (second_pass) { |
| 153 | + req = &rctx->subreq; |
| 154 | + /* set to our TFM to enforce correct alignment: */ |
| 155 | + skcipher_request_set_tfm(req, tfm); |
| 156 | + } |
223 | 157 | |
224 | | - cryptlen = subreq->cryptlen; |
225 | | - more = rctx->left > cryptlen; |
226 | | - if (!more) |
227 | | - cryptlen = rctx->left; |
| 158 | + err = skcipher_walk_virt(&w, req, false); |
| 159 | + if (err) |
| 160 | + return err; |
228 | 161 | |
229 | | - skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
230 | | - cryptlen, req->iv); |
231 | | - |
232 | | - err = skcipher_walk_virt(&w, subreq, false); |
233 | | - iv = w.iv; |
| 162 | + iv = (__be32 *)w.iv; |
| 163 | + counter[0] = be32_to_cpu(iv[3]); |
| 164 | + counter[1] = be32_to_cpu(iv[2]); |
| 165 | + counter[2] = be32_to_cpu(iv[1]); |
| 166 | + counter[3] = be32_to_cpu(iv[0]); |
234 | 167 | |
235 | 168 | while (w.nbytes) { |
236 | 169 | unsigned int avail = w.nbytes; |
.. | .. |
241 | 174 | wdst = w.dst.virt.addr; |
242 | 175 | |
243 | 176 | do { |
244 | | - *buf++ = rctx->t; |
245 | | - be128_xor(wdst++, &rctx->t, wsrc++); |
| 177 | + be128_xor(wdst++, &t, wsrc++); |
246 | 178 | |
247 | 179 | /* T <- I*Key2, using the optimization |
248 | 180 | * discussed in the specification */ |
249 | | - be128_xor(&rctx->t, &rctx->t, |
250 | | - &ctx->mulinc[get_index128(iv)]); |
251 | | - inc(iv); |
| 181 | + be128_xor(&t, &t, |
| 182 | + &ctx->mulinc[lrw_next_index(counter)]); |
252 | 183 | } while ((avail -= bs) >= bs); |
| 184 | + |
| 185 | + if (second_pass && w.nbytes == w.total) { |
| 186 | + iv[0] = cpu_to_be32(counter[3]); |
| 187 | + iv[1] = cpu_to_be32(counter[2]); |
| 188 | + iv[2] = cpu_to_be32(counter[1]); |
| 189 | + iv[3] = cpu_to_be32(counter[0]); |
| 190 | + } |
253 | 191 | |
254 | 192 | err = skcipher_walk_done(&w, avail); |
255 | 193 | } |
256 | 194 | |
257 | | - skcipher_request_set_tfm(subreq, ctx->child); |
258 | | - skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, |
259 | | - cryptlen, NULL); |
260 | | - |
261 | | - if (err || !more) |
262 | | - goto out; |
263 | | - |
264 | | - rctx->src = rctx->srcbuf; |
265 | | - |
266 | | - scatterwalk_done(&w.in, 0, 1); |
267 | | - sg = w.in.sg; |
268 | | - offset = w.in.offset; |
269 | | - |
270 | | - if (rctx->src != sg) { |
271 | | - rctx->src[0] = *sg; |
272 | | - sg_unmark_end(rctx->src); |
273 | | - scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2); |
274 | | - } |
275 | | - rctx->src[0].length -= offset - sg->offset; |
276 | | - rctx->src[0].offset = offset; |
277 | | - |
278 | | -out: |
279 | 195 | return err; |
280 | 196 | } |
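For intuition about the two passes: LRW computes C = E(P xor T) xor T for each block, so the same deterministic tweak stream can be regenerated and XORed in before and after the child ECB step with no mask buffer at all. A toy sketch under obviously artificial assumptions (one-byte "blocks", a made-up substitute for the block cipher, a hypothetical additive tweak stream):

```c
/* Toy illustration (userspace, not kernel code) of the pre/ECB/post
 * shape used above: per block, C = E(P ^ T) ^ T. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define NBLK 8

static void toy_ecb(uint8_t *b, int n)	/* stand-in byte permutation */
{
	for (int i = 0; i < n; i++)
		b[i] = (uint8_t)(b[i] * 5 + 3);
}

static void xor_tweak(uint8_t *b, int n, uint8_t t)
{
	for (int i = 0; i < n; i++, t++)	/* toy per-block tweak */
		b[i] ^= t;
}

int main(void)
{
	uint8_t p[NBLK] = "lrwdemo", c[NBLK], ref[NBLK];

	/* reference: per-block C[i] = E(P[i] ^ T[i]) ^ T[i] */
	for (int i = 0; i < NBLK; i++) {
		uint8_t blk = p[i] ^ (uint8_t)(7 + i);

		toy_ecb(&blk, 1);
		ref[i] = blk ^ (uint8_t)(7 + i);
	}

	/* two-pass form: pre-XOR, ECB over everything, post-XOR */
	memcpy(c, p, NBLK);
	xor_tweak(c, NBLK, 7);
	toy_ecb(c, NBLK);
	xor_tweak(c, NBLK, 7);

	assert(!memcmp(c, ref, NBLK));
	return 0;
}
```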
281 | 197 | |
282 | | -static int init_crypt(struct skcipher_request *req, crypto_completion_t done) |
| 198 | +static int lrw_xor_tweak_pre(struct skcipher_request *req) |
283 | 199 | { |
284 | | - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); |
285 | | - struct rctx *rctx = skcipher_request_ctx(req); |
286 | | - struct skcipher_request *subreq; |
287 | | - gfp_t gfp; |
| 200 | + return lrw_xor_tweak(req, false); |
| 201 | +} |
288 | 202 | |
289 | | - subreq = &rctx->subreq; |
290 | | - skcipher_request_set_callback(subreq, req->base.flags, done, req); |
| 203 | +static int lrw_xor_tweak_post(struct skcipher_request *req) |
| 204 | +{ |
| 205 | + return lrw_xor_tweak(req, true); |
| 206 | +} |
291 | 207 | |
292 | | - gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
293 | | - GFP_ATOMIC; |
294 | | - rctx->ext = NULL; |
| 208 | +static void lrw_crypt_done(struct crypto_async_request *areq, int err) |
| 209 | +{ |
| 210 | + struct skcipher_request *req = areq->data; |
295 | 211 | |
296 | | - subreq->cryptlen = LRW_BUFFER_SIZE; |
297 | | - if (req->cryptlen > LRW_BUFFER_SIZE) { |
298 | | - unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); |
| 212 | + if (!err) { |
| 213 | + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); |
299 | 214 | |
300 | | - rctx->ext = kmalloc(n, gfp); |
301 | | - if (rctx->ext) |
302 | | - subreq->cryptlen = n; |
| 215 | + rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| 216 | + err = lrw_xor_tweak_post(req); |
303 | 217 | } |
304 | 218 | |
305 | | - rctx->src = req->src; |
306 | | - rctx->dst = req->dst; |
307 | | - rctx->left = req->cryptlen; |
| 219 | + skcipher_request_complete(req, err); |
| 220 | +} |
| 221 | + |
| 222 | +static void lrw_init_crypt(struct skcipher_request *req) |
| 223 | +{ |
| 224 | + const struct lrw_tfm_ctx *ctx = |
| 225 | + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); |
| 226 | + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); |
| 227 | + struct skcipher_request *subreq = &rctx->subreq; |
| 228 | + |
| 229 | + skcipher_request_set_tfm(subreq, ctx->child); |
| 230 | + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, |
| 231 | + req); |
| 232 | + /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ |
| 233 | + skcipher_request_set_crypt(subreq, req->dst, req->dst, |
| 234 | + req->cryptlen, req->iv); |
308 | 235 | |
309 | 236 | /* calculate first value of T */ |
310 | 237 | memcpy(&rctx->t, req->iv, sizeof(rctx->t)); |
311 | 238 | |
312 | 239 | /* T <- I*Key2 */ |
313 | 240 | gf128mul_64k_bbe(&rctx->t, ctx->table); |
314 | | - |
315 | | - return 0; |
316 | 241 | } |
317 | 242 | |
318 | | -static void exit_crypt(struct skcipher_request *req) |
| 243 | +static int lrw_encrypt(struct skcipher_request *req) |
319 | 244 | { |
320 | | - struct rctx *rctx = skcipher_request_ctx(req); |
| 245 | + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); |
| 246 | + struct skcipher_request *subreq = &rctx->subreq; |
321 | 247 | |
322 | | - rctx->left = 0; |
323 | | - |
324 | | - if (rctx->ext) |
325 | | - kzfree(rctx->ext); |
| 248 | + lrw_init_crypt(req); |
| 249 | + return lrw_xor_tweak_pre(req) ?: |
| 250 | + crypto_skcipher_encrypt(subreq) ?: |
| 251 | + lrw_xor_tweak_post(req); |
326 | 252 | } |
327 | 253 | |
328 | | -static int do_encrypt(struct skcipher_request *req, int err) |
| 254 | +static int lrw_decrypt(struct skcipher_request *req) |
329 | 255 | { |
330 | | - struct rctx *rctx = skcipher_request_ctx(req); |
331 | | - struct skcipher_request *subreq; |
| 256 | + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); |
| 257 | + struct skcipher_request *subreq = &rctx->subreq; |
332 | 258 | |
333 | | - subreq = &rctx->subreq; |
334 | | - |
335 | | - while (!err && rctx->left) { |
336 | | - err = pre_crypt(req) ?: |
337 | | - crypto_skcipher_encrypt(subreq) ?: |
338 | | - post_crypt(req); |
339 | | - |
340 | | - if (err == -EINPROGRESS || err == -EBUSY) |
341 | | - return err; |
342 | | - } |
343 | | - |
344 | | - exit_crypt(req); |
345 | | - return err; |
| 259 | + lrw_init_crypt(req); |
| 260 | + return lrw_xor_tweak_pre(req) ?: |
| 261 | + crypto_skcipher_decrypt(subreq) ?: |
| 262 | + lrw_xor_tweak_post(req); |
346 | 263 | } |
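After the refactor each request is a single pre-XOR, one ECB subrequest, and a post-XOR. A hedged usage sketch from a hypothetical kernel caller (the function name, the all-zero key bytes, the aes-128 key size, and the 64-byte extent are illustrative; the calls are the standard skcipher API). The key passed to setkey() is Key1 followed by the 16-byte Key2, and the 16-byte IV carries the starting block index:

```c
/* Sketch: encrypting one 64-byte extent with "lrw(aes)". */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int lrw_demo(void)
{
	u8 key[16 + 16] = { 0 };	/* Key1 || Key2 (demo value) */
	u8 iv[16] = { [15] = 1 };	/* tweak: first block index */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
```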
347 | 264 | |
348 | | -static void encrypt_done(struct crypto_async_request *areq, int err) |
349 | | -{ |
350 | | - struct skcipher_request *req = areq->data; |
351 | | - struct skcipher_request *subreq; |
352 | | - struct rctx *rctx; |
353 | | - |
354 | | - rctx = skcipher_request_ctx(req); |
355 | | - |
356 | | - if (err == -EINPROGRESS) { |
357 | | - if (rctx->left != req->cryptlen) |
358 | | - return; |
359 | | - goto out; |
360 | | - } |
361 | | - |
362 | | - subreq = &rctx->subreq; |
363 | | - subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; |
364 | | - |
365 | | - err = do_encrypt(req, err ?: post_crypt(req)); |
366 | | - if (rctx->left) |
367 | | - return; |
368 | | - |
369 | | -out: |
370 | | - skcipher_request_complete(req, err); |
371 | | -} |
372 | | - |
373 | | -static int encrypt(struct skcipher_request *req) |
374 | | -{ |
375 | | - return do_encrypt(req, init_crypt(req, encrypt_done)); |
376 | | -} |
377 | | - |
378 | | -static int do_decrypt(struct skcipher_request *req, int err) |
379 | | -{ |
380 | | - struct rctx *rctx = skcipher_request_ctx(req); |
381 | | - struct skcipher_request *subreq; |
382 | | - |
383 | | - subreq = &rctx->subreq; |
384 | | - |
385 | | - while (!err && rctx->left) { |
386 | | - err = pre_crypt(req) ?: |
387 | | - crypto_skcipher_decrypt(subreq) ?: |
388 | | - post_crypt(req); |
389 | | - |
390 | | - if (err == -EINPROGRESS || err == -EBUSY) |
391 | | - return err; |
392 | | - } |
393 | | - |
394 | | - exit_crypt(req); |
395 | | - return err; |
396 | | -} |
397 | | - |
398 | | -static void decrypt_done(struct crypto_async_request *areq, int err) |
399 | | -{ |
400 | | - struct skcipher_request *req = areq->data; |
401 | | - struct skcipher_request *subreq; |
402 | | - struct rctx *rctx; |
403 | | - |
404 | | - rctx = skcipher_request_ctx(req); |
405 | | - |
406 | | - if (err == -EINPROGRESS) { |
407 | | - if (rctx->left != req->cryptlen) |
408 | | - return; |
409 | | - goto out; |
410 | | - } |
411 | | - |
412 | | - subreq = &rctx->subreq; |
413 | | - subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; |
414 | | - |
415 | | - err = do_decrypt(req, err ?: post_crypt(req)); |
416 | | - if (rctx->left) |
417 | | - return; |
418 | | - |
419 | | -out: |
420 | | - skcipher_request_complete(req, err); |
421 | | -} |
422 | | - |
423 | | -static int decrypt(struct skcipher_request *req) |
424 | | -{ |
425 | | - return do_decrypt(req, init_crypt(req, decrypt_done)); |
426 | | -} |
427 | | - |
428 | | -static int init_tfm(struct crypto_skcipher *tfm) |
| 265 | +static int lrw_init_tfm(struct crypto_skcipher *tfm) |
429 | 266 | { |
430 | 267 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
431 | 268 | struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); |
432 | | - struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 269 | + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
433 | 270 | struct crypto_skcipher *cipher; |
434 | 271 | |
435 | 272 | cipher = crypto_spawn_skcipher(spawn); |
.. | .. |
439 | 276 | ctx->child = cipher; |
440 | 277 | |
441 | 278 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + |
442 | | - sizeof(struct rctx)); |
| 279 | + sizeof(struct lrw_request_ctx)); |
443 | 280 | |
444 | 281 | return 0; |
445 | 282 | } |
446 | 283 | |
447 | | -static void exit_tfm(struct crypto_skcipher *tfm) |
| 284 | +static void lrw_exit_tfm(struct crypto_skcipher *tfm) |
448 | 285 | { |
449 | | - struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 286 | + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
450 | 287 | |
451 | 288 | if (ctx->table) |
452 | 289 | gf128mul_free_64k(ctx->table); |
453 | 290 | crypto_free_skcipher(ctx->child); |
454 | 291 | } |
455 | 292 | |
456 | | -static void free_inst(struct skcipher_instance *inst) |
| 293 | +static void lrw_free_instance(struct skcipher_instance *inst) |
457 | 294 | { |
458 | 295 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); |
459 | 296 | kfree(inst); |
460 | 297 | } |
461 | 298 | |
462 | | -static int create(struct crypto_template *tmpl, struct rtattr **tb) |
| 299 | +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) |
463 | 300 | { |
464 | 301 | struct crypto_skcipher_spawn *spawn; |
465 | 302 | struct skcipher_instance *inst; |
466 | | - struct crypto_attr_type *algt; |
467 | 303 | struct skcipher_alg *alg; |
468 | 304 | const char *cipher_name; |
469 | 305 | char ecb_name[CRYPTO_MAX_ALG_NAME]; |
| 306 | + u32 mask; |
470 | 307 | int err; |
471 | 308 | |
472 | | - algt = crypto_get_attr_type(tb); |
473 | | - if (IS_ERR(algt)) |
474 | | - return PTR_ERR(algt); |
475 | | - |
476 | | - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) |
477 | | - return -EINVAL; |
| 309 | + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); |
| 310 | + if (err) |
| 311 | + return err; |
478 | 312 | |
479 | 313 | cipher_name = crypto_attr_alg_name(tb[1]); |
480 | 314 | if (IS_ERR(cipher_name)) |
.. | .. |
486 | 320 | |
487 | 321 | spawn = skcipher_instance_ctx(inst); |
488 | 322 | |
489 | | - crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); |
490 | | - err = crypto_grab_skcipher(spawn, cipher_name, 0, |
491 | | - crypto_requires_sync(algt->type, |
492 | | - algt->mask)); |
| 323 | + err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), |
| 324 | + cipher_name, 0, mask); |
493 | 325 | if (err == -ENOENT) { |
494 | 326 | err = -ENAMETOOLONG; |
495 | 327 | if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", |
496 | 328 | cipher_name) >= CRYPTO_MAX_ALG_NAME) |
497 | 329 | goto err_free_inst; |
498 | 330 | |
499 | | - err = crypto_grab_skcipher(spawn, ecb_name, 0, |
500 | | - crypto_requires_sync(algt->type, |
501 | | - algt->mask)); |
| 331 | + err = crypto_grab_skcipher(spawn, |
| 332 | + skcipher_crypto_instance(inst), |
| 333 | + ecb_name, 0, mask); |
502 | 334 | } |
503 | 335 | |
504 | 336 | if (err) |
.. | .. |
508 | 340 | |
509 | 341 | err = -EINVAL; |
510 | 342 | if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) |
511 | | - goto err_drop_spawn; |
| 343 | + goto err_free_inst; |
512 | 344 | |
513 | 345 | if (crypto_skcipher_alg_ivsize(alg)) |
514 | | - goto err_drop_spawn; |
| 346 | + goto err_free_inst; |
515 | 347 | |
516 | 348 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", |
517 | 349 | &alg->base); |
518 | 350 | if (err) |
519 | | - goto err_drop_spawn; |
| 351 | + goto err_free_inst; |
520 | 352 | |
521 | 353 | err = -EINVAL; |
522 | 354 | cipher_name = alg->base.cra_name; |
.. | .. |
525 | 357 | * cipher name. |
526 | 358 | */ |
527 | 359 | if (!strncmp(cipher_name, "ecb(", 4)) { |
528 | | - unsigned len; |
| 360 | + int len; |
529 | 361 | |
530 | | - len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); |
531 | | - if (len < 2 || len >= sizeof(ecb_name)) |
532 | | - goto err_drop_spawn; |
| 362 | + len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); |
| 363 | + if (len < 2) |
| 364 | + goto err_free_inst; |
533 | 365 | |
534 | 366 | if (ecb_name[len - 1] != ')') |
535 | | - goto err_drop_spawn; |
| 367 | + goto err_free_inst; |
536 | 368 | |
537 | 369 | ecb_name[len - 1] = 0; |
538 | 370 | |
539 | 371 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
540 | 372 | "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { |
541 | 373 | err = -ENAMETOOLONG; |
542 | | - goto err_drop_spawn; |
| 374 | + goto err_free_inst; |
543 | 375 | } |
544 | 376 | } else |
545 | | - goto err_drop_spawn; |
| 377 | + goto err_free_inst; |
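The block above derives the instance's cra_name from the resolved child: given "ecb(&lt;cipher&gt;)" it recovers &lt;cipher&gt; and names the instance "lrw(&lt;cipher&gt;)". The switch from strlcpy() to strscpy() lets a single `len < 2` test cover both too-short names and truncation, since strscpy() returns -E2BIG on overflow. A hypothetical userspace model of the parsing (snprintf stands in for strscpy, so truncation is checked explicitly):

```c
/* Sketch of the driver-name rewrite: "ecb(aes)" -> "lrw(aes)". */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cipher_name = "ecb(aes)";
	char ecb_name[128];
	int len;

	if (strncmp(cipher_name, "ecb(", 4))
		return 1;
	len = snprintf(ecb_name, sizeof(ecb_name), "%s", cipher_name + 4);
	if (len < 2 || len >= (int)sizeof(ecb_name))
		return 1;
	if (ecb_name[len - 1] != ')')
		return 1;
	ecb_name[len - 1] = 0;
	printf("lrw(%s)\n", ecb_name);	/* prints "lrw(aes)" */
	return 0;
}
```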
546 | 378 | |
547 | | - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
548 | 379 | inst->alg.base.cra_priority = alg->base.cra_priority; |
549 | 380 | inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; |
550 | 381 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | |
551 | | - (__alignof__(u64) - 1); |
| 382 | + (__alignof__(be128) - 1); |
552 | 383 | |
553 | 384 | inst->alg.ivsize = LRW_BLOCK_SIZE; |
554 | 385 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + |
.. | .. |
556 | 387 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + |
557 | 388 | LRW_BLOCK_SIZE; |
558 | 389 | |
559 | | - inst->alg.base.cra_ctxsize = sizeof(struct priv); |
| 390 | + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); |
560 | 391 | |
561 | | - inst->alg.init = init_tfm; |
562 | | - inst->alg.exit = exit_tfm; |
| 392 | + inst->alg.init = lrw_init_tfm; |
| 393 | + inst->alg.exit = lrw_exit_tfm; |
563 | 394 | |
564 | | - inst->alg.setkey = setkey; |
565 | | - inst->alg.encrypt = encrypt; |
566 | | - inst->alg.decrypt = decrypt; |
| 395 | + inst->alg.setkey = lrw_setkey; |
| 396 | + inst->alg.encrypt = lrw_encrypt; |
| 397 | + inst->alg.decrypt = lrw_decrypt; |
567 | 398 | |
568 | | - inst->free = free_inst; |
| 399 | + inst->free = lrw_free_instance; |
569 | 400 | |
570 | 401 | err = skcipher_register_instance(tmpl, inst); |
571 | | - if (err) |
572 | | - goto err_drop_spawn; |
573 | | - |
574 | | -out: |
575 | | - return err; |
576 | | - |
577 | | -err_drop_spawn: |
578 | | - crypto_drop_skcipher(spawn); |
| 402 | + if (err) { |
579 | 403 | err_free_inst: |
580 | | - kfree(inst); |
581 | | - goto out; |
| 404 | + lrw_free_instance(inst); |
| 405 | + } |
| 406 | + return err; |
582 | 407 | } |
583 | 408 | |
584 | | -static struct crypto_template crypto_tmpl = { |
| 409 | +static struct crypto_template lrw_tmpl = { |
585 | 410 | .name = "lrw", |
586 | | - .create = create, |
| 411 | + .create = lrw_create, |
587 | 412 | .module = THIS_MODULE, |
588 | 413 | }; |
589 | 414 | |
590 | | -static int __init crypto_module_init(void) |
| 415 | +static int __init lrw_module_init(void) |
591 | 416 | { |
592 | | - return crypto_register_template(&crypto_tmpl); |
| 417 | + return crypto_register_template(&lrw_tmpl); |
593 | 418 | } |
594 | 419 | |
595 | | -static void __exit crypto_module_exit(void) |
| 420 | +static void __exit lrw_module_exit(void) |
596 | 421 | { |
597 | | - crypto_unregister_template(&crypto_tmpl); |
| 422 | + crypto_unregister_template(&lrw_tmpl); |
598 | 423 | } |
599 | 424 | |
600 | | -module_init(crypto_module_init); |
601 | | -module_exit(crypto_module_exit); |
| 425 | +subsys_initcall(lrw_module_init); |
| 426 | +module_exit(lrw_module_exit); |
602 | 427 | |
603 | 428 | MODULE_LICENSE("GPL"); |
604 | 429 | MODULE_DESCRIPTION("LRW block cipher mode"); |