@@ -1,18 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* XTS: as defined in IEEE1619/D16
  * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
- * (sector sizes which are not a multiple of 16 bytes are,
- * however currently unsupported)
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
  * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
+#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
@@ -26,9 +21,7 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>

-#define XTS_BUFFER_SIZE 128u
-
-struct priv {
+struct xts_tfm_ctx {
         struct crypto_skcipher *child;
         struct crypto_cipher *tweak;
 };
@@ -38,27 +31,17 @@
         char name[CRYPTO_MAX_ALG_NAME];
 };

-struct rctx {
-        le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
-
+struct xts_request_ctx {
         le128 t;
-
-        le128 *ext;
-
-        struct scatterlist srcbuf[2];
-        struct scatterlist dstbuf[2];
-        struct scatterlist *src;
-        struct scatterlist *dst;
-
-        unsigned int left;
-
+        struct scatterlist *tail;
+        struct scatterlist sg[2];
         struct skcipher_request subreq;
 };

-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-                  unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+                      unsigned int keylen)
 {
-        struct priv *ctx = crypto_skcipher_ctx(parent);
+        struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
         struct crypto_skcipher *child;
         struct crypto_cipher *tweak;
         int err;
@@ -79,8 +62,6 @@
         crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
                                        CRYPTO_TFM_REQ_MASK);
         err = crypto_cipher_setkey(tweak, key + keylen, keylen);
-        crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
-                                          CRYPTO_TFM_RES_MASK);
         if (err)
                 return err;

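An aside on the two halves of the key: XTS takes a double-length key, and the (elided) validation code halves keylen before the lines above run, so crypto_cipher_setkey(tweak, key + keylen, keylen) keys the tweak cipher with the second half while the ecb(..) child in the next hunk gets the first half. A self-contained sketch of that split, following the rules IEEE 1619 and the kernel's xts_verify_key() impose as far as they are visible here (even total length; distinct halves in FIPS mode); xts_split_key() is a hypothetical helper, not part of the patch:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: how a double-length XTS key splits into the two
 * sub-keys used above (first half for the ecb(..) data cipher, second
 * half for the tweak cipher that encrypts the IV into T).
 */
static int xts_split_key(const uint8_t *key, size_t keylen, int fips,
                         const uint8_t **data_key, const uint8_t **tweak_key)
{
        if (keylen % 2)                 /* must hold two equal-size keys */
                return -EINVAL;

        keylen /= 2;
        if (fips && !memcmp(key, key + keylen, keylen))
                return -EINVAL;         /* FIPS: the halves must differ */

        *data_key = key;                /* bulk ECB encryption key */
        *tweak_key = key + keylen;      /* tweak (IV-encryption) key */
        return 0;
}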
@@ -89,88 +70,32 @@
         crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
         crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
-        err = crypto_skcipher_setkey(child, key, keylen);
-        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-                                          CRYPTO_TFM_RES_MASK);
-
-        return err;
+        return crypto_skcipher_setkey(child, key, keylen);
 }

-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the gf128mul_x_ble() calls again.
+ */
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+                         bool enc)
 {
-        struct rctx *rctx = skcipher_request_ctx(req);
-        le128 *buf = rctx->ext ?: rctx->buf;
-        struct skcipher_request *subreq;
+        struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
         const int bs = XTS_BLOCK_SIZE;
         struct skcipher_walk w;
-        struct scatterlist *sg;
-        unsigned offset;
+        le128 t = rctx->t;
         int err;

-        subreq = &rctx->subreq;
-        err = skcipher_walk_virt(&w, subreq, false);
-
-        while (w.nbytes) {
-                unsigned int avail = w.nbytes;
-                le128 *wdst;
-
-                wdst = w.dst.virt.addr;
-
-                do {
-                        le128_xor(wdst, buf++, wdst);
-                        wdst++;
-                } while ((avail -= bs) >= bs);
-
-                err = skcipher_walk_done(&w, avail);
+        if (second_pass) {
+                req = &rctx->subreq;
+                /* set to our TFM to enforce correct alignment: */
+                skcipher_request_set_tfm(req, tfm);
         }
-
-        rctx->left -= subreq->cryptlen;
-
-        if (err || !rctx->left)
-                goto out;
-
-        rctx->dst = rctx->dstbuf;
-
-        scatterwalk_done(&w.out, 0, 1);
-        sg = w.out.sg;
-        offset = w.out.offset;
-
-        if (rctx->dst != sg) {
-                rctx->dst[0] = *sg;
-                sg_unmark_end(rctx->dst);
-                scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-        }
-        rctx->dst[0].length -= offset - sg->offset;
-        rctx->dst[0].offset = offset;
-
-out:
-        return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
-        struct rctx *rctx = skcipher_request_ctx(req);
-        le128 *buf = rctx->ext ?: rctx->buf;
-        struct skcipher_request *subreq;
-        const int bs = XTS_BLOCK_SIZE;
-        struct skcipher_walk w;
-        struct scatterlist *sg;
-        unsigned cryptlen;
-        unsigned offset;
-        bool more;
-        int err;
-
-        subreq = &rctx->subreq;
-        cryptlen = subreq->cryptlen;
-
-        more = rctx->left > cryptlen;
-        if (!more)
-                cryptlen = rctx->left;
-
-        skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-                                   cryptlen, NULL);
-
-        err = skcipher_walk_virt(&w, subreq, false);
+        err = skcipher_walk_virt(&w, req, false);

         while (w.nbytes) {
                 unsigned int avail = w.nbytes;
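The comment introduced above trades a second tweak computation against a temporary buffer: regenerating a block's tweak is one 128-bit shift-and-XOR. For reference, this is the doubling step gf128mul_x_ble() performs, written as a hedged standalone sketch over host-order uint64_t halves instead of the kernel's le128 type:

#include <stdint.h>

/*
 * Sketch of the tweak-doubling step (the semantics of gf128mul_x_ble()):
 * multiply the 128-bit tweak by x in GF(2^128) using the XTS polynomial
 * x^128 + x^7 + x^2 + x + 1, whose low bits give the constant 0x87.
 */
struct tweak128 { uint64_t lo, hi; };   /* lo = first 8 bytes of the block */

static void tweak_double(struct tweak128 *t)
{
        uint64_t carry = t->hi >> 63;           /* bit shifted off the top */

        t->hi = (t->hi << 1) | (t->lo >> 63);
        t->lo = (t->lo << 1) ^ (carry * 0x87);  /* reduce by the polynomial */
}

Starting from T = E_K2(IV), block i is masked with T·x^i; both passes of xts_xor_tweak() replay this sequence from rctx->t, which is why no per-block tweak buffer is needed.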
@@ -181,65 +106,151 @@
                 wdst = w.dst.virt.addr;

                 do {
-                        *buf++ = rctx->t;
-                        le128_xor(wdst++, &rctx->t, wsrc++);
-                        gf128mul_x_ble(&rctx->t, &rctx->t);
+                        if (unlikely(cts) &&
+                            w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
+                                if (!enc) {
+                                        if (second_pass)
+                                                rctx->t = t;
+                                        gf128mul_x_ble(&t, &t);
+                                }
+                                le128_xor(wdst, &t, wsrc);
+                                if (enc && second_pass)
+                                        gf128mul_x_ble(&rctx->t, &t);
+                                skcipher_walk_done(&w, avail - bs);
+                                return 0;
+                        }
+
+                        le128_xor(wdst++, &t, wsrc++);
+                        gf128mul_x_ble(&t, &t);
                 } while ((avail -= bs) >= bs);

                 err = skcipher_walk_done(&w, avail);
         }

-        skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-                                   cryptlen, NULL);
-
-        if (err || !more)
-                goto out;
-
-        rctx->src = rctx->srcbuf;
-
-        scatterwalk_done(&w.in, 0, 1);
-        sg = w.in.sg;
-        offset = w.in.offset;
-
-        if (rctx->src != sg) {
-                rctx->src[0] = *sg;
-                sg_unmark_end(rctx->src);
-                scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-        }
-        rctx->src[0].length -= offset - sg->offset;
-        rctx->src[0].offset = offset;
-
-out:
         return err;
 }

-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-        struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
-        gfp_t gfp;
+        return xts_xor_tweak(req, false, enc);
+}

-        subreq = &rctx->subreq;
-        skcipher_request_set_tfm(subreq, ctx->child);
-        skcipher_request_set_callback(subreq, req->base.flags, done, req);
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
+{
+        return xts_xor_tweak(req, true, enc);
+}

-        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-                                                           GFP_ATOMIC;
-        rctx->ext = NULL;
+static void xts_cts_done(struct crypto_async_request *areq, int err)
+{
+        struct skcipher_request *req = areq->data;
+        le128 b;

-        subreq->cryptlen = XTS_BUFFER_SIZE;
-        if (req->cryptlen > XTS_BUFFER_SIZE) {
-                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+        if (!err) {
+                struct xts_request_ctx *rctx = skcipher_request_ctx(req);

-                rctx->ext = kmalloc(n, gfp);
-                if (rctx->ext)
-                        subreq->cryptlen = n;
+                scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+                le128_xor(&b, &rctx->t, &b);
+                scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
         }

-        rctx->src = req->src;
-        rctx->dst = req->dst;
-        rctx->left = req->cryptlen;
+        skcipher_request_complete(req, err);
+}
+
+static int xts_cts_final(struct skcipher_request *req,
+                         int (*crypt)(struct skcipher_request *req))
+{
+        const struct xts_tfm_ctx *ctx =
+                crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+        int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
+        struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
+        int tail = req->cryptlen % XTS_BLOCK_SIZE;
+        le128 b[2];
+        int err;
+
+        rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
+                                      offset - XTS_BLOCK_SIZE);
+
+        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+        b[1] = b[0];
+        scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+
+        le128_xor(b, &rctx->t, b);
+
+        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+
+        skcipher_request_set_tfm(subreq, ctx->child);
+        skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+                                      req);
+        skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
+                                   XTS_BLOCK_SIZE, NULL);
+
+        err = crypt(subreq);
+        if (err)
+                return err;
+
+        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+        le128_xor(b, &rctx->t, b);
+        scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+
+        return 0;
+}
+
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+{
+        struct skcipher_request *req = areq->data;
+
+        if (!err) {
+                struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+
+                rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+                err = xts_xor_tweak_post(req, true);
+
+                if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+                        err = xts_cts_final(req, crypto_skcipher_encrypt);
+                        if (err == -EINPROGRESS || err == -EBUSY)
+                                return;
+                }
+        }
+
+        skcipher_request_complete(req, err);
+}
+
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+{
+        struct skcipher_request *req = areq->data;
+
+        if (!err) {
+                struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+
+                rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+                err = xts_xor_tweak_post(req, false);
+
+                if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+                        err = xts_cts_final(req, crypto_skcipher_decrypt);
+                        if (err == -EINPROGRESS || err == -EBUSY)
+                                return;
+                }
+        }
+
+        skcipher_request_complete(req, err);
+}
+
+static int xts_init_crypt(struct skcipher_request *req,
+                          crypto_completion_t compl)
+{
+        const struct xts_tfm_ctx *ctx =
+                crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+        struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
+
+        if (req->cryptlen < XTS_BLOCK_SIZE)
+                return -EINVAL;
+
+        skcipher_request_set_tfm(subreq, ctx->child);
+        skcipher_request_set_callback(subreq, req->base.flags, compl, req);
+        skcipher_request_set_crypt(subreq, req->dst, req->dst,
+                                   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

         /* calculate first value of T */
         crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
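The xts_cts_final() added above supplies the ciphertext stealing that the old header comment declared unsupported: the last full plaintext block is encrypted first, the head of that ciphertext becomes the final partial ciphertext block, and its stolen tail pads the partial plaintext, which is then encrypted under the next tweak and written where the last full block was. A buffer-level sketch of the same data movement, over flat memory instead of scatterlists, with a hypothetical encrypt_block() callback standing in for the tweaked single-block transform (XOR with T, ECB, XOR with T):

#include <stdint.h>
#include <string.h>

#define BS 16

/*
 * Sketch of XTS ciphertext stealing for BS < len < 2*BS: 'in' holds one
 * full plaintext block P1 followed by a partial block P2 of (len - BS)
 * bytes; tweak_idx 0 and 1 select the two consecutive tweaks.
 */
static void xts_cts_encrypt_tail(uint8_t *out, const uint8_t *in, size_t len,
                                 void (*encrypt_block)(uint8_t b[BS],
                                                       int tweak_idx))
{
        size_t tail = len - BS;
        uint8_t b[BS];

        memcpy(b, in, BS);
        encrypt_block(b, 0);            /* CC = E(P1) under tweak T(m-1) */

        memcpy(out + BS, b, tail);      /* head of CC -> final partial block */
        memcpy(b, in + BS, tail);       /* P2 over the head, tail is stolen */
        encrypt_block(b, 1);            /* under the next tweak T(m) */
        memcpy(out, b, BS);             /* emitted as the last full block */
}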
@@ -247,121 +258,45 @@
         return 0;
 }

-static void exit_crypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
 {
-        struct rctx *rctx = skcipher_request_ctx(req);
+        struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
+        int err;

-        rctx->left = 0;
+        err = xts_init_crypt(req, xts_encrypt_done) ?:
+              xts_xor_tweak_pre(req, true) ?:
+              crypto_skcipher_encrypt(subreq) ?:
+              xts_xor_tweak_post(req, true);

-        if (rctx->ext)
-                kzfree(rctx->ext);
+        if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+                return err;
+
+        return xts_cts_final(req, crypto_skcipher_encrypt);
 }

-static int do_encrypt(struct skcipher_request *req, int err)
+static int xts_decrypt(struct skcipher_request *req)
 {
-        struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
+        struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
+        int err;

-        subreq = &rctx->subreq;
+        err = xts_init_crypt(req, xts_decrypt_done) ?:
+              xts_xor_tweak_pre(req, false) ?:
+              crypto_skcipher_decrypt(subreq) ?:
+              xts_xor_tweak_post(req, false);

-        while (!err && rctx->left) {
-                err = pre_crypt(req) ?:
-                      crypto_skcipher_encrypt(subreq) ?:
-                      post_crypt(req);
+        if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+                return err;

-                if (err == -EINPROGRESS || err == -EBUSY)
-                        return err;
-        }
-
-        exit_crypt(req);
-        return err;
+        return xts_cts_final(req, crypto_skcipher_decrypt);
 }

-static void encrypt_done(struct crypto_async_request *areq, int err)
-{
-        struct skcipher_request *req = areq->data;
-        struct skcipher_request *subreq;
-        struct rctx *rctx;
-
-        rctx = skcipher_request_ctx(req);
-
-        if (err == -EINPROGRESS) {
-                if (rctx->left != req->cryptlen)
-                        return;
-                goto out;
-        }
-
-        subreq = &rctx->subreq;
-        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-        err = do_encrypt(req, err ?: post_crypt(req));
-        if (rctx->left)
-                return;
-
-out:
-        skcipher_request_complete(req, err);
-}
-
-static int encrypt(struct skcipher_request *req)
-{
-        return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
-{
-        struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
-
-        subreq = &rctx->subreq;
-
-        while (!err && rctx->left) {
-                err = pre_crypt(req) ?:
-                      crypto_skcipher_decrypt(subreq) ?:
-                      post_crypt(req);
-
-                if (err == -EINPROGRESS || err == -EBUSY)
-                        return err;
-        }
-
-        exit_crypt(req);
-        return err;
-}
-
-static void decrypt_done(struct crypto_async_request *areq, int err)
-{
-        struct skcipher_request *req = areq->data;
-        struct skcipher_request *subreq;
-        struct rctx *rctx;
-
-        rctx = skcipher_request_ctx(req);
-
-        if (err == -EINPROGRESS) {
-                if (rctx->left != req->cryptlen)
-                        return;
-                goto out;
-        }
-
-        subreq = &rctx->subreq;
-        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-        err = do_decrypt(req, err ?: post_crypt(req));
-        if (rctx->left)
-                return;
-
-out:
-        skcipher_request_complete(req, err);
-}
-
-static int decrypt(struct skcipher_request *req)
-{
-        return do_decrypt(req, init_crypt(req, decrypt_done));
-}
-
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
 {
         struct skcipher_instance *inst = skcipher_alg_instance(tfm);
         struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-        struct priv *ctx = crypto_skcipher_ctx(tfm);
+        struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
         struct crypto_skcipher *child;
         struct crypto_cipher *tweak;
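After this rework the template behaves like any other skcipher, so a caller never sees the CTS plumbing. A hedged in-kernel usage sketch (the function name and 4096-byte sector size are illustrative, and error handling is trimmed): allocate "xts(aes)", set a 64-byte key (two AES-256 halves), and encrypt one sector using the sector number as tweak:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_xts_encrypt_sector(u8 *sector, const u8 *key512,
                                      u64 sector_nr)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        u8 iv[16] = {};
        int err;

        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key512, 64);  /* 2 x AES-256 */
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        *(__le64 *)iv = cpu_to_le64(sector_nr);         /* tweak = sector # */
        sg_init_one(&sg, sector, 4096);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, 4096, iv);
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}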
@@ -380,41 +315,39 @@
         ctx->tweak = tweak;

         crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
-                                         sizeof(struct rctx));
+                                         sizeof(struct xts_request_ctx));

         return 0;
 }

-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-        struct priv *ctx = crypto_skcipher_ctx(tfm);
+        struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

         crypto_free_skcipher(ctx->child);
         crypto_free_cipher(ctx->tweak);
 }

-static void free_inst(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
 {
-        crypto_drop_skcipher(skcipher_instance_ctx(inst));
+        struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+        crypto_drop_skcipher(&ictx->spawn);
         kfree(inst);
 }

-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
         struct skcipher_instance *inst;
-        struct crypto_attr_type *algt;
         struct xts_instance_ctx *ctx;
         struct skcipher_alg *alg;
         const char *cipher_name;
         u32 mask;
         int err;

-        algt = crypto_get_attr_type(tb);
-        if (IS_ERR(algt))
-                return PTR_ERR(algt);
-
-        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
-                return -EINVAL;
+        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+        if (err)
+                return err;

         cipher_name = crypto_attr_alg_name(tb[1]);
         if (IS_ERR(cipher_name))
@@ -426,20 +359,17 @@

         ctx = skcipher_instance_ctx(inst);

-        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
-
-        mask = crypto_requires_off(algt->type, algt->mask,
-                                   CRYPTO_ALG_NEED_FALLBACK |
-                                   CRYPTO_ALG_ASYNC);
-
-        err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
+        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+                                   cipher_name, 0, mask);
         if (err == -ENOENT) {
                 err = -ENAMETOOLONG;
                 if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                              cipher_name) >= CRYPTO_MAX_ALG_NAME)
                         goto err_free_inst;

-                err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
+                err = crypto_grab_skcipher(&ctx->spawn,
                                            skcipher_crypto_instance(inst),
+                                           ctx->name, 0, mask);
         }

         if (err)
@@ -449,15 +379,15 @@

         err = -EINVAL;
         if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
-                goto err_drop_spawn;
+                goto err_free_inst;

         if (crypto_skcipher_alg_ivsize(alg))
-                goto err_drop_spawn;
+                goto err_free_inst;

         err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
                                   &alg->base);
         if (err)
-                goto err_drop_spawn;
+                goto err_free_inst;

         err = -EINVAL;
         cipher_name = alg->base.cra_name;
@@ -466,26 +396,25 @@
          * cipher name.
          */
         if (!strncmp(cipher_name, "ecb(", 4)) {
-                unsigned len;
+                int len;

-                len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
-                if (len < 2 || len >= sizeof(ctx->name))
-                        goto err_drop_spawn;
+                len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+                if (len < 2)
+                        goto err_free_inst;

                 if (ctx->name[len - 1] != ')')
-                        goto err_drop_spawn;
+                        goto err_free_inst;

                 ctx->name[len - 1] = 0;

                 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                              "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
                         err = -ENAMETOOLONG;
-                        goto err_drop_spawn;
+                        goto err_free_inst;
                 }
         } else
-                goto err_drop_spawn;
+                goto err_free_inst;

-        inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
         inst->alg.base.cra_priority = alg->base.cra_priority;
         inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
         inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
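On the naming logic above: the template only accepts an inner algorithm whose resolved cra_name has the form ecb(x), and derives the instance name xts(x) from it, so that requesting "xts(aes)" can be backed by any accelerated "ecb(aes)" driver. The string transformation, pulled out into a hedged standalone sketch (xts_derive_name() is a hypothetical helper, not in the patch):

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the name derivation in xts_create(): given the child's
 * resolved cra_name "ecb(<x>)", recover <x> and build "xts(<x>)".
 * Returns 0 on success, -1 if the name is not of the ecb(..) form
 * or does not fit.
 */
static int xts_derive_name(const char *child, char *out, size_t outlen)
{
        size_t len = strlen(child);

        if (strncmp(child, "ecb(", 4) != 0 || len < 6 || child[len - 1] != ')')
                return -1;

        /* copy <x>, skipping the "ecb(" prefix and the trailing ')' */
        if (snprintf(out, outlen, "xts(%.*s)",
                     (int)(len - 5), child + 4) >= (int)outlen)
                return -1;

        return 0;
}

For example, xts_derive_name("ecb(aes)", buf, sizeof(buf)) produces "xts(aes)", matching the snprintf in the hunk above.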
@@ -495,50 +424,45 @@
         inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
         inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

-        inst->alg.base.cra_ctxsize = sizeof(struct priv);
+        inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

-        inst->alg.init = init_tfm;
-        inst->alg.exit = exit_tfm;
+        inst->alg.init = xts_init_tfm;
+        inst->alg.exit = xts_exit_tfm;

-        inst->alg.setkey = setkey;
-        inst->alg.encrypt = encrypt;
-        inst->alg.decrypt = decrypt;
+        inst->alg.setkey = xts_setkey;
+        inst->alg.encrypt = xts_encrypt;
+        inst->alg.decrypt = xts_decrypt;

-        inst->free = free_inst;
+        inst->free = xts_free_instance;

         err = skcipher_register_instance(tmpl, inst);
-        if (err)
-                goto err_drop_spawn;
-
-out:
-        return err;
-
-err_drop_spawn:
-        crypto_drop_skcipher(&ctx->spawn);
+        if (err) {
 err_free_inst:
-        kfree(inst);
-        goto out;
+                xts_free_instance(inst);
+        }
+        return err;
 }

-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
         .name = "xts",
-        .create = create,
+        .create = xts_create,
         .module = THIS_MODULE,
 };

-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
 {
-        return crypto_register_template(&crypto_tmpl);
+        return crypto_register_template(&xts_tmpl);
 }

-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
 {
-        crypto_unregister_template(&crypto_tmpl);
+        crypto_unregister_template(&xts_tmpl);
 }

-module_init(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);

 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
 MODULE_ALIAS_CRYPTO("xts");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
---|