2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -9,12 +9,47 @@
  * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
  */
 #include <linux/device.h>
+#include <asm/unaligned.h>
 #include "rk3288_crypto.h"
 
 /*
  * IC can not process zero message hash,
  * so we put the fixed hash out when met zero message.
  */
+
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+	struct scatterlist *sg;
+
+	sg = req->src;
+	while (sg) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+			return true;
+		}
+		if (sg->length % 4) {
+			return true;
+		}
+		sg = sg_next(sg);
+	}
+	return false;
+}
+
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+	rctx->fallback_req.base.flags = areq->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	rctx->fallback_req.nbytes = areq->nbytes;
+	rctx->fallback_req.src = areq->src;
+	rctx->fallback_req.result = areq->result;
+
+	return crypto_ahash_digest(&rctx->fallback_req);
+}
 
 static int zero_message_process(struct ahash_request *req)
 {
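
Note: the two functions added in the hunk above form the software-fallback path. The hash unit can only DMA scatterlist entries whose offset is 32-bit aligned and whose length is a multiple of 4 bytes; any other request is redirected to a generic software ahash. A minimal sketch of a request that would trip the check (hypothetical buffer, single-entry scatterlist):

	/* Sketch, not part of the patch: an sg entry with an unaligned
	 * offset or a length that is not a multiple of 4 makes
	 * rk_ahash_need_fallback() return true, so the request is served
	 * by the fallback tfm instead of the hardware. buf is a
	 * hypothetical kmalloc'd buffer. */
	struct scatterlist sg;

	sg_init_one(&sg, buf + 1, 7);	/* unaligned offset, length % 4 != 0 */
	ahash_request_set_crypt(req, &sg, result, 7);
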
@@ -38,17 +73,13 @@
 	return 0;
 }
 
-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+static void rk_ahash_reg_init(struct ahash_request *req)
 {
-	if (base->complete)
-		base->complete(base, err);
-}
-
-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
-{
-	struct ahash_request *req = ahash_request_cast(dev->async_req);
 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
-	int reg_status = 0;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct rk_crypto_info *dev = tctx->dev;
+	int reg_status;
 
 	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
 		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
@@ -74,7 +105,7 @@
 			      RK_CRYPTO_BYTESWAP_BRFIFO |
 			      RK_CRYPTO_BYTESWAP_BTFIFO);
 
-	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
 }
 
 static int rk_ahash_init(struct ahash_request *req)
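
For context on the register writes in rk_ahash_reg_init() above: the rk3288 control registers appear to carry a write-enable mask in their upper 16 bits, which is why the code ORs in _SBF(0xffff, 16) here and pairs RK_CRYPTO_HASH_START with (RK_CRYPTO_HASH_START << 16) later in crypto_ahash_dma_start(). A hedged illustration with a hypothetical helper:

	/* Sketch, not part of the patch: setting bits in a masked control
	 * register. rk_ctrl_set() is a hypothetical helper; CRYPTO_WRITE
	 * and RK_CRYPTO_CTRL come from rk3288_crypto.h. */
	static inline void rk_ctrl_set(struct rk_crypto_info *dev, u32 bits)
	{
		/* low half: the bit values; high half: which bits to latch */
		CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, bits | (bits << 16));
	}
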
@@ -167,48 +198,64 @@
 	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct rk_crypto_info *dev = tctx->dev;
 
+	if (rk_ahash_need_fallback(req))
+		return rk_ahash_digest_fb(req);
+
 	if (!req->nbytes)
 		return zero_message_process(req);
-	else
-		return dev->enqueue(dev, &req->base);
+
+	return crypto_transfer_hash_request_to_engine(dev->engine, req);
 }
 
-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
 {
-	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
-	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
+	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
 	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
 					  (RK_CRYPTO_HASH_START << 16));
 }
 
-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
 {
-	int err;
+	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+	int ret;
 
-	err = dev->load_data(dev, dev->sg_src, NULL);
-	if (!err)
-		crypto_ahash_dma_start(dev);
-	return err;
+	ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+	if (ret <= 0)
+		return -EINVAL;
+
+	rctx->nrsg = ret;
+
+	return 0;
 }
 
-static int rk_ahash_start(struct rk_crypto_info *dev)
+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
 {
-	struct ahash_request *req = ahash_request_cast(dev->async_req);
-	struct crypto_ahash *tfm;
-	struct rk_ahash_rctx *rctx;
+	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
 
-	dev->total = req->nbytes;
-	dev->left_bytes = req->nbytes;
-	dev->aligned = 0;
-	dev->align_size = 4;
-	dev->sg_dst = NULL;
-	dev->sg_src = req->src;
-	dev->first = req->src;
-	dev->src_nents = sg_nents(req->src);
-	rctx = ahash_request_ctx(req);
+	dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+	return 0;
+}
+
+static int rk_hash_run(struct crypto_engine *engine, void *breq)
+{
+	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct scatterlist *sg = areq->src;
+	int err = 0;
+	int i;
+	u32 v;
+
 	rctx->mode = 0;
 
-	tfm = crypto_ahash_reqtfm(req);
 	switch (crypto_ahash_digestsize(tfm)) {
 	case SHA1_DIGEST_SIZE:
 		rctx->mode = RK_CRYPTO_HASH_SHA1;
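
The hunk above moves request dispatch from the driver's private queue (dev->enqueue) to the shared crypto_engine, whose worker later invokes the prepare/do_one_request/unprepare callbacks registered in rk_cra_hash_init() below. The engine itself would be created in the driver's probe path; a sketch, assuming this happens in rk3288_crypto.c as part of the same series:

	/* Sketch, not part of the patch: typical crypto_engine bring-up in
	 * probe; the exact location in rk3288_crypto.c is an assumption. */
	dev->engine = crypto_engine_alloc_init(dev->dev, true);
	if (!dev->engine)
		return -ENOMEM;
	err = crypto_engine_start(dev->engine);
	if (err)
		crypto_engine_exit(dev->engine);
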
@@ -220,32 +267,26 @@
 		rctx->mode = RK_CRYPTO_HASH_MD5;
 		break;
 	default:
-		return -EINVAL;
+		err = -EINVAL;
+		goto theend;
 	}
 
-	rk_ahash_reg_init(dev);
-	return rk_ahash_set_data_start(dev);
-}
+	rk_ahash_reg_init(areq);
 
-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
-{
-	int err = 0;
-	struct ahash_request *req = ahash_request_cast(dev->async_req);
-	struct crypto_ahash *tfm;
-
-	dev->unload_data(dev);
-	if (dev->left_bytes) {
-		if (dev->aligned) {
-			if (sg_is_last(dev->sg_src)) {
-				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
-					 __func__, __LINE__);
-				err = -ENOMEM;
-				goto out_rx;
-			}
-			dev->sg_src = sg_next(dev->sg_src);
+	while (sg) {
+		reinit_completion(&tctx->dev->complete);
+		tctx->dev->status = 0;
+		crypto_ahash_dma_start(tctx->dev, sg);
+		wait_for_completion_interruptible_timeout(&tctx->dev->complete,
+							  msecs_to_jiffies(2000));
+		if (!tctx->dev->status) {
+			dev_err(tctx->dev->dev, "DMA timeout\n");
+			err = -EFAULT;
+			goto theend;
 		}
-		err = rk_ahash_set_data_start(dev);
-	} else {
+		sg = sg_next(sg);
+	}
+
 	/*
 	 * it will take some time to process date after last dma
 	 * transmission.
@@ -256,18 +297,20 @@
 	 * efficiency, and make it response quickly when dma
 	 * complete.
 	 */
-	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
-		udelay(10);
+	while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS))
+		udelay(10);
 
-	tfm = crypto_ahash_reqtfm(req);
-	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
-		      crypto_ahash_digestsize(tfm));
-	dev->complete(dev->async_req, 0);
-	tasklet_schedule(&dev->queue_task);
+	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
+		v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+		put_unaligned_le32(v, areq->result + i * 4);
 	}
 
-out_rx:
-	return err;
+theend:
+	local_bh_disable();
+	crypto_finalize_hash_request(engine, breq, err);
+	local_bh_enable();
+
+	return 0;
 }
 
 static int rk_cra_hash_init(struct crypto_tfm *tfm)
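
rk_hash_run() above drives one DMA transfer per scatterlist entry, waiting on a completion that the interrupt handler signals through dev->status, then reads the digest out of the HASH_DOUT registers with put_unaligned_le32() so an unaligned result buffer is safe. Note that the code detects a timeout only through dev->status; a variant that also checks the return value of the wait helper would look like this (a sketch, not what the patch does):

	/* Sketch, not part of the patch:
	 * wait_for_completion_interruptible_timeout() returns 0 on timeout
	 * and a negative value if interrupted, so its result can be checked
	 * directly in addition to tctx->dev->status. */
	long t;

	t = wait_for_completion_interruptible_timeout(&tctx->dev->complete,
						      msecs_to_jiffies(2000));
	if (t <= 0) {
		dev_err(tctx->dev->dev, "DMA timeout\n");
		err = -EFAULT;
		goto theend;
	}
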
@@ -281,14 +324,6 @@
 	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
 
 	tctx->dev = algt->dev;
-	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
-	if (!tctx->dev->addr_vir) {
-		dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
-		return -ENOMEM;
-	}
-	tctx->dev->start = rk_ahash_start;
-	tctx->dev->update = rk_ahash_crypto_rx;
-	tctx->dev->complete = rk_ahash_crypto_complete;
 
 	/* for fallback */
 	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
@@ -297,19 +332,23 @@
 		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
 		return PTR_ERR(tctx->fallback_tfm);
 	}
+
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct rk_ahash_rctx) +
 				 crypto_ahash_reqsize(tctx->fallback_tfm));
 
-	return tctx->dev->enable_clk(tctx->dev);
+	tctx->enginectx.op.do_one_request = rk_hash_run;
+	tctx->enginectx.op.prepare_request = rk_hash_prepare;
+	tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
+
+	return 0;
 }
 
 static void rk_cra_hash_exit(struct crypto_tfm *tfm)
 {
 	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
 
-	free_page((unsigned long)tctx->dev->addr_vir);
-	return tctx->dev->disable_clk(tctx->dev);
+	crypto_free_ahash(tctx->fallback_tfm);
 }
 
 struct rk_crypto_tmp rk_ahash_sha1 = {