2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/crypto/caam/caamhash.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
@@ -62,6 +64,8 @@
 #include "error.h"
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
+#include "caamhash_desc.h"
+#include <crypto/engine.h>
 
 #define CAAM_CRA_PRIORITY		3000
 
@@ -71,14 +75,6 @@
 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
 
-/* length of descriptors text */
-#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
 					 CAAM_MAX_HASH_KEY_SIZE)
 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -87,29 +83,23 @@
 #define HASH_MSG_LEN			8
 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
 
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-
 static struct list_head hash_list;
 
 /* ahash per-session context */
 struct caam_hash_ctx {
+	struct crypto_engine_ctx enginectx;
 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
 	enum dma_data_direction dir;
+	enum dma_data_direction key_dir;
 	struct device *jrdev;
-	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	int ctx_len;
 	struct alginfo adata;
 };
@@ -119,15 +109,16 @@
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
 	int ctx_dma_len;
-	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-	int buflen_0;
-	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-	int buflen_1;
+	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen;
+	int next_buflen;
 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
-	int (*update)(struct ahash_request *req);
+	int (*update)(struct ahash_request *req) ____cacheline_aligned;
 	int (*final)(struct ahash_request *req);
 	int (*finup)(struct ahash_request *req);
-	int current_buf;
+	struct ahash_edesc *edesc;
+	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
+			      void *context);
 };
 
 struct caam_export_state {
@@ -139,31 +130,11 @@
 	int (*finup)(struct ahash_request *req);
 };
 
-static inline void switch_buf(struct caam_hash_state *state)
+static inline bool is_cmac_aes(u32 algtype)
 {
-	state->current_buf ^= 1;
+	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
 }
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
-	return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
-	return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
-	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
-	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -190,12 +161,12 @@
 				     struct sec4_sg_entry *sec4_sg,
 				     struct caam_hash_state *state)
 {
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 
 	if (!buflen)
 		return 0;
 
-	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
 					DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, state->buf_dma)) {
 		dev_err(jrdev, "unable to map buf\n");
@@ -226,60 +197,6 @@
 	return 0;
 }
 
-/*
- * For ahash update, final and finup (import_ctx = true)
- *     import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- *     read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-				     struct caam_hash_ctx *ctx, bool import_ctx,
-				     int era)
-{
-	u32 op = ctx->adata.algtype;
-	u32 *skip_key_load;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Append key if it has been set; ahash update excluded */
-	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-		/* Skip key loading if already shared */
-		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-					    JUMP_COND_SHRD);
-
-		if (era < 6)
-			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-					  ctx->adata.keylen, CLASS_2 |
-					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		else
-			append_proto_dkp(desc, &ctx->adata);
-
-		set_jump_tgt_here(desc, skip_key_load);
-
-		op |= OP_ALG_AAI_HMAC_PRECOMP;
-	}
-
-	/* If needed, import context from software */
-	if (import_ctx)
-		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-				LDST_SRCDST_BYTE_CONTEXT);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 * Calculate remaining bytes to read
-	 */
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	/* Read remaining bytes */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-			     FIFOLD_TYPE_MSG | KEY_VLF);
-	/* Store class2 context bytes */
-	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-}
-
 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -292,64 +209,160 @@
 
 	/* ahash_update shared descriptor */
 	desc = ctx->sh_desc_update;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash update shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash update first shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
+			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);
 
 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash digest shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	return 0;
+}
+
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	/* shared descriptor for ahash_update */
+	desc = ctx->sh_desc_update;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+			    ctx->ctx_len, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* shared descriptor for ahash_{final,finup} */
+	desc = ctx->sh_desc_fin;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+			    digestsize, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* key is immediate data for INIT and INITFINAL states */
+	ctx->adata.key_virt = ctx->key;
+
+	/* shared descriptor for first invocation of ahash_update */
+	desc = ctx->sh_desc_update_first;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			    ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
+			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for ahash_digest */
+	desc = ctx->sh_desc_digest;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+			    digestsize, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+	return 0;
+}
+
+static int acmac_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	/* shared descriptor for ahash_update */
+	desc = ctx->sh_desc_update;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+			    ctx->ctx_len, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for ahash_{final,finup} */
+	desc = ctx->sh_desc_fin;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+			    digestsize, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for first invocation of ahash_update */
+	desc = ctx->sh_desc_update_first;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			    ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
+			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for ahash_digest */
+	desc = ctx->sh_desc_digest;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+			    digestsize, ctx->ctx_len);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	return 0;
 }
 
 /* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			   u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+			   u32 digestsize)
 {
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	struct split_key_result result;
-	dma_addr_t src_dma, dst_dma;
+	dma_addr_t key_dma;
 	int ret;
 
 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -360,18 +373,9 @@
 
 	init_job_desc(desc, 0);
 
-	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, src_dma)) {
-		dev_err(jrdev, "unable to map key input memory\n");
-		kfree(desc);
-		return -ENOMEM;
-	}
-	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
-				 DMA_FROM_DEVICE);
-	if (dma_mapping_error(jrdev, dst_dma)) {
-		dev_err(jrdev, "unable to map key output memory\n");
-		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(jrdev, key_dma)) {
+		dev_err(jrdev, "unable to map key memory\n");
 		kfree(desc);
 		return -ENOMEM;
 	}
@@ -379,37 +383,33 @@
 	/* Job descriptor to perform unkeyed hash on key_in */
 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
 			 OP_ALG_AS_INITFINAL);
-	append_seq_in_ptr(desc, src_dma, *keylen, 0);
+	append_seq_in_ptr(desc, key_dma, *keylen, 0);
 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
-	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+	append_seq_out_ptr(desc, key_dma, digestsize, 0);
 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
 	result.err = 0;
 	init_completion(&result.completion);
 
 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
-	if (!ret) {
+	if (ret == -EINPROGRESS) {
 		/* in progress */
 		wait_for_completion(&result.completion);
 		ret = result.err;
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR,
-			       "digested key@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
-			       digestsize, 1);
-#endif
+
+		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, key,
+				     digestsize, 1);
 	}
-	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
-	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
 
 	*keylen = digestsize;
 
@@ -422,24 +422,20 @@
 			const u8 *key, unsigned int keylen)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *jrdev = ctx->jrdev;
 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 	int ret;
 	u8 *hashed_key = NULL;
 
-#ifdef DEBUG
-	printk(KERN_ERR "keylen %d\n", keylen);
-#endif
+	dev_dbg(jrdev, "keylen %d\n", keylen);
 
 	if (keylen > blocksize) {
-		hashed_key = kmalloc_array(digestsize,
-					   sizeof(*hashed_key),
-					   GFP_KERNEL | GFP_DMA);
+		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 		if (!hashed_key)
 			return -ENOMEM;
-		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
-				      digestsize);
+		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 		if (ret)
 			goto bad_free_key;
 		key = hashed_key;
@@ -459,6 +455,18 @@
 			goto bad_free_key;
 
 		memcpy(ctx->key, key, keylen);
+
+		/*
+		 * In case |user key| > |derived key|, using DKP<imm,imm>
+		 * would result in invalid opcodes (last bytes of user key) in
+		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
+		 * virtual and dma key addresses are needed.
+		 */
+		if (keylen > ctx->adata.keylen_pad)
+			dma_sync_single_for_device(ctx->jrdev,
+						   ctx->adata.key_dma,
+						   ctx->adata.keylen_pad,
+						   DMA_TO_DEVICE);
 	} else {
 		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
 				    keylen, CAAM_MAX_HASH_KEY_SIZE);
@@ -470,8 +478,47 @@
 	return ahash_set_sh_desc(ahash);
 bad_free_key:
 	kfree(hashed_key);
-	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
+}
+
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *jrdev = ctx->jrdev;
+
+	if (keylen != AES_KEYSIZE_128)
+		return -EINVAL;
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
+				   DMA_TO_DEVICE);
+	ctx->adata.keylen = keylen;
+
+	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+	return axcbc_set_sh_desc(ahash);
+}
+
+static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int err;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	/* key is immediate data for all cmac shared descriptors */
+	ctx->adata.key_virt = key;
+	ctx->adata.keylen = keylen;
+
+	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	return acmac_set_sh_desc(ahash);
 }
 
 /*
@@ -479,6 +526,7 @@
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
@@ -486,8 +534,9 @@
 	dma_addr_t sec4_sg_dma;
 	int src_nents;
 	int sec4_sg_bytes;
-	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
-	struct sec4_sg_entry sec4_sg[0];
+	bool bklog;
+	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
+	struct sec4_sg_entry sec4_sg[];
 };
 
 static inline void ahash_unmap(struct device *dev,
@@ -504,7 +553,7 @@
 			edesc->sec4_sg_bytes, DMA_TO_DEVICE);
 
 	if (state->buf_dma) {
-		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+		dma_unmap_single(dev, state->buf_dma, state->buflen,
 				 DMA_TO_DEVICE);
 		state->buf_dma = 0;
 	}
@@ -523,147 +572,133 @@
-static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
-		       void *context)
+static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
+				  void *context, enum dma_data_direction dir)
 {
 	struct ahash_request *req = context;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int ecode = 0;
+	bool has_bklog;
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	edesc = state->edesc;
+	has_bklog = edesc->bklog;
+
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
 	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-#endif
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
 
-	req->base.complete(&req->base, err);
+	/*
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
+	 */
+	if (!has_bklog)
+		req->base.complete(&req->base, ecode);
+	else
+		crypto_finalize_hash_request(jrp->engine, req, ecode);
 }
 
-static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
-			  void *context)
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+		       void *context)
 {
-	struct ahash_request *req = context;
-	struct ahash_edesc *edesc;
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
-	int digestsize = crypto_ahash_digestsize(ahash);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
-	switch_buf(state);
-	kfree(edesc);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
-#endif
-
-	req->base.complete(&req->base, err);
+	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
 }
 
 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 			       void *context)
 {
+	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
+}
+
+static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
+				     void *context, enum dma_data_direction dir)
+{
 	struct ahash_request *req = context;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	int digestsize = crypto_ahash_digestsize(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	int ecode = 0;
+	bool has_bklog;
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	edesc = state->edesc;
+	has_bklog = edesc->bklog;
 	if (err)
-		caam_jr_strstatus(jrdev, err);
+		ecode = caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
-	memcpy(req->result, state->caam_ctx, digestsize);
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-#endif
+	scatterwalk_map_and_copy(state->buf, req->src,
+				 req->nbytes - state->next_buflen,
+				 state->next_buflen, 0);
+	state->buflen = state->next_buflen;
 
-	req->base.complete(&req->base, err);
+	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+			     state->buflen, 1);
+
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+				     digestsize, 1);
+
+	/*
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
+	 */
+	if (!has_bklog)
+		req->base.complete(&req->base, ecode);
+	else
+		crypto_finalize_hash_request(jrp->engine, req, ecode);
+
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+			  void *context)
+{
+	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
 }
 
 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 			       void *context)
 {
-	struct ahash_request *req = context;
-	struct ahash_edesc *edesc;
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
-	int digestsize = crypto_ahash_digestsize(ahash);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
-	switch_buf(state);
-	kfree(edesc);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
-#endif
-
-	req->base.complete(&req->base, err);
+	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
 }
 
 /*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
-static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
 					     int sg_num, u32 *sh_desc,
-					     dma_addr_t sh_desc_dma,
-					     gfp_t flags)
+					     dma_addr_t sh_desc_dma)
 {
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	struct ahash_edesc *edesc;
 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
 
@@ -672,6 +707,8 @@
 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
 		return NULL;
 	}
+
+	state->edesc = edesc;
 
 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
 			     HDR_SHARE_DEFER | HDR_REVERSE);
@@ -690,9 +727,10 @@
 
 	if (nents > 1 || first_sg) {
 		struct sec4_sg_entry *sg = edesc->sec4_sg;
-		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+		unsigned int sgsize = sizeof(*sg) *
+				      pad_sg_nents(first_sg + nents);
 
-		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -714,6 +752,62 @@
 	return 0;
 }
 
+static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc = state->edesc->hw_desc;
+	int ret;
+
+	state->edesc->bklog = true;
+
+	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+
+	if (ret != -EINPROGRESS) {
+		ahash_unmap(jrdev, state->edesc, req, 0);
+		kfree(state->edesc);
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int ahash_enqueue_req(struct device *jrdev,
+			     void (*cbk)(struct device *jrdev, u32 *desc,
+					 u32 err, void *context),
+			     struct ahash_request *req,
+			     int dst_len, enum dma_data_direction dir)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct ahash_edesc *edesc = state->edesc;
+	u32 *desc = edesc->hw_desc;
+	int ret;
+
+	state->ahash_op_done = cbk;
+
+	/*
+	 * Only the backlog request are sent to crypto-engine since the others
+	 * can be handled by CAAM, if free, especially since JR has up to 1024
+	 * entries (more than the 10 entries from crypto-engine).
+	 */
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
+							     req);
+	else
+		ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
 /* submit update job descriptor */
 static int ahash_update_ctx(struct ahash_request *req)
 {
@@ -721,25 +815,35 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int *buflen = current_buflen(state);
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state), last_buflen;
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
+	int blocksize = crypto_ahash_blocksize(ahash);
 	int in_len = *buflen + req->nbytes, to_hash;
 	u32 *desc;
 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
-	last_buflen = *next_buflen;
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC and CMAC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -757,16 +861,15 @@
 		}
 
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
-		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
-				 sizeof(struct sec4_sg_entry);
+		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
+		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-					  ctx->sh_desc_update,
-					  ctx->sh_desc_update_dma, flags);
+		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
+					  ctx->sh_desc_update_dma);
 		if (!edesc) {
 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
@@ -784,18 +887,13 @@
 		if (ret)
 			goto unmap_ctx;
 
-		if (mapped_nents) {
-			sg_to_sec4_sg_last(req->src, mapped_nents,
+		if (mapped_nents)
+			sg_to_sec4_sg_last(req->src, src_len,
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   0);
-			if (*next_buflen)
-				scatterwalk_map_and_copy(next_buf, req->src,
-							 to_hash - *buflen,
-							 *next_buflen, 0);
-		} else {
+		else
 			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
 					    1);
-		}
 
 		desc = edesc->hw_desc;
 
@@ -813,33 +911,24 @@
 
 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
 
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-			       desc_bytes(desc), 1);
-#endif
+		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+				     desc_bytes(desc), 1);
 
-		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
-		if (ret)
-			goto unmap_ctx;
-
-		ret = -EINPROGRESS;
+		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
+					ctx->ctx_len, DMA_BIDIRECTIONAL);
 	} else if (*next_buflen) {
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
 		*buflen = *next_buflen;
-		*next_buflen = last_buflen;
+
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
-		       *next_buflen, 1);
-#endif
 
 	return ret;
- unmap_ctx:
+unmap_ctx:
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
 	kfree(edesc);
 	return ret;
@@ -851,22 +940,19 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	u32 *desc;
-	int sec4_sg_bytes, sec4_sg_src_index;
+	int sec4_sg_bytes;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret;
 
-	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
-	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
+			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
-				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
+				  ctx->sh_desc_fin_dma);
 	if (!edesc)
 		return -ENOMEM;
 
@@ -883,7 +969,7 @@
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
@@ -897,16 +983,12 @@
 			  LDST_SGF);
 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-	if (ret)
-		goto unmap_ctx;
-
-	return -EINPROGRESS;
+	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
 	kfree(edesc);
@@ -919,9 +1001,7 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	u32 *desc;
 	int sec4_sg_src_index;
 	int src_nents, mapped_nents;
@@ -949,9 +1029,8 @@
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -978,16 +1057,12 @@
 
 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-	if (ret)
-		goto unmap_ctx;
-
-	return -EINPROGRESS;
+	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
 	kfree(edesc);
@@ -1000,8 +1075,6 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	int src_nents, mapped_nents;
@@ -1028,9 +1101,8 @@
 	}
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
-				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1055,20 +1127,12 @@
 		return -ENOMEM;
 	}
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
-		kfree(edesc);
-	}
-
-	return ret;
+	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
+				 DMA_FROM_DEVICE);
 }
 
 /* submit ahash final if it the first job descriptor */
@@ -1078,18 +1142,16 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int buflen = *current_buflen(state);
+	u8 *buf = state->buf;
+	int buflen = state->buflen;
 	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
-				  ctx->sh_desc_digest_dma, flags);
+	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
+				  ctx->sh_desc_digest_dma);
 	if (!edesc)
 		return -ENOMEM;
 
@@ -1110,25 +1172,16 @@
 	if (ret)
 		goto unmap;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
-		kfree(edesc);
-	}
-
-	return ret;
+	return ahash_enqueue_req(jrdev, ahash_done, req,
+				 digestsize, DMA_FROM_DEVICE);
 unmap:
 	ahash_unmap(jrdev, edesc, req, digestsize);
 	kfree(edesc);
 	return -ENOMEM;
-
 }
 
 /* submit ahash update if it the first job descriptor after update */
@@ -1138,24 +1191,35 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int *buflen = current_buflen(state);
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state);
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
+	int blocksize = crypto_ahash_blocksize(ahash);
 	int in_len = *buflen + req->nbytes, to_hash;
 	int sec4_sg_bytes, src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
 	u32 *desc;
 	int ret = 0;
 
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC and CMAC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -1172,17 +1236,16 @@
 			mapped_nents = 0;
 		}
 
-		sec4_sg_bytes = (1 + mapped_nents) *
-				sizeof(struct sec4_sg_entry);
+		pad_nents = pad_sg_nents(1 + mapped_nents);
+		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+		edesc = ahash_edesc_alloc(req, pad_nents,
 					  ctx->sh_desc_update_first,
-					  ctx->sh_desc_update_first_dma,
-					  flags);
+					  ctx->sh_desc_update_first_dma);
 		if (!edesc) {
 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
@@ -1195,14 +1258,7 @@
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_sec4_sg_last(req->src, mapped_nents,
-				   edesc->sec4_sg + 1, 0);
-
-		if (*next_buflen) {
-			scatterwalk_map_and_copy(next_buf, req->src,
-						 to_hash - *buflen,
-						 *next_buflen, 0);
-		}
+		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
 
 		desc = edesc->hw_desc;
 
@@ -1221,17 +1277,14 @@
 		if (ret)
 			goto unmap_ctx;
 
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-			       desc_bytes(desc), 1);
-#endif
+		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+				     desc_bytes(desc), 1);
 
-		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
-		if (ret)
-			goto unmap_ctx;
-
-		ret = -EINPROGRESS;
+		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+					ctx->ctx_len, DMA_TO_DEVICE);
+		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+			return ret;
 		state->update = ahash_update_ctx;
 		state->finup = ahash_finup_ctx;
 		state->final = ahash_final_ctx;
@@ -1239,15 +1292,11 @@
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
 		*buflen = *next_buflen;
-		*next_buflen = 0;
+
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
-		       *next_buflen, 1);
-#endif
 
 	return ret;
 unmap_ctx:
@@ -1263,9 +1312,7 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
@@ -1294,9 +1341,8 @@
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1322,20 +1368,12 @@
 	if (ret)
 		goto unmap;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
-	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
-		kfree(edesc);
-	}
-
-	return ret;
+	return ahash_enqueue_req(jrdev, ahash_done, req,
+				 digestsize, DMA_FROM_DEVICE);
 unmap:
 	ahash_unmap(jrdev, edesc, req, digestsize);
 	kfree(edesc);
@@ -1350,19 +1388,29 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state);
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
 	int to_hash;
+	int blocksize = crypto_ahash_blocksize(ahash);
 	u32 *desc;
 	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
-	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
-				      1);
+	*next_buflen = req->nbytes & (blocksize - 1);
 	to_hash = req->nbytes - *next_buflen;
+
+	/*
+	 * For XCBC and CMAC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
 
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
@@ -1387,11 +1435,10 @@
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
 					  mapped_nents : 0,
 					  ctx->sh_desc_update_first,
-					  ctx->sh_desc_update_first_dma,
-					  flags);
+					  ctx->sh_desc_update_first_dma);
 		if (!edesc) {
 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
@@ -1404,27 +1451,20 @@
 		if (ret)
 			goto unmap_ctx;
 
-		if (*next_buflen)
-			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
-						 *next_buflen, 0);
-
 		desc = edesc->hw_desc;
 
 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 		if (ret)
 			goto unmap_ctx;
 
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-			       desc_bytes(desc), 1);
-#endif
+		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+				     desc_bytes(desc), 1);
 
-		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
-		if (ret)
-			goto unmap_ctx;
-
-		ret = -EINPROGRESS;
+		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+					ctx->ctx_len, DMA_TO_DEVICE);
+		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+			return ret;
 		state->update = ahash_update_ctx;
 		state->finup = ahash_finup_ctx;
 		state->final = ahash_final_ctx;
@@ -1432,15 +1472,14 @@
 		state->update = ahash_update_no_ctx;
 		state->finup = ahash_finup_no_ctx;
 		state->final = ahash_final_no_ctx;
-		scatterwalk_map_and_copy(next_buf, req->src, 0,
+		scatterwalk_map_and_copy(buf, req->src, 0,
 					 req->nbytes, 0);
-		switch_buf(state);
+		*buflen = *next_buflen;
+
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
-		       *next_buflen, 1);
-#endif
 
 	return ret;
 unmap_ctx:
@@ -1464,10 +1503,9 @@
 
 	state->ctx_dma = 0;
 	state->ctx_dma_len = 0;
-	state->current_buf = 0;
 	state->buf_dma = 0;
-	state->buflen_0 = 0;
-	state->buflen_1 = 0;
+	state->buflen = 0;
+	state->next_buflen = 0;
 
 	return 0;
 }
@@ -1497,16 +1535,8 @@
 {
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct caam_export_state *export = out;
-	int len;
-	u8 *buf;
-
-	if (state->current_buf) {
-		buf = state->buf_1;
-		len = state->buflen_1;
-	} else {
-		buf = state->buf_0;
-		len = state->buflen_0;
-	}
+	u8 *buf = state->buf;
+	int len = state->buflen;
 
 	memcpy(export->buf, buf, len);
 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1524,9 +1554,9 @@
 	const struct caam_export_state *export = in;
 
 	memset(state, 0, sizeof(*state));
-	memcpy(state->buf_0, export->buf, export->buflen);
+	memcpy(state->buf, export->buf, export->buflen);
 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
-	state->buflen_0 = export->buflen;
+	state->buflen = export->buflen;
 	state->update = export->update;
 	state->final = export->final;
 	state->finup = export->finup;
....@@ -1672,6 +1702,44 @@
16721702 },
16731703 },
16741704 .alg_type = OP_ALG_ALGSEL_MD5,
1705
+ }, {
1706
+ .hmac_name = "xcbc(aes)",
1707
+ .hmac_driver_name = "xcbc-aes-caam",
1708
+ .blocksize = AES_BLOCK_SIZE,
1709
+ .template_ahash = {
1710
+ .init = ahash_init,
1711
+ .update = ahash_update,
1712
+ .final = ahash_final,
1713
+ .finup = ahash_finup,
1714
+ .digest = ahash_digest,
1715
+ .export = ahash_export,
1716
+ .import = ahash_import,
1717
+ .setkey = axcbc_setkey,
1718
+ .halg = {
1719
+ .digestsize = AES_BLOCK_SIZE,
1720
+ .statesize = sizeof(struct caam_export_state),
1721
+ },
1722
+ },
1723
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1724
+ }, {
1725
+ .hmac_name = "cmac(aes)",
1726
+ .hmac_driver_name = "cmac-aes-caam",
1727
+ .blocksize = AES_BLOCK_SIZE,
1728
+ .template_ahash = {
1729
+ .init = ahash_init,
1730
+ .update = ahash_update,
1731
+ .final = ahash_final,
1732
+ .finup = ahash_finup,
1733
+ .digest = ahash_digest,
1734
+ .export = ahash_export,
1735
+ .import = ahash_import,
1736
+ .setkey = acmac_setkey,
1737
+ .halg = {
1738
+ .digestsize = AES_BLOCK_SIZE,
1739
+ .statesize = sizeof(struct caam_export_state),
1740
+ },
1741
+ },
1742
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
16751743 },
16761744 };
16771745
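Both new entries are keyed-only AES MACs; there is no unkeyed xcbc or cmac, which is why the registration loop later skips their unkeyed variants. A hedged caller-side sketch of driving cmac(aes) through the generic ahash API (hypothetical function; the 128-bit key size, GFP flags and synchronous wait are illustrative):

/* One-shot CMAC over a linear (kmalloc'd) buffer; mac is 16 bytes. */
static int cmac_oneshot(const u8 *key, const u8 *msg, unsigned int len,
                        u8 *mac)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
        if (ret)
                goto free_tfm;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        sg_init_one(&sg, msg, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, mac, len);

        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}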
....@@ -1699,6 +1767,8 @@
16991767 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
17001768 HASH_MSG_LEN + 64,
17011769 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1770
+ const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1771
+ sh_desc_update);
17021772 dma_addr_t dma_addr;
17031773 struct caam_drv_private *priv;
17041774
....@@ -1713,14 +1783,56 @@
17131783 }
17141784
17151785 priv = dev_get_drvdata(ctx->jrdev->parent);
1716
- ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1786
+
1787
+ if (is_xcbc_aes(caam_hash->alg_type)) {
1788
+ ctx->dir = DMA_TO_DEVICE;
1789
+ ctx->key_dir = DMA_BIDIRECTIONAL;
1790
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1791
+ ctx->ctx_len = 48;
1792
+ } else if (is_cmac_aes(caam_hash->alg_type)) {
1793
+ ctx->dir = DMA_TO_DEVICE;
1794
+ ctx->key_dir = DMA_NONE;
1795
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1796
+ ctx->ctx_len = 32;
1797
+ } else {
1798
+ if (priv->era >= 6) {
1799
+ ctx->dir = DMA_BIDIRECTIONAL;
1800
+ ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1801
+ } else {
1802
+ ctx->dir = DMA_TO_DEVICE;
1803
+ ctx->key_dir = DMA_NONE;
1804
+ }
1805
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1806
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
1807
+ OP_ALG_ALGSEL_SUBMASK) >>
1808
+ OP_ALG_ALGSEL_SHIFT];
1809
+ }
1810
+
1811
+ if (ctx->key_dir != DMA_NONE) {
1812
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1813
+ ARRAY_SIZE(ctx->key),
1814
+ ctx->key_dir,
1815
+ DMA_ATTR_SKIP_CPU_SYNC);
1816
+ if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1817
+ dev_err(ctx->jrdev, "unable to map key\n");
1818
+ caam_jr_free(ctx->jrdev);
1819
+ return -ENOMEM;
1820
+ }
1821
+ }
17171822
17181823 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1719
- offsetof(struct caam_hash_ctx,
1720
- sh_desc_update_dma),
1824
+ offsetof(struct caam_hash_ctx, key) -
1825
+ sh_desc_update_offset,
17211826 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
17221827 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
17231828 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1829
+
1830
+ if (ctx->key_dir != DMA_NONE)
1831
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1832
+ ARRAY_SIZE(ctx->key),
1833
+ ctx->key_dir,
1834
+ DMA_ATTR_SKIP_CPU_SYNC);
1835
+
17241836 caam_jr_free(ctx->jrdev);
17251837 return -ENOMEM;
17261838 }
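key_dir encodes how ctx->key is used per algorithm class: XCBC maps it DMA_BIDIRECTIONAL because its shared descriptors read the key and store derived key material back into the buffer; CMAC carries the key inline in the descriptor, so nothing is mapped; keyed MDHA hashes on Era 6+ parts only feed the (split) key to the device. The descriptor mapping that follows covers exactly [sh_desc_update, key) via the cached sh_desc_update_offset, which holds only while those members stay contiguous in struct caam_hash_ctx; a hypothetical compile-time check of that layout (not in the driver) might read:

/* Hypothetical layout check: the key member must sit after the four
 * shared-descriptor arrays for the mapping length above to be right.
 */
static inline void caam_hash_ctx_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct caam_hash_ctx, key) <=
                     offsetof(struct caam_hash_ctx, sh_desc_digest));
}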
....@@ -1728,22 +1840,25 @@
17281840 ctx->sh_desc_update_dma = dma_addr;
17291841 ctx->sh_desc_update_first_dma = dma_addr +
17301842 offsetof(struct caam_hash_ctx,
1731
- sh_desc_update_first);
1843
+ sh_desc_update_first) -
1844
+ sh_desc_update_offset;
17321845 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1733
- sh_desc_fin);
1846
+ sh_desc_fin) -
1847
+ sh_desc_update_offset;
17341848 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1735
- sh_desc_digest);
1849
+ sh_desc_digest) -
1850
+ sh_desc_update_offset;
17361851
1737
- /* copy descriptor header template value */
1738
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1739
-
1740
- ctx->ctx_len = runninglen[(ctx->adata.algtype &
1741
- OP_ALG_ALGSEL_SUBMASK) >>
1742
- OP_ALG_ALGSEL_SHIFT];
1852
+ ctx->enginectx.op.do_one_request = ahash_do_one_req;
17431853
17441854 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
17451855 sizeof(struct caam_hash_state));
1746
- return ahash_set_sh_desc(ahash);
1856
+
1857
+ /*
1858
+ * For keyed hash algorithms, shared descriptors
1859
+ * will be created later in the setkey() callback
1860
+ */
1861
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
17471862 }
17481863
17491864 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
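The tail of cra_init() wires the transform into crypto-engine and, for keyed algorithms, defers shared-descriptor construction to the setkey() callback, the first point at which the key (and hence the descriptor contents) is known. A minimal sketch of the shape a do_one_request handler takes here; the real ahash_do_one_req() is defined earlier in this file, and the edesc/ahash_op_done bookkeeping is assumed from the state structure rather than shown in this hunk:

/* Sketch of a crypto-engine do_one_request handler, not the exact code. */
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct ahash_request *req = ahash_request_cast(areq);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct caam_hash_state *state = ahash_request_ctx(req);
        int ret;

        /* the job descriptor was built when the request was queued */
        ret = caam_jr_enqueue(ctx->jrdev, state->edesc->hw_desc,
                              state->ahash_op_done, req);

        /* -EINPROGRESS: the job ring took it, completion runs async */
        return ret == -EINPROGRESS ? 0 : ret;
}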
....@@ -1751,13 +1866,17 @@
17511866 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
17521867
17531868 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1754
- offsetof(struct caam_hash_ctx,
1755
- sh_desc_update_dma),
1869
+ offsetof(struct caam_hash_ctx, key) -
1870
+ offsetof(struct caam_hash_ctx, sh_desc_update),
17561871 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1872
+ if (ctx->key_dir != DMA_NONE)
1873
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1874
+ ARRAY_SIZE(ctx->key), ctx->key_dir,
1875
+ DMA_ATTR_SKIP_CPU_SYNC);
17571876 caam_jr_free(ctx->jrdev);
17581877 }
17591878
1760
-static void __exit caam_algapi_hash_exit(void)
1879
+void caam_algapi_hash_exit(void)
17611880 {
17621881 struct caam_hash_alg *t_alg, *n;
17631882
....@@ -1808,63 +1927,45 @@
18081927 alg->cra_priority = CAAM_CRA_PRIORITY;
18091928 alg->cra_blocksize = template->blocksize;
18101929 alg->cra_alignmask = 0;
1811
- alg->cra_flags = CRYPTO_ALG_ASYNC;
1930
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
18121931
18131932 t_alg->alg_type = template->alg_type;
18141933
18151934 return t_alg;
18161935 }
18171936
1818
-static int __init caam_algapi_hash_init(void)
1937
+int caam_algapi_hash_init(struct device *ctrldev)
18191938 {
1820
- struct device_node *dev_node;
1821
- struct platform_device *pdev;
1822
- struct device *ctrldev;
18231939 int i = 0, err = 0;
1824
- struct caam_drv_private *priv;
1940
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
18251941 unsigned int md_limit = SHA512_DIGEST_SIZE;
1826
- u32 cha_inst, cha_vid;
1827
-
1828
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1829
- if (!dev_node) {
1830
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1831
- if (!dev_node)
1832
- return -ENODEV;
1833
- }
1834
-
1835
- pdev = of_find_device_by_node(dev_node);
1836
- if (!pdev) {
1837
- of_node_put(dev_node);
1838
- return -ENODEV;
1839
- }
1840
-
1841
- ctrldev = &pdev->dev;
1842
- priv = dev_get_drvdata(ctrldev);
1843
- of_node_put(dev_node);
1844
-
1845
- /*
1846
- * If priv is NULL, it's probably because the caam driver wasn't
1847
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1848
- */
1849
- if (!priv)
1850
- return -ENODEV;
1942
+ u32 md_inst, md_vid;
18511943
18521944 /*
18531945 * Register crypto algorithms the device supports. First, identify
18541946 * presence and attributes of MD block.
18551947 */
1856
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1857
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1948
+ if (priv->era < 10) {
1949
+ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1950
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1951
+ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1952
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1953
+ } else {
1954
+ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1955
+
1956
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1957
+ md_inst = mdha & CHA_VER_NUM_MASK;
1958
+ }
18581959
18591960 /*
18601961 * Skip registration of any hashing algorithms if MD block
18611962 * is not present.
18621963 */
1863
- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
1864
- return -ENODEV;
1964
+ if (!md_inst)
1965
+ return 0;
18651966
18661967 /* Limit digest size based on LP256 */
1867
- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1968
+ if (md_vid == CHA_VER_VID_MD_LP256)
18681969 md_limit = SHA256_DIGEST_SIZE;
18691970
18701971 INIT_LIST_HEAD(&hash_list);
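Capability probing now handles two register layouts: before Era 10 the MD version and instance count are packed fields of the perfmon cha_id_ls/cha_num_ls registers, from Era 10 on each CHA exposes its own version register (vreg.mdha). A missing MD block also returns 0 rather than -ENODEV, since this runs as a library call from the controller driver and a hash-less device must not fail the probe. The same decode, folded into a hypothetical helper for illustration (mask and register names as used above):

static void md_block_info(struct caam_drv_private *priv,
                          u32 *md_vid, u32 *md_inst)
{
        if (priv->era < 10) {
                /* packed per-CHA fields in the perfmon block */
                *md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
                *md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
                            CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
        } else {
                /* Era 10+: one version register per CHA */
                u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

                *md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
                *md_inst = mdha & CHA_VER_NUM_MASK;
        }
}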
....@@ -1875,14 +1976,16 @@
18751976 struct caam_hash_template *alg = driver_hash + i;
18761977
18771978 /* If MD size is not supported by device, skip registration */
1878
- if (alg->template_ahash.halg.digestsize > md_limit)
1979
+ if (is_mdha(alg->alg_type) &&
1980
+ alg->template_ahash.halg.digestsize > md_limit)
18791981 continue;
18801982
18811983 /* register hmac version */
18821984 t_alg = caam_hash_alloc(alg, true);
18831985 if (IS_ERR(t_alg)) {
18841986 err = PTR_ERR(t_alg);
1885
- pr_warn("%s alg allocation failed\n", alg->driver_name);
1987
+ pr_warn("%s alg allocation failed\n",
1988
+ alg->hmac_driver_name);
18861989 continue;
18871990 }
18881991
....@@ -1894,6 +1997,9 @@
18941997 kfree(t_alg);
18951998 } else
18961999 list_add_tail(&t_alg->entry, &hash_list);
2000
+
2001
+ if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2002
+ continue;
18972003
18982004 /* register unkeyed version */
18992005 t_alg = caam_hash_alloc(alg, false);
....@@ -1915,10 +2021,3 @@
19152021
19162022 return err;
19172023 }
1918
-
1919
-module_init(caam_algapi_hash_init);
1920
-module_exit(caam_algapi_hash_exit);
1921
-
1922
-MODULE_LICENSE("GPL");
1923
-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1924
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
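With module_init()/module_exit() and the MODULE_* tags gone, this file is no longer a standalone module: caam_algapi_hash_init()/caam_algapi_hash_exit() are plain exported entry points that the parent caam driver calls during its own probe and remove. A hedged sketch of such a call site (function names and placement are assumptions; only the two entry points come from this file):

/* Hypothetical call site in the parent caam driver. */
static int caam_register_hash_algs(struct device *ctrldev)
{
        return caam_algapi_hash_init(ctrldev);  /* 0 even without MD block */
}

static void caam_unregister_hash_algs(void)
{
        caam_algapi_hash_exit();                /* drains hash_list */
}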