@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
@@ -13,16 +13,6 @@
 #include "cc_hash.h"
 #include "cc_aead.h"
 
-enum dma_buffer_type {
-	DMA_NULL_TYPE = -1,
-	DMA_SGL_TYPE = 1,
-	DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
-	struct dma_pool *mlli_buffs_pool;
-};
-
 union buffer_array_entry {
 	struct scatterlist *sgl;
 	dma_addr_t buffer_dma;
@@ -34,7 +24,6 @@
 	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
@@ -64,11 +53,7 @@
 			   enum cc_sg_cpy_direct dir)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = areq_ctx->assoclen + req->cryptlen;
-
-	if (areq_ctx->is_gcm4543)
-		skip += crypto_aead_ivsize(tfm);
+	u32 skip = req->assoclen + req->cryptlen;
 
 	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 			   (skip - areq_ctx->req_authsize), skip, dir);
@@ -77,15 +62,21 @@
 /**
  * cc_get_sgl_nents() - Get scatterlist number of entries.
  *
+ * @dev: Device object
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
 				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
+
+	*lbytes = 0;
 
 	while (nbytes && sg_list) {
 		nents++;
@@ -95,48 +86,30 @@
 			  nbytes : sg_list->length;
 		sg_list = sg_next(sg_list);
 	}
+
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
-}
-
-/**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
-	struct scatterlist *current_sg = sgl;
-	int sg_index = 0;
-
-	while (sg_index <= data_len) {
-		if (!current_sg) {
-			/* reached the end of the sgl --> just return back */
-			return;
-		}
-		memset(sg_virt(current_sg), 0, current_sg->length);
-		sg_index += current_sg->length;
-		current_sg = sg_next(current_sg);
-	}
 }
 
 /**
  * cc_copy_sg_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
  *
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ *	    buffer to SG list)
 */
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
-	u32 nents, lbytes;
+	u32 nents;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
-	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+	nents = sg_nents_for_len(sg, end);
+	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
 
@@ -149,8 +122,11 @@
 
 	/* Verify there is no memory overflow*/
 	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+		dev_err(dev, "Too many mlli entries. current %d max %d\n",
+			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 		return -ENOMEM;
+	}
 
 	/*handle buffer longer than 64 kbytes */
 	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -222,21 +198,15 @@
 		goto build_mlli_exit;
 	}
 	/* Point to start of MLLI */
-	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+	mlli_p = mlli_params->mlli_virt_addr;
 	/* go over all SG's and link it to one MLLI table */
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
 		union buffer_array_entry *entry = &sg_data->entry[i];
 		u32 tot_len = sg_data->total_data_len[i];
 		u32 offset = sg_data->offset[i];
 
-		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
-						  offset, &total_nents,
-						  &mlli_p);
-		else /*DMA_BUFF_TYPE*/
-			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
-						    tot_len, &total_nents,
-						    &mlli_p);
+		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+					  &total_nents, &mlli_p);
 		if (rc)
 			return rc;
 
@@ -262,27 +232,6 @@
 	return rc;
 }
 
-static void cc_add_buffer_entry(struct device *dev,
-				struct buffer_array *sgl_data,
-				dma_addr_t buffer_dma, unsigned int buffer_len,
-				bool is_last_entry, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
-		index, &buffer_dma, buffer_len, is_last_entry);
-	sgl_data->nents[index] = 1;
-	sgl_data->entry[index].buffer_dma = buffer_dma;
-	sgl_data->offset[index] = 0;
-	sgl_data->total_data_len[index] = buffer_len;
-	sgl_data->type[index] = DMA_BUFF_TYPE;
-	sgl_data->is_last[index] = is_last_entry;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 			    unsigned int nents, struct scatterlist *sgl,
 			    unsigned int data_len, unsigned int data_offset,
@@ -296,7 +245,6 @@
 	sgl_data->entry[index].sgl = sgl;
 	sgl_data->offset[index] = data_offset;
 	sgl_data->total_data_len[index] = data_len;
-	sgl_data->type[index] = DMA_SGL_TYPE;
 	sgl_data->is_last[index] = is_last_table;
 	sgl_data->mlli_nents[index] = mlli_nents;
 	if (sgl_data->mlli_nents[index])
@@ -308,36 +256,31 @@
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else { /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
-			dev_err(dev, "Too many fragments. current %d max %d\n",
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		/* In case of mmu the number of mapped nents might
-		 * be changed from the original sgl nents
-		 */
-		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-		if (*mapped_nents == 0) {
-			*nents = 0;
-			dev_err(dev, "dma_map_sg() sg buffer failed\n");
-			return -ENOMEM;
-		}
+	int ret = 0;
+
+	if (!nbytes) {
+		*mapped_nents = 0;
+		*lbytes = 0;
+		*nents = 0;
+		return 0;
 	}
+
+	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+	if (*nents > max_sg_nents) {
+		*nents = 0;
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			*nents, max_sg_nents);
+		return -ENOMEM;
+	}
+
+	ret = dma_map_sg(dev, sg, *nents, direction);
+	if (dma_mapping_error(dev, ret)) {
+		*nents = 0;
+		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+		return -ENOMEM;
+	}
+
+	*mapped_nents = ret;
 
 	return 0;
 }
@@ -403,7 +346,7 @@
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize, DMA_TO_DEVICE);
+				 ivsize, DMA_BIDIRECTIONAL);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -413,12 +356,14 @@
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+	} else {
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 	}
 }
 
@@ -429,12 +374,12 @@
 {
 	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	u32 dummy = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
+	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
@@ -442,10 +387,9 @@
 
 	/* Map IV buffer */
 	if (ivsize) {
-		dump_byte_array("iv", (u8 *)info, ivsize);
+		dump_byte_array("iv", info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
-			dma_map_single(dev, (void *)info,
-				       ivsize, DMA_TO_DEVICE);
+			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
@@ -458,7 +402,7 @@
 	}
 
 	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (rc)
 		goto cipher_exit;
@@ -475,7 +419,7 @@
 		}
 	} else {
 		/* Map the dst sg */
-		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 			       &dummy, &mapped_nents);
 		if (rc)
@@ -494,7 +438,7 @@
 	}
 
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto cipher_exit;
@@ -514,10 +458,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 dummy;
-	u32 size_to_unmap = 0;
+	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -557,7 +499,7 @@
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
@@ -575,22 +517,12 @@
 	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
-	size_to_unmap = areq_ctx->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
 
-	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
 	if (drvdata->coherent &&
 	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -603,55 +535,10 @@
 	}
 }
 
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-				 unsigned int sgl_nents, unsigned int authsize,
-				 u32 last_entry_data_size,
-				 bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+			   u32 last_entry_data_size)
 {
-	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ?
-					(authsize - last_entry_data_size) :
-					authsize;
-	unsigned int nents;
-	unsigned int i;
-
-	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-		*is_icv_fragmented = false;
-		return 0;
-	}
-
-	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (!sgl)
-			break;
-		sgl = sg_next(sgl);
-	}
-
-	if (sgl)
-		icv_max_size = sgl->length;
-
-	if (last_entry_data_size > authsize) {
-		/* ICV attached to data in last entry (not fragmented!) */
-		nents = 0;
-		*is_icv_fragmented = false;
-	} else if (last_entry_data_size == authsize) {
-		/* ICV placed in whole last entry (not fragmented!) */
-		nents = 1;
-		*is_icv_fragmented = false;
-	} else if (icv_max_size > icv_required_size) {
-		nents = 1;
-		*is_icv_fragmented = true;
-	} else if (icv_max_size == icv_required_size) {
-		nents = 2;
-		*is_icv_fragmented = true;
-	} else {
-		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-			MAX_ICV_NENTS_SUPPORTED);
-		nents = -1; /*unsupported*/
-	}
-	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-		(*is_icv_fragmented ? "true" : "false"), nents);
-
-	return nents;
+	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 }
 
 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -681,7 +568,7 @@
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
@@ -689,18 +576,6 @@
 
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
-	// TODO: what about CTR?? ask Ron
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {
-		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
-		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
-		/* Chain to given list */
-		cc_add_buffer_entry(dev, sg_data,
-				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
-				    iv_size_to_authenc, is_last,
-				    &areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
 
 chain_iv_exit:
 	return rc;
@@ -713,15 +588,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
-	u32 mapped_nents = 0;
-	struct scatterlist *current_sg = req->src;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int sg_index = 0;
-	u32 size_of_assoc = areq_ctx->assoclen;
+	int mapped_nents = 0;
 	struct device *dev = drvdata_to_dev(drvdata);
-
-	if (areq_ctx->is_gcm4543)
-		size_of_assoc += crypto_aead_ivsize(tfm);
 
 	if (!sg_data) {
 		rc = -EINVAL;
@@ -738,26 +606,10 @@
 		goto chain_assoc_exit;
 	}
 
-	//iterate over the sgl to see how many entries are for associated data
-	//it is assumed that if we reach here , the sgl is already mapped
-	sg_index = current_sg->length;
-	//the first entry in the scatter list contains all the associated data
-	if (sg_index > size_of_assoc) {
-		mapped_nents++;
-	} else {
-		while (sg_index <= size_of_assoc) {
-			current_sg = sg_next(current_sg);
-			/* if have reached the end of the sgl, then this is
-			 * unexpected
-			 */
-			if (!current_sg) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += current_sg->length;
-			mapped_nents++;
-		}
-	}
+	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
+	if (mapped_nents < 0)
+		return mapped_nents;
+
 	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -803,39 +655,32 @@
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
+	struct scatterlist *sg;
+	ssize_t offset;
 
 	areq_ctx->is_icv_fragmented = false;
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
+
+	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		sg = areq_ctx->src_sgl;
+		offset = *src_last_bytes - authsize;
 	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
+		sg = areq_ctx->dst_sgl;
+		offset = *dst_last_bytes - authsize;
 	}
+
+	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 }
 
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-				     struct aead_request *req,
-				     struct buffer_array *sg_data,
-				     u32 *src_last_bytes, u32 *dst_last_bytes,
-				     bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				      struct aead_request *req,
+				      struct buffer_array *sg_data,
+				      u32 *src_last_bytes, u32 *dst_last_bytes,
+				      bool is_last_table)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
-	int rc = 0, icv_nents;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct scatterlist *sg;
 
@@ -846,14 +691,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 
 		if (areq_ctx->is_icv_fragmented) {
 			/* Backup happens only when ICV is fragmented, ICV
@@ -895,16 +735,11 @@
 				areq_ctx->dst_offset, is_last_table,
 				&areq_ctx->dst.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 		/* Backup happens only when ICV is fragmented, ICV
+
 		 * verification is made by CPU compare in order to simplify
 		 * MAC verification upon request completion
 		 */
@@ -932,14 +767,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-						  areq_ctx->dst.nents,
-						  authsize, *dst_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+				       *dst_last_bytes);
 
 		if (!areq_ctx->is_icv_fragmented) {
 			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -953,9 +783,6 @@
 			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 		}
 	}
-
-prepare_data_mlli_exit:
-	return rc;
 }
 
 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -972,14 +799,10 @@
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	/* non-inplace mode */
-	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	u32 sg_index = 0;
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = areq_ctx->assoclen;
-
-	if (is_gcm4543)
-		size_to_skip += crypto_aead_ivsize(tfm);
+	u32 size_to_skip = req->assoclen;
+	struct scatterlist *sgl;
 
 	offset = size_to_skip;
 
@@ -989,25 +812,20 @@
 	areq_ctx->src_sgl = req->src;
 	areq_ctx->dst_sgl = req->dst;
 
-	if (is_gcm4543)
-		size_for_map += crypto_aead_ivsize(tfm);
-
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->src_sgl->length;
-		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->src_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->src_sgl->length;
+	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 		src_mapped_nents--;
+		offset -= areq_ctx->src_sgl->length;
+		sgl = sg_next(areq_ctx->src_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->src_sgl = sgl;
+		sg_index += areq_ctx->src_sgl->length;
 	}
 	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1020,18 +838,15 @@
 	areq_ctx->src_offset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = areq_ctx->assoclen + req->cryptlen;
+		size_for_map = req->assoclen + req->cryptlen;
 
 		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 			size_for_map += authsize;
 		else
 			size_for_map -= authsize;
 
-		if (is_gcm4543)
-			size_for_map += crypto_aead_ivsize(tfm);
-
-		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-			       &areq_ctx->dst.nents,
+		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
+			       &areq_ctx->dst.mapped_nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
 		if (rc)
@@ -1044,16 +859,14 @@
 		offset = size_to_skip;
 
 		//check where the data starts
-		while (sg_index <= size_to_skip) {
-			offset -= areq_ctx->dst_sgl->length;
-			areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-			//if have reached the end of the sgl, then this is unexpected
-			if (!areq_ctx->dst_sgl) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += areq_ctx->dst_sgl->length;
+		while (dst_mapped_nents && sg_index <= size_to_skip) {
 			dst_mapped_nents--;
+			offset -= areq_ctx->dst_sgl->length;
+			sgl = sg_next(areq_ctx->dst_sgl);
+			if (!sgl)
+				break;
+			areq_ctx->dst_sgl = sgl;
+			sg_index += areq_ctx->dst_sgl->length;
 		}
 		if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 			dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1066,9 +879,9 @@
 	    dst_mapped_nents > 1 ||
 	    do_chain) {
 		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-					       &src_last_bytes,
-					       &dst_last_bytes, is_last_table);
+		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					  &src_last_bytes, &dst_last_bytes,
+					  is_last_table);
 	} else {
 		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 		cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1137,14 +950,11 @@
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	unsigned int authsize = areq_ctx->req_authsize;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	dma_addr_t dma_addr;
 	u32 mapped_nents = 0;
 	u32 dummy = 0; /*used for the assoc data fragments */
-	u32 size_to_map = 0;
+	u32 size_to_map;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
 	mlli_params->curr_pool = NULL;
@@ -1241,16 +1051,16 @@
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 
-	size_to_map = req->cryptlen + areq_ctx->assoclen;
+	size_to_map = req->cryptlen + req->assoclen;
 	/* If we do in-place encryption, we also need the auth tag */
 	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
 	    (req->src == req->dst)) {
 		size_to_map += authsize;
 	}
-	if (is_gcm4543)
-		size_to_map += crypto_aead_ivsize(tfm);
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
+
+	rc = cc_map_sg(dev, req->src, size_to_map,
+		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
+		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
@@ -1310,7 +1120,7 @@
 	 */
 	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto aead_map_failure;
@@ -1338,7 +1148,6 @@
 	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
@@ -1356,7 +1165,6 @@
 		return 0;
 	}
 
-	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
 		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
@@ -1385,7 +1193,7 @@
 
 	/*build mlli */
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
@@ -1423,7 +1231,6 @@
 	unsigned int update_data_len;
 	u32 total_in_len = nbytes + *curr_buff_cnt;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
 	int rc = 0;
 	u32 dummy = 0;
@@ -1441,8 +1248,7 @@
 	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy);
+		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
@@ -1499,7 +1305,7 @@
 	}
 
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
@@ -1566,39 +1372,22 @@
 
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
-	if (!buff_mgr_handle)
-		return -ENOMEM;
-
-	drvdata->buff_mgr_handle = buff_mgr_handle;
-
-	buff_mgr_handle->mlli_buffs_pool =
+	drvdata->mlli_buffs_pool =
 		dma_pool_create("dx_single_mlli_tables", dev,
 				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				LLI_ENTRY_BYTE_SIZE,
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 
-	if (!buff_mgr_handle->mlli_buffs_pool)
-		goto error;
+	if (!drvdata->mlli_buffs_pool)
+		return -ENOMEM;
 
 	return 0;
-
-error:
-	cc_buffer_mgr_fini(drvdata);
-	return -ENOMEM;
 }
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
-	if (buff_mgr_handle) {
-		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
-		kfree(drvdata->buff_mgr_handle);
-		drvdata->buff_mgr_handle = NULL;
-	}
+	dma_pool_destroy(drvdata->mlli_buffs_pool);
 	return 0;
 }