hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
@@ -13,16 +13,6 @@
 #include "cc_hash.h"
 #include "cc_aead.h"
 
-enum dma_buffer_type {
-	DMA_NULL_TYPE = -1,
-	DMA_SGL_TYPE = 1,
-	DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
-	struct dma_pool *mlli_buffs_pool;
-};
-
 union buffer_array_entry {
 	struct scatterlist *sgl;
 	dma_addr_t buffer_dma;
@@ -34,7 +24,6 @@
 	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
@@ -64,11 +53,7 @@
 			enum cc_sg_cpy_direct dir)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = areq_ctx->assoclen + req->cryptlen;
-
-	if (areq_ctx->is_gcm4543)
-		skip += crypto_aead_ivsize(tfm);
+	u32 skip = req->assoclen + req->cryptlen;
 
 	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 			   (skip - areq_ctx->req_authsize), skip, dir);
@@ -77,15 +62,21 @@
 /**
  * cc_get_sgl_nents() - Get scatterlist number of entries.
  *
+ * @dev: Device object
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
 				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
+
+	*lbytes = 0;
 
 	while (nbytes && sg_list) {
 		nents++;
@@ -95,48 +86,30 @@
 				nbytes : sg_list->length;
 		sg_list = sg_next(sg_list);
 	}
+
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
-}
-
-/**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
-	struct scatterlist *current_sg = sgl;
-	int sg_index = 0;
-
-	while (sg_index <= data_len) {
-		if (!current_sg) {
-			/* reached the end of the sgl --> just return back */
-			return;
-		}
-		memset(sg_virt(current_sg), 0, current_sg->length);
-		sg_index += current_sg->length;
-		current_sg = sg_next(current_sg);
-	}
 }
 
 /**
  * cc_copy_sg_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
  *
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ *	    buffer to SG list)
  */
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
-	u32 nents, lbytes;
+	u32 nents;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
-	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+	nents = sg_nents_for_len(sg, end);
+	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
 
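
Note: cc_copy_sg_portion() now leans on two generic scatterlist helpers instead of the driver-local entry walk: sg_nents_for_len() counts how many entries are needed to cover the requested byte range, and sg_copy_buffer() performs the offset copy in either direction. The sketch below shows that pattern in isolation; it is illustrative only (sg_extract_range() is a made-up name, not code from this patch).

    #include <linux/scatterlist.h>
    #include <linux/types.h>

    /* Copy bytes [skip, end) out of a scatterlist into a flat buffer. */
    static int sg_extract_range(struct scatterlist *sg, u8 *dst,
                                size_t skip, size_t end)
    {
            int nents = sg_nents_for_len(sg, end); /* entries covering 'end' bytes */

            if (nents < 0)
                    return nents;   /* scatterlist shorter than requested */

            /* last argument true == copy from the scatterlist into dst */
            sg_copy_buffer(sg, nents, dst, end - skip, skip, true);
            return 0;
    }

The boolean last argument of sg_copy_buffer() selects the copy direction, which is what the (direct == CC_SG_TO_BUF) comparison above maps onto.
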
@@ -149,8 +122,11 @@
 
 	/* Verify there is no memory overflow*/
 	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+		dev_err(dev, "Too many mlli entries. current %d max %d\n",
+			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 		return -ENOMEM;
+	}
 
 	/*handle buffer longer than 64 kbytes */
 	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -222,21 +198,15 @@
 		goto build_mlli_exit;
 	}
 	/* Point to start of MLLI */
-	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+	mlli_p = mlli_params->mlli_virt_addr;
 	/* go over all SG's and link it to one MLLI table */
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
 		union buffer_array_entry *entry = &sg_data->entry[i];
 		u32 tot_len = sg_data->total_data_len[i];
 		u32 offset = sg_data->offset[i];
 
-		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
-						  offset, &total_nents,
-						  &mlli_p);
-		else /*DMA_BUFF_TYPE*/
-			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
-						    tot_len, &total_nents,
-						    &mlli_p);
+		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+					  &total_nents, &mlli_p);
 		if (rc)
 			return rc;
 
@@ -262,27 +232,6 @@
 	return rc;
 }
 
-static void cc_add_buffer_entry(struct device *dev,
-				struct buffer_array *sgl_data,
-				dma_addr_t buffer_dma, unsigned int buffer_len,
-				bool is_last_entry, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
-		index, &buffer_dma, buffer_len, is_last_entry);
-	sgl_data->nents[index] = 1;
-	sgl_data->entry[index].buffer_dma = buffer_dma;
-	sgl_data->offset[index] = 0;
-	sgl_data->total_data_len[index] = buffer_len;
-	sgl_data->type[index] = DMA_BUFF_TYPE;
-	sgl_data->is_last[index] = is_last_entry;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 			    unsigned int nents, struct scatterlist *sgl,
 			    unsigned int data_len, unsigned int data_offset,
@@ -296,7 +245,6 @@
 	sgl_data->entry[index].sgl = sgl;
 	sgl_data->offset[index] = data_offset;
 	sgl_data->total_data_len[index] = data_len;
-	sgl_data->type[index] = DMA_SGL_TYPE;
 	sgl_data->is_last[index] = is_last_table;
 	sgl_data->mlli_nents[index] = mlli_nents;
 	if (sgl_data->mlli_nents[index])
@@ -308,36 +256,31 @@
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else { /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
-			dev_err(dev, "Too many fragments. current %d max %d\n",
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		/* In case of mmu the number of mapped nents might
-		 * be changed from the original sgl nents
-		 */
-		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-		if (*mapped_nents == 0) {
-			*nents = 0;
-			dev_err(dev, "dma_map_sg() sg buffer failed\n");
-			return -ENOMEM;
-		}
+	int ret = 0;
+
+	if (!nbytes) {
+		*mapped_nents = 0;
+		*lbytes = 0;
+		*nents = 0;
+		return 0;
 	}
+
+	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+	if (*nents > max_sg_nents) {
+		*nents = 0;
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			*nents, max_sg_nents);
+		return -ENOMEM;
+	}
+
+	ret = dma_map_sg(dev, sg, *nents, direction);
+	if (dma_mapping_error(dev, ret)) {
+		*nents = 0;
+		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+		return -ENOMEM;
+	}
+
+	*mapped_nents = ret;
 
 	return 0;
 }
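
Note: the rewritten cc_map_sg() drops the old sg_is_last() special case: an empty request (nbytes == 0) returns early with zeroed counts, and everything else goes through a single dma_map_sg() call. The sketch below is illustrative only (example_map()/example_unmap() are made-up names) and shows the DMA API contract being relied on: dma_map_sg() returns the number of mapped segments, which may be smaller than the entry count passed in because of IOMMU merging, and 0 on failure; the unmap call must use the same direction as the map.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int example_map(struct device *dev, struct scatterlist *sg,
                           int nents, int *mapped_nents)
    {
            int ret = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

            if (!ret)                       /* 0 means the mapping failed */
                    return -ENOMEM;

            *mapped_nents = ret;            /* segments actually programmed */
            return 0;
    }

    static void example_unmap(struct device *dev, struct scatterlist *sg,
                              int nents)
    {
            /* same scatterlist and same direction as the matching map */
            dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
    }
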
@@ -403,7 +346,7 @@
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize, DMA_TO_DEVICE);
+				 ivsize, DMA_BIDIRECTIONAL);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -413,12 +356,14 @@
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+	} else {
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 	}
 }
 
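
Note: the cipher unmap path now mirrors how the buffers were mapped: out-of-place requests unmap the source as DMA_TO_DEVICE and the destination as DMA_FROM_DEVICE, while in-place requests stay DMA_BIDIRECTIONAL. A minimal sketch of that direction choice, assuming a hypothetical helper name:

    #include <linux/dma-mapping.h>

    static inline enum dma_data_direction cc_example_src_direction(const void *src,
                                                                    const void *dst)
    {
            /* out-of-place: source is only read, destination only written */
            return (src != dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
    }

Whichever direction is used at dma_map_sg() time has to be repeated at dma_unmap_sg() time, which is why the mapping hunks below compute src_direction the same way.
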
@@ -429,12 +374,12 @@
 {
 	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	u32 dummy = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
+	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
@@ -442,10 +387,9 @@
 
 	/* Map IV buffer */
 	if (ivsize) {
-		dump_byte_array("iv", (u8 *)info, ivsize);
+		dump_byte_array("iv", info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
-			dma_map_single(dev, (void *)info,
-				       ivsize, DMA_TO_DEVICE);
+			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
@@ -458,7 +402,7 @@
 	}
 
 	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (rc)
 		goto cipher_exit;
@@ -475,7 +419,7 @@
 		}
 	} else {
 		/* Map the dst sg */
-		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 			       &dummy, &mapped_nents);
 		if (rc)
@@ -494,7 +438,7 @@
 	}
 
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto cipher_exit;
@@ -514,10 +458,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 dummy;
-	u32 size_to_unmap = 0;
+	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -557,7 +499,7 @@
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
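
Note: kzfree() was renamed to kfree_sensitive() in the kernel; behaviour is unchanged, the allocation is zeroed before being returned to the allocator, which is what you want for IV and key material. A tiny illustrative sketch (not taken from the driver):

    #include <linux/slab.h>
    #include <linux/string.h>

    static u8 *example_copy_iv(const u8 *iv, unsigned int ivsize, gfp_t flags)
    {
            return kmemdup(iv, ivsize, flags);      /* may return NULL */
    }

    static void example_drop_iv(u8 *iv)
    {
            kfree_sensitive(iv);    /* zeroes the buffer, then frees; NULL-safe */
    }
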
@@ -575,22 +517,12 @@
 	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
-	size_to_unmap = areq_ctx->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
 
-	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
 	if (drvdata->coherent &&
 	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -603,55 +535,10 @@
 	}
 }
 
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-				 unsigned int sgl_nents, unsigned int authsize,
-				 u32 last_entry_data_size,
-				 bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+			   u32 last_entry_data_size)
 {
-	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ?
-					(authsize - last_entry_data_size) :
-					authsize;
-	unsigned int nents;
-	unsigned int i;
-
-	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-		*is_icv_fragmented = false;
-		return 0;
-	}
-
-	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (!sgl)
-			break;
-		sgl = sg_next(sgl);
-	}
-
-	if (sgl)
-		icv_max_size = sgl->length;
-
-	if (last_entry_data_size > authsize) {
-		/* ICV attached to data in last entry (not fragmented!) */
-		nents = 0;
-		*is_icv_fragmented = false;
-	} else if (last_entry_data_size == authsize) {
-		/* ICV placed in whole last entry (not fragmented!) */
-		nents = 1;
-		*is_icv_fragmented = false;
-	} else if (icv_max_size > icv_required_size) {
-		nents = 1;
-		*is_icv_fragmented = true;
-	} else if (icv_max_size == icv_required_size) {
-		nents = 2;
-		*is_icv_fragmented = true;
-	} else {
-		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-			MAX_ICV_NENTS_SUPPORTED);
-		nents = -1; /*unsupported*/
-	}
-	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-		(*is_icv_fragmented ? "true" : "false"), nents);
-
-	return nents;
+	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 }
 
 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
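
Note: the callers in this patch only consume the fragmentation flag, so cc_get_aead_icv_nents() collapses into cc_is_icv_frag(): the ICV is fragmented exactly when the scatterlist has more than one entry and the last entry holds fewer bytes than the ICV. A worked example with made-up values, restating the predicate for illustration:

    #include <linux/types.h>

    static bool example_icv_is_fragmented(unsigned int nents,
                                          unsigned int authsize,
                                          u32 last_entry_bytes)
    {
            return nents > 1 && last_entry_bytes < authsize;
    }

    /* With authsize == 16:
     *   example_icv_is_fragmented(1, 16, 32) -> false (ICV fits in the only entry)
     *   example_icv_is_fragmented(3, 16,  4) -> true  (12 ICV bytes sit in the
     *                                                  previous entry)
     */
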
@@ -681,7 +568,7 @@
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
@@ -689,18 +576,6 @@
 
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
-	// TODO: what about CTR?? ask Ron
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {
-		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
-		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
-		/* Chain to given list */
-		cc_add_buffer_entry(dev, sg_data,
-				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
-				    iv_size_to_authenc, is_last,
-				    &areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
 
 chain_iv_exit:
 	return rc;
@@ -713,15 +588,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
-	u32 mapped_nents = 0;
-	struct scatterlist *current_sg = req->src;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int sg_index = 0;
-	u32 size_of_assoc = areq_ctx->assoclen;
+	int mapped_nents = 0;
 	struct device *dev = drvdata_to_dev(drvdata);
-
-	if (areq_ctx->is_gcm4543)
-		size_of_assoc += crypto_aead_ivsize(tfm);
 
 	if (!sg_data) {
 		rc = -EINVAL;
@@ -738,26 +606,10 @@
 		goto chain_assoc_exit;
 	}
 
-	//iterate over the sgl to see how many entries are for associated data
-	//it is assumed that if we reach here , the sgl is already mapped
-	sg_index = current_sg->length;
-	//the first entry in the scatter list contains all the associated data
-	if (sg_index > size_of_assoc) {
-		mapped_nents++;
-	} else {
-		while (sg_index <= size_of_assoc) {
-			current_sg = sg_next(current_sg);
-			/* if have reached the end of the sgl, then this is
-			 * unexpected
-			 */
-			if (!current_sg) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += current_sg->length;
-			mapped_nents++;
-		}
-	}
+	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
+	if (mapped_nents < 0)
+		return mapped_nents;
+
 	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
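
Note: the hand-rolled walk over the associated data is replaced by sg_nents_for_len(), so the end-of-list error handling moves into the helper's return value: zero length yields 0 entries, a sufficiently long list yields the entry count, and a list shorter than the requested length yields -EINVAL, which the code above simply propagates. A short illustrative sketch of that contract (names and the limit are made up, not driver code):

    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    #define EXAMPLE_MAX_ASSOC_ENTRIES 4    /* stand-in for the driver's limit */

    static int example_count_assoc_entries(struct scatterlist *sg,
                                           unsigned int assoclen)
    {
            int nents = sg_nents_for_len(sg, assoclen);

            if (nents < 0)          /* -EINVAL: list shorter than assoclen */
                    return nents;
            if (nents > EXAMPLE_MAX_ASSOC_ENTRIES)
                    return -ENOMEM; /* too fragmented for the descriptor table */
            return nents;           /* 0 when assoclen == 0 */
    }
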
@@ -803,39 +655,32 @@
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
+	struct scatterlist *sg;
+	ssize_t offset;
 
 	areq_ctx->is_icv_fragmented = false;
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
+
+	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		sg = areq_ctx->src_sgl;
+		offset = *src_last_bytes - authsize;
 	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
+		sg = areq_ctx->dst_sgl;
+		offset = *dst_last_bytes - authsize;
 	}
+
+	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 }
 
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-				     struct aead_request *req,
-				     struct buffer_array *sg_data,
-				     u32 *src_last_bytes, u32 *dst_last_bytes,
-				     bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				      struct aead_request *req,
+				      struct buffer_array *sg_data,
+				      u32 *src_last_bytes, u32 *dst_last_bytes,
+				      bool is_last_table)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
-	int rc = 0, icv_nents;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct scatterlist *sg;
 
@@ -846,14 +691,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 
 		if (areq_ctx->is_icv_fragmented) {
 			/* Backup happens only when ICV is fragmented, ICV
@@ -895,16 +735,11 @@
 				areq_ctx->dst_offset, is_last_table,
 				&areq_ctx->dst.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 		/* Backup happens only when ICV is fragmented, ICV
+
 		 * verification is made by CPU compare in order to simplify
 		 * MAC verification upon request completion
 		 */
@@ -932,14 +767,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-						  areq_ctx->dst.nents,
-						  authsize, *dst_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+				       *dst_last_bytes);
 
 		if (!areq_ctx->is_icv_fragmented) {
 			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -953,9 +783,6 @@
 			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 		}
 	}
-
-prepare_data_mlli_exit:
-	return rc;
 }
 
 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -972,14 +799,10 @@
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	/* non-inplace mode */
-	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	u32 sg_index = 0;
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = areq_ctx->assoclen;
-
-	if (is_gcm4543)
-		size_to_skip += crypto_aead_ivsize(tfm);
+	u32 size_to_skip = req->assoclen;
+	struct scatterlist *sgl;
 
 	offset = size_to_skip;
 
@@ -989,25 +812,20 @@
 	areq_ctx->src_sgl = req->src;
 	areq_ctx->dst_sgl = req->dst;
 
-	if (is_gcm4543)
-		size_for_map += crypto_aead_ivsize(tfm);
-
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->src_sgl->length;
-		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->src_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->src_sgl->length;
+	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 		src_mapped_nents--;
+		offset -= areq_ctx->src_sgl->length;
+		sgl = sg_next(areq_ctx->src_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->src_sgl = sgl;
+		sg_index += areq_ctx->src_sgl->length;
 	}
 	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1020,18 +838,15 @@
 	areq_ctx->src_offset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = areq_ctx->assoclen + req->cryptlen;
+		size_for_map = req->assoclen + req->cryptlen;
 
 		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 			size_for_map += authsize;
 		else
 			size_for_map -= authsize;
 
-		if (is_gcm4543)
-			size_for_map += crypto_aead_ivsize(tfm);
-
-		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-			       &areq_ctx->dst.nents,
+		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
+			       &areq_ctx->dst.mapped_nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
 		if (rc)
@@ -1044,16 +859,14 @@
 		offset = size_to_skip;
 
 		//check where the data starts
-		while (sg_index <= size_to_skip) {
-			offset -= areq_ctx->dst_sgl->length;
-			areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-			//if have reached the end of the sgl, then this is unexpected
-			if (!areq_ctx->dst_sgl) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += areq_ctx->dst_sgl->length;
+		while (dst_mapped_nents && sg_index <= size_to_skip) {
 			dst_mapped_nents--;
+			offset -= areq_ctx->dst_sgl->length;
+			sgl = sg_next(areq_ctx->dst_sgl);
+			if (!sgl)
+				break;
+			areq_ctx->dst_sgl = sgl;
+			sg_index += areq_ctx->dst_sgl->length;
 		}
 		if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 			dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1066,9 +879,9 @@
 	    dst_mapped_nents > 1 ||
 	    do_chain) {
 		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-					       &src_last_bytes,
-					       &dst_last_bytes, is_last_table);
+		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					  &src_last_bytes, &dst_last_bytes,
+					  is_last_table);
 	} else {
 		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 		cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1137,14 +950,11 @@
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	unsigned int authsize = areq_ctx->req_authsize;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	dma_addr_t dma_addr;
 	u32 mapped_nents = 0;
 	u32 dummy = 0; /*used for the assoc data fragments */
-	u32 size_to_map = 0;
+	u32 size_to_map;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
 	mlli_params->curr_pool = NULL;
@@ -1241,16 +1051,16 @@
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 
-	size_to_map = req->cryptlen + areq_ctx->assoclen;
+	size_to_map = req->cryptlen + req->assoclen;
 	/* If we do in-place encryption, we also need the auth tag */
 	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
 	    (req->src == req->dst)) {
 		size_to_map += authsize;
 	}
-	if (is_gcm4543)
-		size_to_map += crypto_aead_ivsize(tfm);
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
+
+	rc = cc_map_sg(dev, req->src, size_to_map,
+		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
+		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
@@ -1310,7 +1120,7 @@
 	 */
 	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto aead_map_failure;
@@ -1338,7 +1148,6 @@
 	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
@@ -1356,7 +1165,6 @@
 		return 0;
 	}
 
-	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
 		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
@@ -1385,7 +1193,7 @@
 
 	/*build mlli */
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
@@ -1423,7 +1231,6 @@
 	unsigned int update_data_len;
 	u32 total_in_len = nbytes + *curr_buff_cnt;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
 	int rc = 0;
 	u32 dummy = 0;
@@ -1441,8 +1248,7 @@
 	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy);
+		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
@@ -1499,7 +1305,7 @@
 	}
 
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
@@ -1566,39 +1372,22 @@
 
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
-	if (!buff_mgr_handle)
-		return -ENOMEM;
-
-	drvdata->buff_mgr_handle = buff_mgr_handle;
-
-	buff_mgr_handle->mlli_buffs_pool =
+	drvdata->mlli_buffs_pool =
 		dma_pool_create("dx_single_mlli_tables", dev,
 				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				LLI_ENTRY_BYTE_SIZE,
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 
-	if (!buff_mgr_handle->mlli_buffs_pool)
-		goto error;
+	if (!drvdata->mlli_buffs_pool)
+		return -ENOMEM;
 
 	return 0;
-
-error:
-	cc_buffer_mgr_fini(drvdata);
-	return -ENOMEM;
 }
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
-	if (buff_mgr_handle) {
-		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
-		kfree(drvdata->buff_mgr_handle);
-		drvdata->buff_mgr_handle = NULL;
-	}
+	dma_pool_destroy(drvdata->mlli_buffs_pool);
 	return 0;
 }
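
Note: with the buff_mgr_handle wrapper gone, the MLLI DMA pool lives directly on struct cc_drvdata: init either gets a pool or returns -ENOMEM, and fini calls dma_pool_destroy() unconditionally because it accepts a NULL pool. A minimal sketch of the dma_pool lifecycle (pool name, block size and alignment are illustrative, not the driver's values):

    #include <linux/dmapool.h>
    #include <linux/gfp.h>

    static struct dma_pool *example_pool_create(struct device *dev)
    {
            /* 1 KiB blocks, 64-byte aligned, no boundary restriction */
            return dma_pool_create("example_pool", dev, 1024, 64, 0);
    }

    static void *example_block_alloc(struct dma_pool *pool, dma_addr_t *dma)
    {
            return dma_pool_alloc(pool, GFP_KERNEL, dma);
    }

    static void example_pool_destroy(struct dma_pool *pool, void *vaddr,
                                     dma_addr_t dma)
    {
            if (vaddr)
                    dma_pool_free(pool, vaddr, dma);
            dma_pool_destroy(pool);         /* NULL pool is a no-op */
    }
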