From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:44:59 +0000
Subject: [PATCH] gmac get mac from eeprom

---
 kernel/drivers/crypto/ccree/cc_buffer_mgr.c | 489 +++++++++++++++--------------------------------------
 1 file changed, 139 insertions(+), 350 deletions(-)

diff --git a/kernel/drivers/crypto/ccree/cc_buffer_mgr.c b/kernel/drivers/crypto/ccree/cc_buffer_mgr.c
index 6300202..6140e49 100644
--- a/kernel/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/kernel/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
@@ -13,16 +13,6 @@
 #include "cc_hash.h"
 #include "cc_aead.h"
 
-enum dma_buffer_type {
-	DMA_NULL_TYPE = -1,
-	DMA_SGL_TYPE = 1,
-	DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
-	struct dma_pool *mlli_buffs_pool;
-};
-
 union buffer_array_entry {
 	struct scatterlist *sgl;
 	dma_addr_t buffer_dma;
@@ -34,7 +24,6 @@
 	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
@@ -64,11 +53,7 @@
 			   enum cc_sg_cpy_direct dir)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = areq_ctx->assoclen + req->cryptlen;
-
-	if (areq_ctx->is_gcm4543)
-		skip += crypto_aead_ivsize(tfm);
+	u32 skip = req->assoclen + req->cryptlen;
 
 	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 			   (skip - areq_ctx->req_authsize), skip, dir);
@@ -77,15 +62,21 @@
 /**
  * cc_get_sgl_nents() - Get scatterlist number of entries.
  *
+ * @dev: Device object
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
 				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
+
+	*lbytes = 0;
 
 	while (nbytes && sg_list) {
 		nents++;
@@ -95,48 +86,30 @@
 			      nbytes : sg_list->length;
 		sg_list = sg_next(sg_list);
 	}
+
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
-}
-
-/**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
-	struct scatterlist *current_sg = sgl;
-	int sg_index = 0;
-
-	while (sg_index <= data_len) {
-		if (!current_sg) {
-			/* reached the end of the sgl --> just return back */
-			return;
-		}
-		memset(sg_virt(current_sg), 0, current_sg->length);
-		sg_index += current_sg->length;
-		current_sg = sg_next(current_sg);
-	}
 }
 
 /**
  * cc_copy_sg_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
  *
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ *	    buffer to SG list)
  */
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
-	u32 nents, lbytes;
+	u32 nents;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
-	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+	nents = sg_nents_for_len(sg, end);
+	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
 
@@ -149,8 +122,11 @@
 
 	/* Verify there is no memory overflow*/
 	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+		dev_err(dev, "Too many mlli entries. current %d max %d\n",
+			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 		return -ENOMEM;
+	}
 
 	/*handle buffer longer than 64 kbytes */
 	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -222,21 +198,15 @@
 		goto build_mlli_exit;
 	}
 	/* Point to start of MLLI */
-	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+	mlli_p = mlli_params->mlli_virt_addr;
 	/* go over all SG's and link it to one MLLI table */
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
 		union buffer_array_entry *entry = &sg_data->entry[i];
 		u32 tot_len = sg_data->total_data_len[i];
 		u32 offset = sg_data->offset[i];
 
-		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
-						  offset, &total_nents,
-						  &mlli_p);
-		else /*DMA_BUFF_TYPE*/
-			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
-						    tot_len, &total_nents,
-						    &mlli_p);
+		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+					  &total_nents, &mlli_p);
 		if (rc)
 			return rc;
 
@@ -262,27 +232,6 @@
 	return rc;
 }
 
-static void cc_add_buffer_entry(struct device *dev,
-				struct buffer_array *sgl_data,
-				dma_addr_t buffer_dma, unsigned int buffer_len,
-				bool is_last_entry, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
-		index, &buffer_dma, buffer_len, is_last_entry);
-	sgl_data->nents[index] = 1;
-	sgl_data->entry[index].buffer_dma = buffer_dma;
-	sgl_data->offset[index] = 0;
-	sgl_data->total_data_len[index] = buffer_len;
-	sgl_data->type[index] = DMA_BUFF_TYPE;
-	sgl_data->is_last[index] = is_last_entry;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 			    unsigned int nents, struct scatterlist *sgl,
 			    unsigned int data_len, unsigned int data_offset,
@@ -296,7 +245,6 @@
 	sgl_data->entry[index].sgl = sgl;
 	sgl_data->offset[index] = data_offset;
 	sgl_data->total_data_len[index] = data_len;
-	sgl_data->type[index] = DMA_SGL_TYPE;
 	sgl_data->is_last[index] = is_last_table;
 	sgl_data->mlli_nents[index] = mlli_nents;
 	if (sgl_data->mlli_nents[index])
@@ -308,36 +256,31 @@
 			  unsigned int nbytes, int direction, u32 *nents,
 			  u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
-			dev_err(dev, "Too many fragments. current %d max %d\n",
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		/* In case of mmu the number of mapped nents might
-		 * be changed from the original sgl nents
-		 */
-		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-		if (*mapped_nents == 0) {
-			*nents = 0;
-			dev_err(dev, "dma_map_sg() sg buffer failed\n");
-			return -ENOMEM;
-		}
+	int ret = 0;
+
+	if (!nbytes) {
+		*mapped_nents = 0;
+		*lbytes = 0;
+		*nents = 0;
+		return 0;
 	}
+
+	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+	if (*nents > max_sg_nents) {
+		*nents = 0;
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			*nents, max_sg_nents);
+		return -ENOMEM;
+	}
+
+	ret = dma_map_sg(dev, sg, *nents, direction);
+	if (dma_mapping_error(dev, ret)) {
+		*nents = 0;
+		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+		return -ENOMEM;
+	}
+
+	*mapped_nents = ret;
 	return 0;
 }
 
@@ -403,7 +346,7 @@
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize, DMA_TO_DEVICE);
+				 ivsize, DMA_BIDIRECTIONAL);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -413,12 +356,14 @@
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+	} else {
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 	}
 }
 
@@ -429,12 +374,12 @@
 {
 	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	u32 dummy = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
+	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
@@ -442,10 +387,9 @@
 
 	/* Map IV buffer */
 	if (ivsize) {
-		dump_byte_array("iv", (u8 *)info, ivsize);
+		dump_byte_array("iv", info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
-			dma_map_single(dev, (void *)info,
-				       ivsize, DMA_TO_DEVICE);
+			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
@@ -458,7 +402,7 @@
 	}
 
 	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (rc)
 		goto cipher_exit;
@@ -475,7 +419,7 @@
 		}
 	} else {
 		/* Map the dst sg */
-		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 			       &dummy, &mapped_nents);
 		if (rc)
@@ -494,7 +438,7 @@
 	}
 
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto cipher_exit;
@@ -514,10 +458,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 dummy;
-	u32 size_to_unmap = 0;
+	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -557,7 +499,7 @@
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
@@ -575,22 +517,12 @@
 	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
-	size_to_unmap = areq_ctx->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
 
-	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
 	if (drvdata->coherent &&
 	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -603,55 +535,10 @@
 	}
 }
 
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-				 unsigned int sgl_nents, unsigned int authsize,
-				 u32 last_entry_data_size,
-				 bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+			   u32 last_entry_data_size)
 {
-	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ?
-					 (authsize - last_entry_data_size) :
-					 authsize;
-	unsigned int nents;
-	unsigned int i;
-
-	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-		*is_icv_fragmented = false;
-		return 0;
-	}
-
-	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (!sgl)
-			break;
-		sgl = sg_next(sgl);
-	}
-
-	if (sgl)
-		icv_max_size = sgl->length;
-
-	if (last_entry_data_size > authsize) {
-		/* ICV attached to data in last entry (not fragmented!) */
-		nents = 0;
-		*is_icv_fragmented = false;
-	} else if (last_entry_data_size == authsize) {
-		/* ICV placed in whole last entry (not fragmented!) */
-		nents = 1;
-		*is_icv_fragmented = false;
-	} else if (icv_max_size > icv_required_size) {
-		nents = 1;
-		*is_icv_fragmented = true;
-	} else if (icv_max_size == icv_required_size) {
-		nents = 2;
-		*is_icv_fragmented = true;
-	} else {
-		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-			MAX_ICV_NENTS_SUPPORTED);
-		nents = -1; /*unsupported*/
-	}
-	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-		(*is_icv_fragmented ? "true" : "false"), nents);
-
-	return nents;
+	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 }
 
 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -681,7 +568,7 @@
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
-		kzfree(areq_ctx->gen_ctx.iv);
+		kfree_sensitive(areq_ctx->gen_ctx.iv);
 		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
@@ -689,18 +576,6 @@
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 
-	// TODO: what about CTR?? ask Ron
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {
-		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
-		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
-		/* Chain to given list */
-		cc_add_buffer_entry(dev, sg_data,
-				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
-				    iv_size_to_authenc, is_last,
-				    &areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
 
 chain_iv_exit:
 	return rc;
@@ -713,15 +588,8 @@
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
-	u32 mapped_nents = 0;
-	struct scatterlist *current_sg = req->src;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int sg_index = 0;
-	u32 size_of_assoc = areq_ctx->assoclen;
+	int mapped_nents = 0;
 	struct device *dev = drvdata_to_dev(drvdata);
-
-	if (areq_ctx->is_gcm4543)
-		size_of_assoc += crypto_aead_ivsize(tfm);
 
 	if (!sg_data) {
 		rc = -EINVAL;
@@ -738,26 +606,10 @@
 		goto chain_assoc_exit;
 	}
 
-	//iterate over the sgl to see how many entries are for associated data
-	//it is assumed that if we reach here , the sgl is already mapped
-	sg_index = current_sg->length;
-	//the first entry in the scatter list contains all the associated data
-	if (sg_index > size_of_assoc) {
-		mapped_nents++;
-	} else {
-		while (sg_index <= size_of_assoc) {
-			current_sg = sg_next(current_sg);
-			/* if have reached the end of the sgl, then this is
-			 * unexpected
-			 */
-			if (!current_sg) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += current_sg->length;
-			mapped_nents++;
-		}
-	}
+	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
+	if (mapped_nents < 0)
+		return mapped_nents;
+
 	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -803,39 +655,32 @@
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
+	struct scatterlist *sg;
+	ssize_t offset;
 
 	areq_ctx->is_icv_fragmented = false;
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
+
+	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		sg = areq_ctx->src_sgl;
+		offset = *src_last_bytes - authsize;
 	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
+		sg = areq_ctx->dst_sgl;
+		offset = *dst_last_bytes - authsize;
 	}
+
+	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 }
 
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-				     struct aead_request *req,
-				     struct buffer_array *sg_data,
-				     u32 *src_last_bytes, u32 *dst_last_bytes,
-				     bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				      struct aead_request *req,
+				      struct buffer_array *sg_data,
+				      u32 *src_last_bytes, u32 *dst_last_bytes,
+				      bool is_last_table)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
-	int rc = 0, icv_nents;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct scatterlist *sg;
 
@@ -846,14 +691,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 
 		if (areq_ctx->is_icv_fragmented) {
 			/* Backup happens only when ICV is fragmented, ICV
@@ -895,16 +735,11 @@
 				areq_ctx->dst_offset, is_last_table,
 				&areq_ctx->dst.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 		/* Backup happens only when ICV is fragmented, ICV
+		 * verification is made by CPU compare in order to simplify
 		 * MAC verification upon request completion */
@@ -932,14 +767,9 @@
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-						  areq_ctx->dst.nents,
-						  authsize, *dst_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+				       *dst_last_bytes);
 
 		if (!areq_ctx->is_icv_fragmented) {
 			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -953,9 +783,6 @@
 			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 		}
 	}
-
-prepare_data_mlli_exit:
-	return rc;
 }
 
 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -972,14 +799,10 @@
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	/* non-inplace mode */
-	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	u32 sg_index = 0;
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = areq_ctx->assoclen;
-
-	if (is_gcm4543)
-		size_to_skip += crypto_aead_ivsize(tfm);
+	u32 size_to_skip = req->assoclen;
+	struct scatterlist *sgl;
 
 	offset = size_to_skip;
 
@@ -989,25 +812,20 @@
 	areq_ctx->src_sgl = req->src;
 	areq_ctx->dst_sgl = req->dst;
 
-	if (is_gcm4543)
-		size_for_map += crypto_aead_ivsize(tfm);
-
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->src_sgl->length;
-		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->src_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->src_sgl->length;
+	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 		src_mapped_nents--;
+		offset -= areq_ctx->src_sgl->length;
+		sgl = sg_next(areq_ctx->src_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->src_sgl = sgl;
+		sg_index += areq_ctx->src_sgl->length;
 	}
 	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1020,18 +838,15 @@
 	areq_ctx->src_offset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = areq_ctx->assoclen + req->cryptlen;
+		size_for_map = req->assoclen + req->cryptlen;
 		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
 			size_for_map += authsize;
 		else
 			size_for_map -= authsize;
 
-		if (is_gcm4543)
-			size_for_map += crypto_aead_ivsize(tfm);
-
-		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-			       &areq_ctx->dst.nents,
+		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
+			       &areq_ctx->dst.mapped_nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
 		if (rc)
@@ -1044,16 +859,14 @@
 	offset = size_to_skip;
 
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
-		offset -= areq_ctx->dst_sgl->length;
-		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->dst_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
-		sg_index += areq_ctx->dst_sgl->length;
+	while (dst_mapped_nents && sg_index <= size_to_skip) {
 		dst_mapped_nents--;
+		offset -= areq_ctx->dst_sgl->length;
+		sgl = sg_next(areq_ctx->dst_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->dst_sgl = sgl;
+		sg_index += areq_ctx->dst_sgl->length;
 	}
 	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1066,9 +879,9 @@
 	    dst_mapped_nents > 1 || do_chain) {
 		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-					       &src_last_bytes,
-					       &dst_last_bytes, is_last_table);
+		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					  &src_last_bytes, &dst_last_bytes,
+					  is_last_table);
 	} else {
 		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 		cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1137,14 +950,11 @@
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct buffer_array sg_data;
 	unsigned int authsize = areq_ctx->req_authsize;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	dma_addr_t dma_addr;
 	u32 mapped_nents = 0;
 	u32 dummy = 0; /*used for the assoc data fragments */
-	u32 size_to_map = 0;
+	u32 size_to_map;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
 	mlli_params->curr_pool = NULL;
@@ -1241,16 +1051,16 @@
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 
-	size_to_map = req->cryptlen + areq_ctx->assoclen;
+	size_to_map = req->cryptlen + req->assoclen;
 	/* If we do in-place encryption, we also need the auth tag */
 	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
 	    (req->src == req->dst)) {
 		size_to_map += authsize;
 	}
-	if (is_gcm4543)
-		size_to_map += crypto_aead_ivsize(tfm);
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
+
+	rc = cc_map_sg(dev, req->src, size_to_map,
+		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
+		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
@@ -1310,7 +1120,7 @@
 	 */
 	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 		if (rc)
 			goto aead_map_failure;
@@ -1338,7 +1148,6 @@
 	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
@@ -1356,7 +1165,6 @@
 		return 0;
 	}
 
-	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
 		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
@@ -1385,7 +1193,7 @@
 	/*build mlli */
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
@@ -1423,7 +1231,6 @@
 	unsigned int update_data_len;
 	u32 total_in_len = nbytes + *curr_buff_cnt;
 	struct buffer_array sg_data;
-	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
 	int rc = 0;
 	u32 dummy = 0;
@@ -1441,8 +1248,7 @@
 	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy);
+		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
@@ -1499,7 +1305,7 @@
 	}
 
 	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
-		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
@@ -1566,39 +1372,22 @@
 
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
-	if (!buff_mgr_handle)
-		return -ENOMEM;
-
-	drvdata->buff_mgr_handle = buff_mgr_handle;
-
-	buff_mgr_handle->mlli_buffs_pool =
+	drvdata->mlli_buffs_pool =
 		dma_pool_create("dx_single_mlli_tables", dev,
 				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				LLI_ENTRY_BYTE_SIZE,
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 
-	if (!buff_mgr_handle->mlli_buffs_pool)
-		goto error;
+	if (!drvdata->mlli_buffs_pool)
+		return -ENOMEM;
 
 	return 0;
-
-error:
-	cc_buffer_mgr_fini(drvdata);
-	return -ENOMEM;
 }
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
 {
-	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
-	if (buff_mgr_handle) {
-		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
-		kfree(drvdata->buff_mgr_handle);
-		drvdata->buff_mgr_handle = NULL;
-	}
+	dma_pool_destroy(drvdata->mlli_buffs_pool);
 
 	return 0;
 }
-- 
Gitblit v1.6.2
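
Reviewer notes on the hunks above follow; they sit below the patch terminator, so tools that apply the patch ignore them.

The recurring DMA-direction changes replace blanket DMA_BIDIRECTIONAL mappings with directions derived from the request: in-place operation (req->src == req->dst) keeps DMA_BIDIRECTIONAL, because the engine both reads and writes the same pages, while out-of-place operation maps the source DMA_TO_DEVICE and the destination DMA_FROM_DEVICE. A minimal sketch of the selection the hunks repeat inline (the helper name is invented for illustration and does not exist in the driver):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Sketch only: mirrors the "src != dst" tests scattered through
	 * the patch; not an actual driver helper.
	 */
	static inline int cc_src_dma_direction(struct scatterlist *src,
					       struct scatterlist *dst)
	{
		/* in-place: the engine reads plaintext and writes
		 * ciphertext through the same mapping, so it must stay
		 * bidirectional
		 */
		return (src != dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	}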
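The rewritten cc_map_sg() tests the dma_map_sg() result with dma_mapping_error(). dma_mapping_error() is defined for the dma_addr_t returned by dma_map_single()/dma_map_page(); dma_map_sg() instead returns the number of mapped segments, with 0 indicating failure, so the conventional check tests the count directly. A sketch of that variant under the same calling convention as the patch (hypothetical function name):

	/* Sketch only: count-based error handling for dma_map_sg(). */
	static int cc_map_sg_sketch(struct device *dev, struct scatterlist *sg,
				    u32 nents, int direction, u32 *mapped_nents)
	{
		int ret = dma_map_sg(dev, sg, nents, direction);

		if (ret == 0) {		/* zero mapped segments means failure */
			dev_err(dev, "dma_map_sg() failed\n");
			return -ENOMEM;
		}

		/* with an IOMMU, ret may be smaller than nents after merging */
		*mapped_nents = ret;
		return 0;
	}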
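Several open-coded scatterlist walks are replaced with sg_nents_for_len(), which returns the number of entries needed to cover the requested byte count, or -EINVAL when the list is too short. cc_aead_chain_assoc() propagates the negative return, but the cc_copy_sg_portion() and hash call sites store the result straight into unsigned counters, relying on the caller to guarantee the length. Where that guarantee is absent, a defensive wrapper looks roughly like this (illustrative only, not part of the patch):

	/* Sketch only: check sg_nents_for_len() before storing the count
	 * in an unsigned variable, where a negative errno would wrap.
	 */
	static int cc_sg_nents_checked(struct scatterlist *sg, unsigned int len,
				       u32 *nents)
	{
		int n = sg_nents_for_len(sg, len);

		if (n < 0)
			return n;	/* -EINVAL: list shorter than len */

		*nents = n;
		return 0;
	}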
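Finally, cc_buffer_mgr_init()/cc_buffer_mgr_fini() drop the intermediate buff_mgr_handle allocation and hang the MLLI DMA pool directly off drvdata. The unconditional cleanup is safe because dma_pool_destroy() accepts a NULL pool as a no-op, which is what lets fini shed its guard. A condensed view of the resulting lifecycle (a sketch with invented helper names, not the driver's literal code):

	/* Sketch only: create the pool at probe time, destroy it at
	 * remove time; dma_pool_destroy(NULL) is a no-op, so no NULL
	 * check is needed.
	 */
	static int mlli_pool_init(struct cc_drvdata *drvdata, struct device *dev)
	{
		drvdata->mlli_buffs_pool =
			dma_pool_create("dx_single_mlli_tables", dev,
					MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
					LLI_ENTRY_BYTE_SIZE,
					MLLI_TABLE_MIN_ALIGNMENT, 0);

		return drvdata->mlli_buffs_pool ? 0 : -ENOMEM;
	}

	static void mlli_pool_fini(struct cc_drvdata *drvdata)
	{
		dma_pool_destroy(drvdata->mlli_buffs_pool);
		drvdata->mlli_buffs_pool = NULL;
	}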