From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 10 May 2024 07:44:59 +0000 Subject: [PATCH] gmac get mac form eeprom --- kernel/drivers/crypto/caam/caampkc.c | 557 ++++++++++++++++++++++++++++++++++-------------------- 1 files changed, 348 insertions(+), 209 deletions(-) diff --git a/kernel/drivers/crypto/caam/caampkc.c b/kernel/drivers/crypto/caam/caampkc.c index f26d62e..5bd70a5 100644 --- a/kernel/drivers/crypto/caam/caampkc.c +++ b/kernel/drivers/crypto/caam/caampkc.c @@ -1,7 +1,9 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * caam - Freescale FSL CAAM support for Public Key Cryptography * * Copyright 2016 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP * * There is no Shared Descriptor for PKC so that the Job Descriptor must carry * all the desired key parameters, input and output pointers. @@ -15,19 +17,36 @@ #include "sg_sw_sec4.h" #include "caampkc.h" -#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) +#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f1_pdb)) + SIZEOF_RSA_PRIV_F1_PDB) #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f2_pdb)) + SIZEOF_RSA_PRIV_F2_PDB) #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ - sizeof(struct rsa_priv_f3_pdb)) + SIZEOF_RSA_PRIV_F3_PDB) +#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ + +/* buffer filled with zeros, used for padding */ +static u8 *zero_buffer; + +/* + * variable used to avoid double free of resources in case + * algorithm registration was unsuccessful + */ +static bool init_done; + +struct caam_akcipher_alg { + struct akcipher_alg akcipher; + bool registered; +}; static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); - dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, @@ -98,74 +117,82 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) { struct akcipher_request *req = context; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct rsa_edesc *edesc; + int ecode = 0; + bool has_bklog; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); + edesc = req_ctx->edesc; + has_bklog = edesc->bklog; rsa_pub_unmap(dev, edesc, req); rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. 
+ */ + if (!has_bklog) + akcipher_request_complete(req, ecode); + else + crypto_finalize_akcipher_request(jrp->engine, req, ecode); } -static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, - void *context) +static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, + void *context) { struct akcipher_request *req = context; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; + int ecode = 0; + bool has_bklog; if (err) - caam_jr_strstatus(dev, err); + ecode = caam_jr_strstatus(dev, err); - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); + edesc = req_ctx->edesc; + has_bklog = edesc->bklog; - rsa_priv_f1_unmap(dev, edesc, req); + switch (key->priv_form) { + case FORM1: + rsa_priv_f1_unmap(dev, edesc, req); + break; + case FORM2: + rsa_priv_f2_unmap(dev, edesc, req); + break; + case FORM3: + rsa_priv_f3_unmap(dev, edesc, req); + } + rsa_io_unmap(dev, edesc, req); kfree(edesc); - akcipher_request_complete(req, err); + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + akcipher_request_complete(req, ecode); + else + crypto_finalize_akcipher_request(jrp->engine, req, ecode); } -static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, - void *context) -{ - struct akcipher_request *req = context; - struct rsa_edesc *edesc; - - if (err) - caam_jr_strstatus(dev, err); - - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); - - rsa_priv_f2_unmap(dev, edesc, req); - rsa_io_unmap(dev, edesc, req); - kfree(edesc); - - akcipher_request_complete(req, err); -} - -static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, - void *context) -{ - struct akcipher_request *req = context; - struct rsa_edesc *edesc; - - if (err) - caam_jr_strstatus(dev, err); - - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); - - rsa_priv_f3_unmap(dev, edesc, req); - rsa_io_unmap(dev, edesc, req); - kfree(edesc); - - akcipher_request_complete(req, err); -} - +/** + * Count leading zeros, need it to strip, from a given scatterlist + * + * @sgl : scatterlist to count zeros from + * @nbytes: number of zeros, in bytes, to strip + * @flags : operation flags + */ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, unsigned int nbytes, unsigned int flags) @@ -185,7 +212,8 @@ lzeros = 0; len = 0; while (nbytes > 0) { - while (len && !*buff) { + /* do not strip more than given bytes */ + while (len && !*buff && lzeros < nbytes) { lzeros++; len--; buff++; @@ -194,7 +222,9 @@ if (len && *buff) break; - sg_miter_next(&miter); + if (!sg_miter_next(&miter)) + break; + buff = miter.addr; len = miter.length; @@ -216,29 +246,67 @@ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = ctx->dev; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_rsa_key *key = &ctx->key; struct rsa_edesc *edesc; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int sg_flags = (flags == GFP_ATOMIC) ? 
SG_MITER_ATOMIC : 0; - int sgc; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int src_nents, dst_nents; + int mapped_src_nents, mapped_dst_nents; + unsigned int diff_size = 0; int lzeros; - lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); - if (lzeros < 0) - return ERR_PTR(lzeros); + if (req->src_len > key->n_sz) { + /* + * strip leading zeros and + * return the number of zeros to skip + */ + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - + key->n_sz, sg_flags); + if (lzeros < 0) + return ERR_PTR(lzeros); - req->src_len -= lzeros; - req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); + req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, + lzeros); + req_ctx->fixup_src_len = req->src_len - lzeros; + } else { + /* + * input src is less then n key modulus, + * so there will be zero padding + */ + diff_size = key->n_sz - req->src_len; + req_ctx->fixup_src = req->src; + req_ctx->fixup_src_len = req->src_len; + } - src_nents = sg_nents_for_len(req->src, req->src_len); + src_nents = sg_nents_for_len(req_ctx->fixup_src, + req_ctx->fixup_src_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len); - if (src_nents > 1) - sec4_sg_len = src_nents; - if (dst_nents > 1) - sec4_sg_len += dst_nents; + mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + goto src_fail; + } + + if (!diff_size && mapped_src_nents == 1) + sec4_sg_len = 0; /* no need for an input hw s/g table */ + else + sec4_sg_len = mapped_src_nents + !!diff_size; + sec4_sg_index = sec4_sg_len; + + if (mapped_dst_nents > 1) + sec4_sg_len += pad_sg_nents(mapped_dst_nents); + else + sec4_sg_len = pad_sg_nents(sec4_sg_len); sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); @@ -246,37 +314,32 @@ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) - return ERR_PTR(-ENOMEM); - - sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); - if (unlikely(!sgc)) { - dev_err(dev, "unable to map source\n"); - goto src_fail; - } - - sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); - if (unlikely(!sgc)) { - dev_err(dev, "unable to map destination\n"); goto dst_fail; - } edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; + if (diff_size) + dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, + 0); - sec4_sg_index = 0; - if (src_nents > 1) { - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); - sec4_sg_index += src_nents; - } - if (dst_nents > 1) - sg_to_sec4_sg_last(req->dst, dst_nents, + if (sec4_sg_index) + sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, + edesc->sec4_sg + !!diff_size, 0); + + if (mapped_dst_nents > 1) + sg_to_sec4_sg_last(req->dst, req->dst_len, edesc->sec4_sg + sec4_sg_index, 0); /* Save nents for later use in Job Descriptor */ edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + req_ctx->edesc = edesc; + if (!sec4_sg_bytes) return edesc; + + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -287,21 +350,53 @@ edesc->sec4_sg_bytes = sec4_sg_bytes; + print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", + 
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + edesc->sec4_sg_bytes, 1); + return edesc; sec4_sg_fail: - dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); -dst_fail: - dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); -src_fail: kfree(edesc); +dst_fail: + dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); +src_fail: + dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); +} + +static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct akcipher_request *req = container_of(areq, + struct akcipher_request, + base); + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *jrdev = ctx->dev; + u32 *desc = req_ctx->edesc->hw_desc; + int ret; + + req_ctx->edesc->bklog = true; + + ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req); + + if (ret != -EINPROGRESS) { + rsa_pub_unmap(jrdev, req_ctx->edesc, req); + rsa_io_unmap(jrdev, req_ctx->edesc, req); + kfree(req_ctx->edesc); + } else { + ret = 0; + } + + return ret; } static int set_rsa_pub_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; @@ -321,15 +416,15 @@ return -ENOMEM; } - if (edesc->src_nents > 1) { + if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; } else { - pdb->f_dma = sg_dma_address(req->src); + pdb->f_dma = sg_dma_address(req_ctx->fixup_src); } - if (edesc->dst_nents > 1) { + if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); @@ -338,7 +433,7 @@ } pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; - pdb->f_len = req->src_len; + pdb->f_len = req_ctx->fixup_src_len; return 0; } @@ -366,15 +461,18 @@ return -ENOMEM; } - if (edesc->src_nents > 1) { + if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; + } else { - pdb->g_dma = sg_dma_address(req->src); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } - if (edesc->dst_nents > 1) { + if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); @@ -429,15 +527,17 @@ goto unmap_tmp1; } - if (edesc->src_nents > 1) { + if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; } else { - pdb->g_dma = sg_dma_address(req->src); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } - if (edesc->dst_nents > 1) { + if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); @@ -516,15 +616,17 @@ goto unmap_tmp1; } - if (edesc->src_nents > 1) { + if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; - 
sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; } else { - pdb->g_dma = sg_dma_address(req->src); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } - if (edesc->dst_nents > 1) { + if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); @@ -551,6 +653,53 @@ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); return -ENOMEM; +} + +static int akcipher_enqueue_req(struct device *jrdev, + void (*cbk)(struct device *jrdev, u32 *desc, + u32 err, void *context), + struct akcipher_request *req) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct rsa_edesc *edesc = req_ctx->edesc; + u32 *desc = edesc->hw_desc; + int ret; + + req_ctx->akcipher_op_done = cbk; + /* + * Only the backlog request are sent to crypto-engine since the others + * can be handled by CAAM, if free, especially since JR has up to 1024 + * entries (more than the 10 entries from crypto-engine). + */ + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine, + req); + else + ret = caam_jr_enqueue(jrdev, desc, cbk, req); + + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { + switch (key->priv_form) { + case FORM1: + rsa_priv_f1_unmap(jrdev, edesc, req); + break; + case FORM2: + rsa_priv_f2_unmap(jrdev, edesc, req); + break; + case FORM3: + rsa_priv_f3_unmap(jrdev, edesc, req); + break; + default: + rsa_pub_unmap(jrdev, edesc, req); + } + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; } static int caam_rsa_enc(struct akcipher_request *req) @@ -584,11 +733,7 @@ /* Initialize Job Descriptor */ init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req); - if (!ret) - return -EINPROGRESS; - - rsa_pub_unmap(jrdev, edesc, req); + return akcipher_enqueue_req(jrdev, rsa_pub_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); @@ -617,11 +762,7 @@ /* Initialize Job Descriptor */ init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req); - if (!ret) - return -EINPROGRESS; - - rsa_priv_f1_unmap(jrdev, edesc, req); + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); @@ -650,11 +791,7 @@ /* Initialize Job Descriptor */ init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req); - if (!ret) - return -EINPROGRESS; - - rsa_priv_f2_unmap(jrdev, edesc, req); + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); @@ -683,11 +820,7 @@ /* Initialize Job Descriptor */ init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req); - if (!ret) - return -EINPROGRESS; - - rsa_priv_f3_unmap(jrdev, edesc, req); + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); @@ -723,14 +856,14 @@ static void caam_rsa_free_key(struct caam_rsa_key *key) { - kzfree(key->d); - kzfree(key->p); - kzfree(key->q); - kzfree(key->dp); - kzfree(key->dq); - 
kzfree(key->qinv); - kzfree(key->tmp1); - kzfree(key->tmp2); + kfree_sensitive(key->d); + kfree_sensitive(key->p); + kfree_sensitive(key->q); + kfree_sensitive(key->dp); + kfree_sensitive(key->dq); + kfree_sensitive(key->qinv); + kfree_sensitive(key->tmp1); + kfree_sensitive(key->tmp2); kfree(key->e); kfree(key->n); memset(key, 0, sizeof(*key)); @@ -814,7 +947,7 @@ return ret; /* Copy key in DMA zone */ - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->e) goto err; @@ -835,8 +968,6 @@ rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; - - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); return 0; err: @@ -889,17 +1020,17 @@ return; free_dq: - kzfree(rsa_key->dq); + kfree_sensitive(rsa_key->dq); free_dp: - kzfree(rsa_key->dp); + kfree_sensitive(rsa_key->dp); free_tmp2: - kzfree(rsa_key->tmp2); + kfree_sensitive(rsa_key->tmp2); free_tmp1: - kzfree(rsa_key->tmp1); + kfree_sensitive(rsa_key->tmp1); free_q: - kzfree(rsa_key->q); + kfree_sensitive(rsa_key->q); free_p: - kzfree(rsa_key->p); + kfree_sensitive(rsa_key->p); } static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, @@ -918,11 +1049,11 @@ return ret; /* Copy key in DMA zone */ - rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->d) goto err; - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); if (!rsa_key->e) goto err; @@ -944,9 +1075,6 @@ rsa_key->d_sz = raw_key.d_sz; rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; - - memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); caam_rsa_set_priv_key_form(ctx, &raw_key); @@ -976,6 +1104,17 @@ return PTR_ERR(ctx->dev); } + ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, + CAAM_RSA_MAX_INPUT_SIZE - 1, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { + dev_err(ctx->dev, "unable to map padding\n"); + caam_jr_free(ctx->dev); + return -ENOMEM; + } + + ctx->enginectx.op.do_one_request = akcipher_do_one_req; + return 0; } @@ -985,90 +1124,90 @@ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct caam_rsa_key *key = &ctx->key; + dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - + 1, DMA_TO_DEVICE); caam_rsa_free_key(key); caam_jr_free(ctx->dev); } -static struct akcipher_alg caam_rsa = { - .encrypt = caam_rsa_enc, - .decrypt = caam_rsa_dec, - .sign = caam_rsa_dec, - .verify = caam_rsa_enc, - .set_pub_key = caam_rsa_set_pub_key, - .set_priv_key = caam_rsa_set_priv_key, - .max_size = caam_rsa_max_size, - .init = caam_rsa_init_tfm, - .exit = caam_rsa_exit_tfm, - .reqsize = sizeof(struct caam_rsa_req_ctx), - .base = { - .cra_name = "rsa", - .cra_driver_name = "rsa-caam", - .cra_priority = 3000, - .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct caam_rsa_ctx), - }, +static struct caam_akcipher_alg caam_rsa = { + .akcipher = { + .encrypt = caam_rsa_enc, + .decrypt = caam_rsa_dec, + .set_pub_key = caam_rsa_set_pub_key, + .set_priv_key = caam_rsa_set_priv_key, + .max_size = caam_rsa_max_size, + .init = caam_rsa_init_tfm, + .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-caam", + .cra_priority = 3000, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct caam_rsa_ctx), + }, + } }; /* Public Key Cryptography module 
initialization handler */ -static int __init caam_pkc_init(void) +int caam_pkc_init(struct device *ctrldev) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; - u32 cha_inst, pk_inst; + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + u32 pk_inst, pkha; int err; - - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) { - of_node_put(dev_node); - return -ENODEV; - } - - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); - of_node_put(dev_node); - - /* - * If priv is NULL, it's probably because the caam driver wasn't - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. - */ - if (!priv) - return -ENODEV; + init_done = false; /* Determine public key hardware accelerator presence. */ - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + if (priv->era < 10) { + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + } else { + pkha = rd_reg32(&priv->ctrl->vreg.pkha); + pk_inst = pkha & CHA_VER_NUM_MASK; + + /* + * Newer CAAMs support partially disabled functionality. If this is the + * case, the number is non-zero, but this bit is set to indicate that + * no encryption or decryption is supported. Only signing and verifying + * is supported. + */ + if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) + pk_inst = 0; + } /* Do not register algorithms if PKHA is not present. */ if (!pk_inst) - return -ENODEV; + return 0; - err = crypto_register_akcipher(&caam_rsa); - if (err) + /* allocate zero buffer, used for padding input */ + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | + GFP_KERNEL); + if (!zero_buffer) + return -ENOMEM; + + err = crypto_register_akcipher(&caam_rsa.akcipher); + + if (err) { + kfree(zero_buffer); dev_warn(ctrldev, "%s alg registration failed\n", - caam_rsa.base.cra_driver_name); - else + caam_rsa.akcipher.base.cra_driver_name); + } else { + init_done = true; + caam_rsa.registered = true; dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); + } return err; } -static void __exit caam_pkc_exit(void) +void caam_pkc_exit(void) { - crypto_unregister_akcipher(&caam_rsa); + if (!init_done) + return; + + if (caam_rsa.registered) + crypto_unregister_akcipher(&caam_rsa.akcipher); + + kfree(zero_buffer); } - -module_init(caam_pkc_init); -module_exit(caam_pkc_exit); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API"); -MODULE_AUTHOR("Freescale Semiconductor"); -- Gitblit v1.6.2
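
For context, a minimal in-kernel consumer of the "rsa" akcipher that this driver registers (rsa-caam) might look like the sketch below. This is illustrative only, not part of the patch: the helper name rsa_encrypt_once, the DER-encoded public key buffer pub_der/pub_der_len, and the in/out buffers are all assumptions supplied by the caller, and the in/out buffers are assumed to be kmalloc'd (scatterlists must not reference stack memory). Error handling is abbreviated.

	#include <crypto/akcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int rsa_encrypt_once(const void *pub_der, unsigned int pub_der_len,
				    void *in, unsigned int in_len,
				    void *out, unsigned int out_len)
	{
		struct crypto_akcipher *tfm;
		struct akcipher_request *req;
		struct scatterlist src, dst;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		/* The crypto core selects the highest-priority "rsa" implementation;
		 * with this driver registered and PKHA present, that is rsa-caam. */
		tfm = crypto_alloc_akcipher("rsa", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Public key is expected in DER form, as parsed by rsa_parse_pub_key(). */
		ret = crypto_akcipher_set_pub_key(tfm, pub_der, pub_der_len);
		if (ret)
			goto free_tfm;

		req = akcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		sg_init_one(&src, in, in_len);
		sg_init_one(&dst, out, out_len);
		akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);

		/* Requests carrying CRYPTO_TFM_REQ_MAY_BACKLOG are the ones the new
		 * akcipher_enqueue_req() transfers to crypto-engine; all others are
		 * enqueued directly on the CAAM job ring. */
		akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);

		ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

		akcipher_request_free(req);
	free_tfm:
		crypto_free_akcipher(tfm);
		return ret;
	}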