From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Tue, 22 Oct 2024 10:36:11 +0000 Subject: [PATCH] Change 4G dial-up to QMI; quectel-CM must be run in the background on the system --- kernel/drivers/crypto/caam/caamalg_qi.c | 1299 ++++++++++++++++++++++++++-------------------- 1 file changed, 590 insertions(+), 709 deletions(-) diff --git a/kernel/drivers/crypto/caam/caamalg_qi.c b/kernel/drivers/crypto/caam/caamalg_qi.c index d7aa7d7..a24ae96 100644 --- a/kernel/drivers/crypto/caam/caamalg_qi.c +++ b/kernel/drivers/crypto/caam/caamalg_qi.c @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Freescale FSL CAAM support for crypto API over QI backend. * Based on caamalg.c * * Copyright 2013-2016 Freescale Semiconductor, Inc. - * Copyright 2016-2017 NXP + * Copyright 2016-2019 NXP */ #include "compat.h" @@ -17,6 +18,8 @@ #include "qi.h" #include "jr.h" #include "caamalg_desc.h" +#include <crypto/xts.h> +#include <asm/unaligned.h> /* * crypto alg @@ -35,10 +38,17 @@ int class2_alg_type; bool rfc3686; bool geniv; + bool nodkp; }; struct caam_aead_alg { struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +struct caam_skcipher_alg { + struct skcipher_alg skcipher; struct caam_alg_entry caam; bool registered; }; @@ -50,7 +60,6 @@ struct device *jrdev; u32 sh_desc_enc[DESC_MAX_USED_LEN]; u32 sh_desc_dec[DESC_MAX_USED_LEN]; - u32 sh_desc_givenc[DESC_MAX_USED_LEN]; u8 key[CAAM_MAX_KEY_SIZE]; dma_addr_t key_dma; enum dma_data_direction dir; @@ -60,6 +69,12 @@ struct device *qidev; spinlock_t lock; /* Protects multiple init of driver context */ struct caam_drv_ctx *drv_ctx[NUM_OP]; + bool xts_key_fallback; + struct crypto_skcipher *fallback; +}; + +struct caam_skcipher_req_ctx { + struct skcipher_request fallback_req; }; static int aead_set_sh_desc(struct crypto_aead *aead) @@ -98,6 +113,18 @@ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed.
+ */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; @@ -110,16 +137,6 @@ DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; - - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -135,16 +152,6 @@ DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; - - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -163,16 +170,6 @@ DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; - - if (inl_mask & 1) - ctx->adata.key_virt = ctx->key; - else - ctx->adata.key_dma = ctx->key_dma; - - if (inl_mask & 2) - ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; - else - ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); @@ -207,13 +204,11 @@ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; -#ifdef DEBUG - dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", + dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keys.authkeylen + keys.enckeylen, keys.enckeylen, keys.authkeylen); - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); /* * If DKP is supported, use it in the shared descriptor to generate @@ -230,7 +225,7 @@ memcpy(ctx->key, keys.authkey, keys.authkeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); - dma_sync_single_for_device(jrdev, ctx->key_dma, + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); goto skip_split_key; @@ -244,13 +239,13 @@ /* postpend encryption key to auth split key */ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + - keys.enckeylen, ctx->dir); -#ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, - ctx->adata.keylen_pad + keys.enckeylen, 1); -#endif + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->adata.keylen_pad + keys.enckeylen, + ctx->dir); + + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->adata.keylen_pad + keys.enckeylen, 1); skip_split_key: ctx->cdata.keylen = keys.enckeylen; @@ -281,9 +276,25 @@ memzero_explicit(&keys, sizeof(keys)); return ret; badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; +} + +static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int err; + 
+ err = crypto_authenc_extractkeys(&keys, key, keylen); + if (unlikely(err)) + return err; + + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + aead_setkey(aead, key, keylen); + + memzero_explicit(&keys, sizeof(keys)); + return err; } static int gcm_set_sh_desc(struct crypto_aead *aead) @@ -332,6 +343,11 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); @@ -346,13 +362,16 @@ struct device *jrdev = ctx->jrdev; int ret; -#ifdef DEBUG - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif + ret = aes_check_keylen(keylen); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, + ctx->dir); ctx->cdata.keylen = keylen; ret = gcm_set_sh_desc(aead); @@ -428,6 +447,11 @@ unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); @@ -442,13 +466,12 @@ struct device *jrdev = ctx->jrdev; int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; -#ifdef DEBUG - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* @@ -456,8 +479,8 @@ * in the nonce. Update the AES key length. */ ctx->cdata.keylen = keylen - 4; - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, - ctx->dir); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->cdata.keylen, ctx->dir); ret = rfc4106_set_sh_desc(aead); if (ret) @@ -533,6 +556,9 @@ { struct caam_ctx *ctx = crypto_aead_ctx(authenc); + if (authsize != 16) + return -EINVAL; + ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); @@ -546,13 +572,12 @@ struct device *jrdev = ctx->jrdev; int ret; - if (keylen < 4) - return -EINVAL; + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; -#ifdef DEBUG - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* @@ -560,8 +585,8 @@ * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; - dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, - ctx->dir); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->cdata.keylen, ctx->dir); ret = rfc4543_set_sh_desc(aead); if (ret) @@ -589,107 +614,151 @@ return 0; } -static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, - const u8 *key, unsigned int keylen) +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen, const u32 ctx1_iv_off) { - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); - const char *alg_name = crypto_tfm_alg_name(tfm); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_skcipher_alg *alg = + container_of(crypto_skcipher_alg(skcipher), typeof(*alg), + skcipher); struct device *jrdev = ctx->jrdev; - unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - u32 ctx1_iv_off = 0; - const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == - OP_ALG_AAI_CTR_MOD128); - const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + const bool is_rfc3686 = alg->caam.rfc3686; int ret = 0; -#ifdef DEBUG - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif - /* - * AES-CTR needs to load IV in CONTEXT1 reg - * at an offset of 128bits (16bytes) - * CONTEXT1[255:128] = IV - */ - if (ctr_mode) - ctx1_iv_off = 16; + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* skcipher encrypt, decrypt shared descriptors */ + cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); + cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return -EINVAL; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return -EINVAL; + } + } + + return ret; +} + +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; /* * RFC3686 specific: * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} * | *key = {KEY, NONCE} */ - if (is_rfc3686) { - ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; - keylen -= CTR_RFC3686_NONCE_SIZE; - } + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; - ctx->cdata.keylen = keylen; - ctx->cdata.key_virt = key; - ctx->cdata.key_inline = true; + err = aes_check_keylen(keylen); + if (err) + return err; - /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ - cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, - is_rfc3686, ctx1_iv_off); - cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, - is_rfc3686, ctx1_iv_off); - 
cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata, - ivsize, is_rfc3686, ctx1_iv_off); - - /* Now update the driver contexts with the new shared descriptor */ - if (ctx->drv_ctx[ENCRYPT]) { - ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], - ctx->sh_desc_enc); - if (ret) { - dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; - } - } - - if (ctx->drv_ctx[DECRYPT]) { - ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], - ctx->sh_desc_dec); - if (ret) { - dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; - } - } - - if (ctx->drv_ctx[GIVENCRYPT]) { - ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT], - ctx->sh_desc_givenc); - if (ret) { - dev_err(jrdev, "driver givenc context update failed\n"); - goto badkey; - } - } - - return ret; -badkey: - crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } -static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, - const u8 *key, unsigned int keylen) +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) { - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - struct device *jrdev = ctx->jrdev; - int ret = 0; + u32 ctx1_iv_off; + int err; - if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - dev_err(jrdev, "key size mismatch\n"); - goto badkey; + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + int ret = 0; + int err; + + err = xts_verify_key(skcipher, key, keylen); + if (err) { + dev_dbg(jrdev, "key size mismatch\n"); + return err; + } + + if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) + ctx->xts_key_fallback = true; + + if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) { + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; } ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; - /* xts ablkcipher encrypt, decrypt shared descriptors */ - cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata); - cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata); + /* xts skcipher encrypt, decrypt shared descriptors */ + cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); + cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { @@ -697,7 +766,7 @@ ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; + return -EINVAL; } } @@ -706,14 +775,11 @@ ctx->sh_desc_dec); if (ret) { 
dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; + return -EINVAL; } } return ret; -badkey: - crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } /* @@ -737,11 +803,11 @@ unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; - struct qm_sg_entry sgt[0]; + struct qm_sg_entry sgt[]; }; /* - * ablkcipher_edesc - s/w-extended ablkcipher descriptor + * skcipher_edesc - s/w-extended skcipher descriptor * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist * @iv_dma: dma address of iv for checking continuity and link table @@ -750,14 +816,14 @@ * @drv_req: driver-specific request structure * @sgt: the h/w link table, followed by IV */ -struct ablkcipher_edesc { +struct skcipher_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; - struct qm_sg_entry sgt[0]; + struct qm_sg_entry sgt[]; }; static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, @@ -781,14 +847,12 @@ if (type == ENCRYPT) desc = ctx->sh_desc_enc; - else if (type == DECRYPT) + else /* (type == DECRYPT) */ desc = ctx->sh_desc_dec; - else /* (type == GIVENCRYPT) */ - desc = ctx->sh_desc_givenc; cpu = smp_processor_id(); drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); - if (likely(!IS_ERR_OR_NULL(drv_ctx))) + if (!IS_ERR_OR_NULL(drv_ctx)) drv_ctx->op_type = type; ctx->drv_ctx[type] = drv_ctx; @@ -803,21 +867,20 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, dma_addr_t iv_dma, int ivsize, - enum optype op_type, dma_addr_t qm_sg_dma, + enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, int qm_sg_bytes) { if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, - op_type == GIVENCRYPT ? 
DMA_FROM_DEVICE : - DMA_TO_DEVICE); + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (qm_sg_bytes) dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); } @@ -830,21 +893,20 @@ int ivsize = crypto_aead_ivsize(aead); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, - edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, - edesc->qm_sg_dma, edesc->qm_sg_bytes); + edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, + edesc->qm_sg_bytes); dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); } -static void ablkcipher_unmap(struct device *dev, - struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req) +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, + struct skcipher_request *req) { - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ivsize = crypto_skcipher_ivsize(skcipher); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, - edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, - edesc->qm_sg_dma, edesc->qm_sg_bytes); + edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, + edesc->qm_sg_bytes); } static void aead_done(struct caam_drv_req *drv_req, u32 status) @@ -858,20 +920,8 @@ qidev = caam_ctx->qidev; - if (unlikely(status)) { - u32 ssrc = status & JRSTA_SSRC_MASK; - u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; - - caam_jr_strstatus(qidev, status); - /* - * verify hw auth check passed else return -EBADMSG - */ - if (ssrc == JRSTA_SSRC_CCB_ERROR && - err_id == JRSTA_CCBERR_ERRID_ICVCHK) - ecode = -EBADMSG; - else - ecode = -EIO; - } + if (unlikely(status)) + ecode = caam_jr_strstatus(qidev, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); aead_unmap(qidev, edesc, aead_req); @@ -894,6 +944,7 @@ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + int src_len, dst_len = 0; struct aead_edesc *edesc; dma_addr_t qm_sg_dma, iv_dma = 0; int ivsize = 0; @@ -902,10 +953,9 @@ int in_len, out_len; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; - enum optype op_type = encrypt ? ENCRYPT : DECRYPT; - drv_ctx = get_drv_ctx(ctx, op_type); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) + drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); + if (IS_ERR_OR_NULL(drv_ctx)) return (struct aead_edesc *)drv_ctx; /* allocate space for base edesc and hw desc commands, link tables */ @@ -916,13 +966,13 @@ } if (likely(req->src == req->dst)) { - src_nents = sg_nents_for_len(req->src, req->assoclen + - req->cryptlen + - (encrypt ? authsize : 0)); + src_len = req->assoclen + req->cryptlen + + (encrypt ? authsize : 0); + + src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", - req->assoclen + req->cryptlen + - (encrypt ? authsize : 0)); + src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } @@ -935,23 +985,21 @@ return ERR_PTR(-ENOMEM); } } else { - src_nents = sg_nents_for_len(req->src, req->assoclen + - req->cryptlen); + src_len = req->assoclen + req->cryptlen; + dst_len = src_len + (encrypt ? 
authsize : (-authsize)); + + src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", - req->assoclen + req->cryptlen); + src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } - dst_nents = sg_nents_for_len(req->dst, req->assoclen + - req->cryptlen + - (encrypt ? authsize : - (-authsize))); + dst_nents = sg_nents_for_len(req->dst, dst_len); if (unlikely(dst_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", - req->assoclen + req->cryptlen + - (encrypt ? authsize : (-authsize))); + dst_len); qi_cache_free(edesc); return ERR_PTR(dst_nents); } @@ -968,13 +1016,19 @@ mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(qidev, "unable to map destination\n"); - dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); + if (dst_nents) { + mapped_dst_nents = dma_map_sg(qidev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); + dma_unmap_sg(qidev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } @@ -984,9 +1038,24 @@ /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. * Input is not contiguous. + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. Logic: + * if (src != dst && output S/G) + * pad output S/G, if needed + * else if (src == dst && S/G) + * overlapping S/Gs; pad one of them + * else if (input S/G) ... + * pad input S/G, if needed */ - qm_sg_ents = 1 + !!ivsize + mapped_src_nents + - (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); + qm_sg_ents = 1 + !!ivsize + mapped_src_nents; + if (mapped_dst_nents > 1) + qm_sg_ents += pad_sg_nents(mapped_dst_nents); + else if ((req->src == req->dst) && (mapped_src_nents > 1)) + qm_sg_ents = max(pad_sg_nents(qm_sg_ents), + 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); + else + qm_sg_ents = pad_sg_nents(qm_sg_ents); + sg_table = &edesc->sgt[0]; qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > @@ -994,7 +1063,7 @@ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); + 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1009,7 +1078,7 @@ if (dma_mapping_error(qidev, iv_dma)) { dev_err(qidev, "unable to map IV\n"); caam_unmap(qidev, req->src, req->dst, src_nents, - dst_nents, 0, 0, 0, 0, 0); + dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1028,7 +1097,7 @@ if (dma_mapping_error(qidev, edesc->assoclen_dma)) { dev_err(qidev, "unable to map assoclen\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1039,19 +1108,18 @@ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); qm_sg_index++; } - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); + sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); qm_sg_index += mapped_src_nents; if (mapped_dst_nents > 1) - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + - qm_sg_index, 0); + sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(qidev, qm_sg_dma)) { dev_err(qidev, "unable to map S/G table\n"); dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1074,7 +1142,7 @@ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + (1 + !!ivsize) * sizeof(*sg_table), out_len, 0); - } else if (mapped_dst_nents == 1) { + } else if (mapped_dst_nents <= 1) { dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 0); } else { @@ -1124,106 +1192,88 @@ static int ipsec_gcm_encrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_crypt(req, true); + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, + true); } static int ipsec_gcm_decrypt(struct aead_request *req) { - if (req->assoclen < 8) - return -EINVAL; - - return aead_crypt(req, false); + return crypto_ipsec_check_assoclen(req->assoclen) ? 
: aead_crypt(req, + false); } -static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) +static void skcipher_done(struct caam_drv_req *drv_req, u32 status) { - struct ablkcipher_edesc *edesc; - struct ablkcipher_request *req = drv_req->app_ctx; - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); + struct skcipher_edesc *edesc; + struct skcipher_request *req = drv_req->app_ctx; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); struct device *qidev = caam_ctx->qidev; - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; -#ifdef DEBUG - dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); -#endif + dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); if (status) - caam_jr_strstatus(qidev, status); + ecode = caam_jr_strstatus(qidev, status); -#ifdef DEBUG - print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->info, - edesc->src_nents > 1 ? 100 : ivsize, 1); - caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents > 1 ? 100 : ivsize, 1); + caam_dump_sg("dst @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->dst, - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); -#endif + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); - ablkcipher_unmap(qidev, edesc, req); - - /* In case initial IV was generated, copy it in GIVCIPHER request */ - if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) { - u8 *iv; - struct skcipher_givcrypt_request *greq; - - greq = container_of(req, struct skcipher_givcrypt_request, - creq); - iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes; - memcpy(greq->giv, iv, ivsize); - } + skcipher_unmap(qidev, edesc, req); /* - * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. This is used e.g. by the CTS mode. + * The crypto API expects us to set the IV (req->iv) to the last + * ciphertext block (CBC mode) or last counter (CTR mode). + * This is used e.g. by the CTS mode. */ - if (edesc->drv_req.drv_ctx->op_type != DECRYPT) - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - - ivsize, ivsize, 0); + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); qi_cache_free(edesc); - ablkcipher_request_complete(req, status); + skcipher_request_complete(req, ecode); } -static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request - *req, bool encrypt) +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, + bool encrypt) { - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); struct device *qidev = ctx->qidev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; - struct ablkcipher_edesc *edesc; + struct skcipher_edesc *edesc; dma_addr_t iv_dma; u8 *iv; - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; - enum optype op_type = encrypt ? ENCRYPT : DECRYPT; - drv_ctx = get_drv_ctx(ctx, op_type); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) - return (struct ablkcipher_edesc *)drv_ctx; + drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); + if (IS_ERR_OR_NULL(drv_ctx)) + return (struct skcipher_edesc *)drv_ctx; - src_nents = sg_nents_for_len(req->src, req->nbytes); + src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", - req->nbytes); + req->cryptlen); return ERR_PTR(src_nents); } if (unlikely(req->src != req->dst)) { - dst_nents = sg_nents_for_len(req->dst, req->nbytes); + dst_nents = sg_nents_for_len(req->dst, req->cryptlen); if (unlikely(dst_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", - req->nbytes); + req->cryptlen); return ERR_PTR(dst_nents); } @@ -1253,14 +1303,26 @@ qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; + /* + * Input, output HW S/G tables: [IV, src][dst, IV] + * IV entries point to the same buffer + * If src == dst, S/G entries are reused (S/G tables overlap) + * + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. + */ + if (req->src != req->dst) + qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); + else + qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); - if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + ivsize > CAAM_QI_MEMCACHE_SIZE)) { dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1269,20 +1331,20 @@ if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } /* Make sure IV is located in a DMAable area */ sg_table = &edesc->sgt[0]; iv = (u8 *)(sg_table + qm_sg_ents); - memcpy(iv, req->info, ivsize); + memcpy(iv, req->iv, ivsize); - iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(qidev, iv_dma)) { dev_err(qidev, "unable to map IV\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); + 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1292,22 +1354,24 @@ edesc->iv_dma = iv_dma; edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; - edesc->drv_req.cbk = ablkcipher_done; + edesc->drv_req.cbk = skcipher_done; edesc->drv_req.drv_ctx = drv_ctx; dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); + sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); - if (mapped_dst_nents > 1) - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + - dst_sg_idx, 0); + if 
(req->src != req->dst) + sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); + + dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, + ivsize, 0); edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { dev_err(qidev, "unable to map S/G table\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } @@ -1315,218 +1379,65 @@ fd_sgt = &edesc->drv_req.fd_sgt[0]; dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, - ivsize + req->nbytes, 0); + ivsize + req->cryptlen, 0); - if (req->src == req->dst) { + if (req->src == req->dst) dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + - sizeof(*sg_table), req->nbytes, 0); - } else if (mapped_dst_nents > 1) { - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * - sizeof(*sg_table), req->nbytes, 0); - } else { - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), - req->nbytes, 0); - } - - return edesc; -} - -static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( - struct skcipher_givcrypt_request *creq) -{ - struct ablkcipher_request *req = &creq->creq; - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - struct device *qidev = ctx->qidev; - gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC; - int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; - struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma; - u8 *iv; - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - struct qm_sg_entry *sg_table, *fd_sgt; - int dst_sg_idx, qm_sg_ents, qm_sg_bytes; - struct caam_drv_ctx *drv_ctx; - - drv_ctx = get_drv_ctx(ctx, GIVENCRYPT); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) - return (struct ablkcipher_edesc *)drv_ctx; - - src_nents = sg_nents_for_len(req->src, req->nbytes); - if (unlikely(src_nents < 0)) { - dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", - req->nbytes); - return ERR_PTR(src_nents); - } - - if (unlikely(req->src != req->dst)) { - dst_nents = sg_nents_for_len(req->dst, req->nbytes); - if (unlikely(dst_nents < 0)) { - dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", - req->nbytes); - return ERR_PTR(dst_nents); - } - - mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, - DMA_TO_DEVICE); - if (unlikely(!mapped_src_nents)) { - dev_err(qidev, "unable to map source\n"); - return ERR_PTR(-ENOMEM); - } - - mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(qidev, "unable to map destination\n"); - dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); - return ERR_PTR(-ENOMEM); - } - } else { - mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, - DMA_BIDIRECTIONAL); - if (unlikely(!mapped_src_nents)) { - dev_err(qidev, "unable to map source\n"); - return ERR_PTR(-ENOMEM); - } - - dst_nents = src_nents; - mapped_dst_nents = src_nents; - } - - qm_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; - dst_sg_idx = qm_sg_ents; - - qm_sg_ents += 1 + mapped_dst_nents; - qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); - if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + - ivsize > CAAM_QI_MEMCACHE_SIZE)) { - dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", - qm_sg_ents, ivsize); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_alloc(GFP_DMA | flags); - if (!edesc) { - dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - /* Make sure IV is located in a DMAable area */ - sg_table = &edesc->sgt[0]; - iv = (u8 *)(sg_table + qm_sg_ents); - iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - - edesc->src_nents = src_nents; - edesc->dst_nents = dst_nents; - edesc->iv_dma = iv_dma; - edesc->qm_sg_bytes = qm_sg_bytes; - edesc->drv_req.app_ctx = req; - edesc->drv_req.cbk = ablkcipher_done; - edesc->drv_req.drv_ctx = drv_ctx; - - if (mapped_src_nents > 1) - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); - - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1, - 0); - - edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, - DMA_TO_DEVICE); - if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { - dev_err(qidev, "unable to map S/G table\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - - fd_sgt = &edesc->drv_req.fd_sgt[0]; - - if (mapped_src_nents > 1) - dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes, + sizeof(*sg_table), req->cryptlen + ivsize, 0); else - dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src), - req->nbytes, 0); - - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * - sizeof(*sg_table), ivsize + req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table), req->cryptlen + ivsize, + 0); return edesc; } -static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) { - struct ablkcipher_edesc *edesc; - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + +static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) +{ + struct skcipher_edesc *edesc; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); int ret; - - if (unlikely(caam_congested)) - return -EAGAIN; - - /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, encrypt); - if (IS_ERR(edesc)) - return PTR_ERR(edesc); /* - * The 
crypto API expects us to set the IV (req->info) to the last - * ciphertext block. + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. */ - if (!encrypt) - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - - ivsize, ivsize, 0); + if (!req->cryptlen && !ctx->fallback) + return 0; - ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); - if (!ret) { - ret = -EINPROGRESS; - } else { - ablkcipher_unmap(ctx->qidev, edesc, req); - qi_cache_free(edesc); + if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); } - - return ret; -} - -static int ablkcipher_encrypt(struct ablkcipher_request *req) -{ - return ablkcipher_crypt(req, true); -} - -static int ablkcipher_decrypt(struct ablkcipher_request *req) -{ - return ablkcipher_crypt(req, false); -} - -static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) -{ - struct ablkcipher_request *req = &creq->creq; - struct ablkcipher_edesc *edesc; - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - int ret; if (unlikely(caam_congested)) return -EAGAIN; /* allocate extended descriptor */ - edesc = ablkcipher_giv_edesc_alloc(creq); + edesc = skcipher_edesc_alloc(req, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1534,129 +1445,129 @@ if (!ret) { ret = -EINPROGRESS; } else { - ablkcipher_unmap(ctx->qidev, edesc, req); + skcipher_unmap(ctx->qidev, edesc, req); qi_cache_free(edesc); } return ret; } -#define template_ablkcipher template_u.ablkcipher -struct caam_alg_template { - char name[CRYPTO_MAX_ALG_NAME]; - char driver_name[CRYPTO_MAX_ALG_NAME]; - unsigned int blocksize; - u32 type; - union { - struct ablkcipher_alg ablkcipher; - } template_u; - u32 class1_alg_type; - u32 class2_alg_type; -}; +static int skcipher_encrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, true); +} -static struct caam_alg_template driver_algs[] = { - /* ablkcipher descriptor */ +static int skcipher_decrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, false); +} + +static struct caam_skcipher_alg driver_algs[] = { { - .name = "cbc(aes)", - .driver_name = "cbc-aes-caam-qi", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "<built-in>", + .skcipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { - .name = "cbc(des3_ede)", - .driver_name = 
"cbc-3des-caam-qi", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "<built-in>", + .skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { - .name = "cbc(des)", - .driver_name = "cbc-des-caam-qi", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "<built-in>", + .skcipher = { + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { - .name = "ctr(aes)", - .driver_name = "ctr-aes-caam-qi", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "chainiv", + .skcipher = { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = ctr_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, }, { - .name = "rfc3686(ctr(aes))", - .driver_name = "rfc3686-ctr-aes-caam-qi", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "<built-in>", + .skcipher = { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = rfc3686_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, + .chunksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .rfc3686 = true, + }, }, { - .name = "xts(aes)", - .driver_name = "xts-aes-caam-qi", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = xts_ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "eseqiv", + .skcipher = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam-qi", + .cra_flags = 
CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, }; @@ -1677,6 +1588,7 @@ }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, }, }, { @@ -1695,6 +1607,7 @@ }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, }, }, /* Galois Counter Mode */ @@ -1714,6 +1627,7 @@ }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, } }, /* single-pass ipsec_esp descriptor */ @@ -1992,7 +1906,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2014,7 +1928,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2037,7 +1951,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2060,7 +1974,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2083,7 +1997,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2106,7 +2020,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2129,7 +2043,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2152,7 +2066,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2175,7 +2089,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2198,7 +2112,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2221,7 +2135,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2244,7 +2158,7 @@ "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = aead_setkey, + .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, @@ -2528,16 +2442,11 @@ }, }; -struct caam_crypto_alg { - 
struct list_head entry; - struct crypto_alg crypto_alg; - struct caam_alg_entry caam; -}; - static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, bool uses_dkp) { struct caam_drv_private *priv; + struct device *dev; /* * distribute tfms across job rings to ensure in-order @@ -2549,16 +2458,17 @@ return PTR_ERR(ctx->jrdev); } - priv = dev_get_drvdata(ctx->jrdev->parent); + dev = ctx->jrdev->parent; + priv = dev_get_drvdata(dev); if (priv->era >= 6 && uses_dkp) ctx->dir = DMA_BIDIRECTIONAL; else ctx->dir = DMA_TO_DEVICE; - ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), ctx->dir); - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { - dev_err(ctx->jrdev, "unable to map key\n"); + if (dma_mapping_error(dev, ctx->key_dma)) { + dev_err(dev, "unable to map key\n"); caam_jr_free(ctx->jrdev); return -ENOMEM; } @@ -2567,24 +2477,46 @@ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; - ctx->qidev = priv->qidev; + ctx->qidev = dev; spin_lock_init(&ctx->lock); ctx->drv_ctx[ENCRYPT] = NULL; ctx->drv_ctx[DECRYPT] = NULL; - ctx->drv_ctx[GIVENCRYPT] = NULL; return 0; } -static int caam_cra_init(struct crypto_tfm *tfm) +static int caam_cra_init(struct crypto_skcipher *tfm) { - struct crypto_alg *alg = tfm->__crt_alg; - struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg), - crypto_alg); - struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; - return caam_init_common(ctx, &caam_alg->caam, false); + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + pr_err("Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + + crypto_skcipher_reqsize(fallback)); + } + + ret = caam_init_common(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; } static int caam_aead_init(struct crypto_aead *tfm) @@ -2594,24 +2526,27 @@ aead); struct caam_ctx *ctx = crypto_aead_ctx(tfm); - return caam_init_common(ctx, &caam_alg->caam, - alg->setkey == aead_setkey); + return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); } static void caam_exit_common(struct caam_ctx *ctx) { caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); - caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); + dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), + ctx->dir); caam_jr_free(ctx->jrdev); } -static void caam_cra_exit(struct crypto_tfm *tfm) +static void caam_cra_exit(struct crypto_skcipher *tfm) { - caam_exit_common(crypto_tfm_ctx(tfm)); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); } static void caam_aead_exit(struct crypto_aead *tfm) @@ -2619,10 +2554,8 @@ 
caam_exit_common(crypto_aead_ctx(tfm)); } -static struct list_head alg_list; -static void __exit caam_qi_algapi_exit(void) +void caam_qi_algapi_exit(void) { - struct caam_crypto_alg *t_alg, *n; int i; for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { @@ -2632,55 +2565,26 @@ crypto_unregister_aead(&t_alg->aead); } - if (!alg_list.next) - return; + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; - list_for_each_entry_safe(t_alg, n, &alg_list, entry) { - crypto_unregister_alg(&t_alg->crypto_alg); - list_del(&t_alg->entry); - kfree(t_alg); + if (t_alg->registered) + crypto_unregister_skcipher(&t_alg->skcipher); } } -static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template - *template) +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) { - struct caam_crypto_alg *t_alg; - struct crypto_alg *alg; + struct skcipher_alg *alg = &t_alg->skcipher; - t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); - if (!t_alg) - return ERR_PTR(-ENOMEM); + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY); - alg = &t_alg->crypto_alg; - - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", - template->driver_name); - alg->cra_module = THIS_MODULE; - alg->cra_init = caam_cra_init; - alg->cra_exit = caam_cra_exit; - alg->cra_priority = CAAM_CRA_PRIORITY; - alg->cra_blocksize = template->blocksize; - alg->cra_alignmask = 0; - alg->cra_ctxsize = sizeof(struct caam_ctx); - alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | - template->type; - switch (template->type) { - case CRYPTO_ALG_TYPE_GIVCIPHER: - alg->cra_type = &crypto_givcipher_type; - alg->cra_ablkcipher = template->template_ablkcipher; - break; - case CRYPTO_ALG_TYPE_ABLKCIPHER: - alg->cra_type = &crypto_ablkcipher_type; - alg->cra_ablkcipher = template->template_ablkcipher; - break; - } - - t_alg->caam.class1_alg_type = template->class1_alg_type; - t_alg->caam.class2_alg_type = template->class2_alg_type; - - return t_alg; + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; } static void caam_aead_alg_init(struct caam_aead_alg *t_alg) @@ -2690,70 +2594,62 @@ alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; } -static int __init caam_qi_algapi_init(void) +int caam_qi_algapi_init(struct device *ctrldev) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } - - pdev = of_find_device_by_node(dev_node); - of_node_put(dev_node); - if (!pdev) - return -ENODEV; - - ctrldev = &pdev->dev; - priv = 
dev_get_drvdata(ctrldev); - - /* - * If priv is NULL, it's probably because the caam driver wasn't - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. - */ - if (!priv || !priv->qi_present) - return -ENODEV; - - if (caam_dpaa2) { - dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); - return -ENODEV; - } - - INIT_LIST_HEAD(&alg_list); + /* Make sure this runs only on (DPAA 1.x) QI */ + if (!priv->qi_present || caam_dpaa2) + return 0; /* * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. */ - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + } /* If MD is present, limit digest size based on LP256 */ - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { - struct caam_crypto_alg *t_alg; - struct caam_alg_template *alg = driver_algs + i; - u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; + struct caam_skcipher_alg *t_alg = driver_algs + i; + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; /* Skip DES algorithms if not supported by device */ if (!des_inst && @@ -2765,23 +2661,16 @@ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; - t_alg = caam_alg_alloc(alg); - if (IS_ERR(t_alg)) { - err = PTR_ERR(t_alg); - dev_warn(priv->qidev, "%s alg allocation failed\n", - alg->driver_name); - continue; - } + caam_skcipher_alg_init(t_alg); - err = crypto_register_alg(&t_alg->crypto_alg); + err = crypto_register_skcipher(&t_alg->skcipher); if (err) { - dev_warn(priv->qidev, "%s alg registration failed\n", - t_alg->crypto_alg.cra_driver_name); - kfree(t_alg); + dev_warn(ctrldev, "%s alg registration failed\n", + t_alg->skcipher.base.cra_driver_name); continue; } - list_add_tail(&t_alg->entry, &alg_list); + t_alg->registered = true; registered = true; } @@ -2807,8 +2696,7 @@ * Check support for AES algorithms not available * on LP devices. 
*/ - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) && - (alg_aai == OP_ALG_AAI_GCM)) + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) continue; /* @@ -2833,14 +2721,7 @@ } if (registered) - dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); + dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); return err; } - -module_init(caam_qi_algapi_init); -module_exit(caam_qi_algapi_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend"); -MODULE_AUTHOR("Freescale Semiconductor"); -- Gitblit v1.6.2
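
Note on the authenc key handling in aead_setkey() above: the (split) authentication key is placed at the start of ctx->key and the encryption key is appended at offset keylen_pad, so a single DMA mapping (ctx->key_dma) covers both halves. Below is a minimal standalone model of that buffer layout; KEYLEN_PAD and the key/buffer sizes are illustrative assumptions, not values from the patch:

/*
 * Model of the authenc key buffer built by aead_setkey():
 * [ auth (split) key, padded to keylen_pad ][ encryption key ]
 */
#include <stdio.h>
#include <string.h>

#define KEYLEN_PAD 32	/* padded auth split-key length (assumed) */

int main(void)
{
	unsigned char ctx_key[64] = { 0 };
	const unsigned char authkey[20] = "auth-key-bytes.....";
	const unsigned char enckey[16] = "enc-key-bytes...";

	/* auth key first, padded to KEYLEN_PAD; enc key postpended */
	memcpy(ctx_key, authkey, sizeof(authkey));
	memcpy(ctx_key + KEYLEN_PAD, enckey, sizeof(enckey));

	printf("enc key starts at offset %d of a %zu-byte buffer\n",
	       KEYLEN_PAD, sizeof(ctx_key));
	return 0;
}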
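
Note on the S/G table sizing: the edesc allocation hunks pad the entry counts because, per the comments added by the patch, the hardware reads 4 S/G entries at a time and the reads must not run past the end of the table. A sketch of that rounding, assuming pad_sg_nents() is a plain round-up to a multiple of 4 (the factor comes from the patch comment; the exact kernel helper may differ):

#include <stdio.h>

#define SG_PREFETCH 4	/* entries fetched per burst, per the patch comment */

/* assumed implementation: round up to the next multiple of SG_PREFETCH */
static int pad_sg_nents(int nents)
{
	return (nents + SG_PREFETCH - 1) & ~(SG_PREFETCH - 1);
}

int main(void)
{
	/* e.g. 1 IV entry + 2 mapped src entries; dst is padded separately */
	int qm_sg_ents = 1 + 2;

	qm_sg_ents += pad_sg_nents(3);	/* 3 dst entries round up to 4 */
	printf("padded table size: %d entries\n", qm_sg_ents);	/* prints 7 */
	return 0;
}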
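
Note on the XTS fallback: xts_skcipher_setkey() and skcipher_crypt() hand a request to a software skcipher when the CAAM era is <= 8 and the upper half of the IV (the sector index) is non-zero, or when the key length is not one of the two hardware-supported sizes (2 * AES-128 or 2 * AES-256). A standalone model of just that decision; the helper names and main() driver below are illustrative, while the real code operates on struct caam_ctx and the kernel crypto API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

/* mirrors xts_skcipher_ivsize(): true if the IV's upper half is non-zero */
static bool iv_upper_half_nonzero(const uint8_t *iv, unsigned int ivsize)
{
	uint8_t acc = 0;
	unsigned int i;

	for (i = ivsize / 2; i < ivsize; i++)
		acc |= iv[i];
	return acc != 0;
}

static bool use_sw_fallback(int era, unsigned int keylen,
			    const uint8_t *iv, unsigned int ivsize)
{
	/* mirrors ctx->xts_key_fallback as set in xts_skcipher_setkey() */
	bool key_fallback = keylen != 2 * AES_KEYSIZE_128 &&
			    keylen != 2 * AES_KEYSIZE_256;

	return (era <= 8 && iv_upper_half_nonzero(iv, ivsize)) || key_fallback;
}

int main(void)
{
	uint8_t iv[16] = { 0 };

	iv[12] = 1;	/* non-zero sector index in the IV's upper half */
	printf("era 8,  64-byte key: %s\n",
	       use_sw_fallback(8, 64, iv, sizeof(iv)) ? "fallback" : "CAAM");
	printf("era 10, 64-byte key: %s\n",
	       use_sw_fallback(10, 64, iv, sizeof(iv)) ? "fallback" : "CAAM");
	printf("era 10, 48-byte key: %s\n",	/* 2 * AES-192: no HW support */
	       use_sw_fallback(10, 48, iv, sizeof(iv)) ? "fallback" : "CAAM");
	return 0;
}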