From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] crypto: cryptd - modernize queueing, refcounting and
 instance management

Bring kernel/crypto/cryptd.c in line with the current upstream design:

- Queue requests on a dedicated "cryptd" workqueue instead of the
  shared kcrypto_wq, and protect the per-CPU queues by disabling
  bottom halves instead of taking a per-queue spinlock.
- Drop the obsolete blkcipher/ablkcipher code paths; the skcipher,
  ahash and aead implementations remain.
- Convert the context reference counters from atomic_t to refcount_t.
- Allocate instances directly and give each algorithm type its own
  ->free() callback, replacing cryptd_alloc_instance() and the shared
  cryptd_free().
- Replace the GPL license boilerplate with an SPDX identifier.
---
 kernel/crypto/cryptd.c | 590 ++++++++++++++--------------------------------------
 1 file changed, 146 insertions(+), 444 deletions(-)

diff --git a/kernel/crypto/cryptd.c b/kernel/crypto/cryptd.c
index e079f9a..ca3a40f 100644
--- a/kernel/crypto/cryptd.c
+++ b/kernel/crypto/cryptd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Software async crypto daemon.
  *
@@ -9,20 +10,13 @@
  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  *    Copyright (c) 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/cryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,18 +25,24 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 static unsigned int cryptd_max_cpu_qlen = 1000;
 module_param(cryptd_max_cpu_qlen, uint, 0);
 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 
+static struct workqueue_struct *cryptd_wq;
+
 struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
-	spinlock_t qlock;
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -66,26 +66,18 @@
	struct cryptd_queue *queue;
 };
 
-struct cryptd_blkcipher_ctx {
-	atomic_t refcnt;
-	struct crypto_blkcipher *child;
-};
-
-struct cryptd_blkcipher_request_ctx {
-	crypto_completion_t complete;
-};
-
 struct cryptd_skcipher_ctx {
-	atomic_t refcnt;
+	refcount_t refcnt;
	struct crypto_skcipher *child;
 };
 
 struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
+	struct skcipher_request req;
 };
 
 struct cryptd_hash_ctx {
-	atomic_t refcnt;
+	refcount_t refcnt;
	struct crypto_shash *child;
 };
 
@@ -95,7 +87,7 @@
 };
 
 struct cryptd_aead_ctx {
-	atomic_t refcnt;
+	refcount_t refcnt;
	struct crypto_aead *child;
 };
 
@@ -118,7 +110,6 @@
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
-		spin_lock_init(&cpu_queue->qlock);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
@@ -139,30 +130,28 @@
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
	struct cryptd_cpu_queue *cpu_queue;
-	atomic_t *refcnt;
+	refcount_t *refcnt;
 
-	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
-	spin_lock_bh(&cpu_queue->qlock);
-	cpu = smp_processor_id();
-
+	local_bh_disable();
+	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
	refcnt = crypto_tfm_ctx(request->tfm);
 
	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
-	if (!atomic_read(refcnt))
-		goto out_put_cpu;
+	if (!refcount_read(refcnt))
+		goto out;
 
-	atomic_inc(refcnt);
+	refcount_inc(refcnt);
 
-out_put_cpu:
-	spin_unlock_bh(&cpu_queue->qlock);
+out:
+	local_bh_enable();
 
	return err;
 }
 
@@ -179,10 +168,10 @@
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
-	spin_lock_bh(&cpu_queue->qlock);
+	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
-	spin_unlock_bh(&cpu_queue->qlock);
+	local_bh_enable();
 
	if (!req)
		return;
@@ -192,7 +181,7 @@
		req->complete(req, 0);
 
	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work(cryptd_wq, &cpu_queue->work);
 }
 
 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
@@ -202,140 +191,20 @@
	return ictx->queue;
 }
 
-static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
-					 u32 *mask)
+static void cryptd_type_and_mask(struct crypto_attr_type *algt,
+				 u32 *type, u32 *mask)
 {
-	struct crypto_attr_type *algt;
+	/*
+	 * cryptd is allowed to wrap internal algorithms, but in that case the
+	 * resulting cryptd instance will be marked as internal as well.
+	 */
+	*type = algt->type & CRYPTO_ALG_INTERNAL;
+	*mask = algt->mask & CRYPTO_ALG_INTERNAL;
 
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return;
+	/* No point in cryptd wrapping an algorithm that's already async. */
+	*mask |= CRYPTO_ALG_ASYNC;
 
-	*type |= algt->type & CRYPTO_ALG_INTERNAL;
-	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-}
-
-static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
-				   const u8 *key, unsigned int keylen)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
-	struct crypto_blkcipher *child = ctx->child;
-	int err;
-
-	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
-					  CRYPTO_TFM_REQ_MASK);
-	err = crypto_blkcipher_setkey(child, key, keylen);
-	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
-					    CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
-static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
-				   struct crypto_blkcipher *child,
-				   int err,
-				   int (*crypt)(struct blkcipher_desc *desc,
-						struct scatterlist *dst,
-						struct scatterlist *src,
-						unsigned int len))
-{
-	struct cryptd_blkcipher_request_ctx *rctx;
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_ablkcipher *tfm;
-	struct blkcipher_desc desc;
-	int refcnt;
-
-	rctx = ablkcipher_request_ctx(req);
-
-	if (unlikely(err == -EINPROGRESS))
-		goto out;
-
-	desc.tfm = child;
-	desc.info = req->info;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = crypt(&desc, req->dst, req->src, req->nbytes);
-
-	req->base.complete = rctx->complete;
-
-out:
-	tfm = crypto_ablkcipher_reqtfm(req);
-	ctx = crypto_ablkcipher_ctx(tfm);
-	refcnt = atomic_read(&ctx->refcnt);
-
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
-
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(tfm);
-}
-
-static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->encrypt);
-}
-
-static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->decrypt);
-}
-
-static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-				    crypto_completion_t compl)
-{
-	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_queue *queue;
-
-	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
-	rctx->complete = req->base.complete;
-	req->base.complete = compl;
-
-	return cryptd_enqueue_request(queue, &req->base);
-}
-
-static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
-}
-
-static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
-}
-
-static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_spawn *spawn = &ictx->spawn;
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_blkcipher *cipher;
-
-	cipher = crypto_spawn_blkcipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	tfm->crt_ablkcipher.reqsize =
-		sizeof(struct cryptd_blkcipher_request_ctx);
-	return 0;
-}
-
-static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_blkcipher(ctx->child);
+	*mask |= crypto_algt_inherited_mask(algt);
 }
 
 static int cryptd_init_instance(struct crypto_instance *inst,
@@ -355,109 +224,17 @@
	return 0;
 }
 
-static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
-				   unsigned int tail)
-{
-	char *p;
-	struct crypto_instance *inst;
-	int err;
-
-	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
-	if (!p)
-		return ERR_PTR(-ENOMEM);
-
-	inst = (void *)(p + head);
-
-	err = cryptd_init_instance(inst, alg);
-	if (err)
-		goto out_free_inst;
-
-out:
-	return p;
-
-out_free_inst:
-	kfree(p);
-	p = ERR_PTR(err);
-	goto out;
-}
-
-static int cryptd_create_blkcipher(struct crypto_template *tmpl,
-				   struct rtattr **tb,
-				   struct cryptd_queue *queue)
-{
-	struct cryptd_instance_ctx *ctx;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
-	u32 mask = CRYPTO_ALG_TYPE_MASK;
-	int err;
-
-	cryptd_check_internal(tb, &type, &mask);
-
-	alg = crypto_get_attr_alg(tb, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	ctx = crypto_instance_ctx(inst);
-	ctx->queue = queue;
-
-	err = crypto_init_spawn(&ctx->spawn, alg, inst,
-				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-	if (err)
-		goto out_free_inst;
-
-	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
-		type |= CRYPTO_ALG_INTERNAL;
-	inst->alg.cra_flags = type;
-	inst->alg.cra_type = &crypto_ablkcipher_type;
-
-	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
-	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
-	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-
-	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
-
-	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
-
-	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
-	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
-
-	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
-	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
-	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		crypto_drop_spawn(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
-	}
-
-out_put_alg:
-	crypto_mod_put(alg);
-	return err;
-}
-
 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
 {
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
-	int err;
 
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
-	return err;
+	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
+				  CRYPTO_TFM_REQ_MASK);
+	return crypto_skcipher_setkey(child, key, keylen);
 }
 
 static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
@@ -465,13 +242,13 @@
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
-	int refcnt = atomic_read(&ctx->refcnt);
+	int refcnt = refcount_read(&ctx->refcnt);
 
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
 }
 
@@ -482,8 +259,8 @@
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
	if (unlikely(err == -EINPROGRESS))
		goto out;
@@ -510,8 +287,8 @@
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
	if (unlikely(err == -EINPROGRESS))
		goto out;
@@ -569,7 +346,8 @@
 
	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
-		tfm, sizeof(struct cryptd_skcipher_request_ctx));
+		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
+		     crypto_skcipher_reqsize(cipher));
	return 0;
 }
 
@@ -590,24 +368,17 @@
 
 static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
+				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
 {
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
-	const char *name;
	u32 type;
	u32 mask;
	int err;
 
-	type = 0;
-	mask = CRYPTO_ALG_ASYNC;
-
-	cryptd_check_internal(tb, &type, &mask);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return PTR_ERR(name);
+	cryptd_type_and_mask(algt, &type, &mask);
 
	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
@@ -616,19 +387,18 @@
	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;
 
-	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
 
	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
-		goto out_drop_skcipher;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
-				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
-
+	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
@@ -647,10 +417,8 @@
 
	err = skcipher_register_instance(tmpl, inst);
	if (err) {
-out_drop_skcipher:
-		crypto_drop_skcipher(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_skcipher_free(inst);
	}
	return err;
 }
 
@@ -686,15 +454,11 @@
 {
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
-	int err;
 
	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
-	err = crypto_shash_setkey(child, key, keylen);
-	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
-				       CRYPTO_TFM_RES_MASK);
-	return err;
+	return crypto_shash_setkey(child, key, keylen);
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
@@ -716,13 +480,13 @@
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-	int refcnt = atomic_read(&ctx->refcnt);
+	int refcnt = refcount_read(&ctx->refcnt);
 
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
 }
 
@@ -738,7 +502,6 @@
		goto out;
 
	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
	err = crypto_shash_init(desc);
 
@@ -830,7 +593,6 @@
		goto out;
 
	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
	err = shash_ahash_digest(req, desc);
 
@@ -859,49 +621,53 @@
	struct shash_desc *desc = cryptd_shash_desc(req);
 
	desc->tfm = ctx->child;
-	desc->flags = req->base.flags;
 
	return crypto_shash_import(desc, in);
 }
 
+static void cryptd_hash_free(struct ahash_instance *inst)
+{
+	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
+
+	crypto_drop_shash(&ctx->spawn);
+	kfree(inst);
+}
+
 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
 {
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
-	struct shash_alg *salg;
-	struct crypto_alg *alg;
-	u32 type = 0;
-	u32 mask = 0;
+	struct shash_alg *alg;
+	u32 type;
+	u32 mask;
	int err;
 
-	cryptd_check_internal(tb, &type, &mask);
+	cryptd_type_and_mask(algt, &type, &mask);
 
-	salg = shash_attr_alg(tb[1], type, mask);
-	if (IS_ERR(salg))
-		return PTR_ERR(salg);
-
-	alg = &salg->base;
-	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
-				     sizeof(*ctx));
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
 
	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;
 
-	err = crypto_init_shash_spawn(&ctx->spawn, salg,
-				      ahash_crypto_instance(inst));
+	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
+				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
+	alg = crypto_spawn_shash_alg(&ctx->spawn);
 
-	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
-		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
-				   CRYPTO_ALG_OPTIONAL_KEY));
+	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
+	if (err)
+		goto err_free_inst;
 
-	inst->alg.halg.digestsize = salg->digestsize;
-	inst->alg.halg.statesize = salg->statesize;
+	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
+	inst->alg.halg.digestsize = alg->digestsize;
+	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 
	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -913,19 +679,17 @@
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
-	if (crypto_shash_alg_has_setkey(salg))
+	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;
 
+	inst->free = cryptd_hash_free;
+
	err = ahash_register_instance(tmpl, inst);
	if (err) {
-		crypto_drop_shash(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_hash_free(inst);
	}
-
-out_put_alg:
-	crypto_mod_put(alg);
	return err;
 }
 
@@ -970,13 +734,13 @@
 
 out:
	ctx = crypto_aead_ctx(tfm);
-	refcnt = atomic_read(&ctx->refcnt);
+	refcnt = refcount_read(&ctx->refcnt);
 
	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
 }
 
@@ -1047,23 +811,27 @@
	crypto_free_aead(ctx->child);
 }
 
+static void cryptd_aead_free(struct aead_instance *inst)
+{
+	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->aead_spawn);
+	kfree(inst);
+}
+
 static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
+			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
 {
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
-	const char *name;
-	u32 type = 0;
-	u32 mask = CRYPTO_ALG_ASYNC;
+	u32 type;
+	u32 mask;
	int err;
 
-	cryptd_check_internal(tb, &type, &mask);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return PTR_ERR(name);
+	cryptd_type_and_mask(algt, &type, &mask);
 
	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
@@ -1072,18 +840,18 @@
	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;
 
-	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
-	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
+	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
+			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
 
	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
-		goto out_drop_aead;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
-				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 
	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
@@ -1096,12 +864,12 @@
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
 
+	inst->free = cryptd_aead_free;
+
	err = aead_register_instance(tmpl, inst);
	if (err) {
-out_drop_aead:
-		crypto_drop_aead(&ctx->aead_spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_aead_free(inst);
	}
	return err;
 }
 
@@ -1117,100 +885,22 @@
		return PTR_ERR(algt);
 
	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
-		    CRYPTO_ALG_TYPE_BLKCIPHER)
-			return cryptd_create_blkcipher(tmpl, tb, &queue);
-
-		return cryptd_create_skcipher(tmpl, tb, &queue);
-	case CRYPTO_ALG_TYPE_DIGEST:
-		return cryptd_create_hash(tmpl, tb, &queue);
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
+	case CRYPTO_ALG_TYPE_HASH:
+		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
-		return cryptd_create_aead(tmpl, tb, &queue);
+		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}
 
	return -EINVAL;
 }
 
-static void cryptd_free(struct crypto_instance *inst)
-{
-	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
-	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
-
-	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		crypto_drop_shash(&hctx->spawn);
-		kfree(ahash_instance(inst));
-		return;
-	case CRYPTO_ALG_TYPE_AEAD:
-		crypto_drop_aead(&aead_ctx->aead_spawn);
-		kfree(aead_instance(inst));
-		return;
-	default:
-		crypto_drop_spawn(&ctx->spawn);
-		kfree(inst);
-	}
-}
-
 static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
-	.free = cryptd_free,
	.module = THIS_MODULE,
 };
-
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask)
-{
-	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_tfm *tfm;
-
-	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
-		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-EINVAL);
-	type = crypto_skcipher_type(type);
-	mask &= ~CRYPTO_ALG_TYPE_MASK;
-	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
-	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
-	if (IS_ERR(tfm))
-		return ERR_CAST(tfm);
-	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
-		crypto_free_tfm(tfm);
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctx = crypto_tfm_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
-
-	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-	return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	return atomic_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
-
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	if (atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
@@ -1233,7 +923,7 @@
	}
 
	ctx = crypto_skcipher_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
	return container_of(tfm, struct cryptd_skcipher, base);
 }
@@ -1251,7 +941,7 @@
 {
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
 
@@ -1259,7 +949,7 @@
 {
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
 
@@ -1283,7 +973,7 @@
	}
 
	ctx = crypto_ahash_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
	return __cryptd_ahash_cast(tfm);
 }
@@ -1308,7 +998,7 @@
 {
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
 
@@ -1316,7 +1006,7 @@
 {
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
 
@@ -1340,7 +1030,7 @@
	}
 
	ctx = crypto_aead_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
	return __cryptd_aead_cast(tfm);
 }
@@ -1358,7 +1048,7 @@
 {
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_aead_queued);
 
@@ -1366,7 +1056,7 @@
 {
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_aead);
 
@@ -1375,19 +1065,31 @@
 {
	int err;
 
+	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
+				    1);
+	if (!cryptd_wq)
+		return -ENOMEM;
+
	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
-		return err;
+		goto err_destroy_wq;
 
	err = crypto_register_template(&cryptd_tmpl);
	if (err)
-		cryptd_fini_queue(&queue);
+		goto err_fini_queue;
 
+	return 0;
+
+err_fini_queue:
+	cryptd_fini_queue(&queue);
+err_destroy_wq:
+	destroy_workqueue(cryptd_wq);
	return err;
 }
 
 static void __exit cryptd_exit(void)
 {
+	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
 }
-- 
Gitblit v1.6.2
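
Note (illustrative, not part of the patch; text below the signature is ignored by git-am): the exported cryptd handles above are consumed from other kernel code roughly as follows. This is a minimal sketch assuming a kernel-module context; "sha256" is an arbitrary example of a synchronous hash for cryptd to wrap, and the cryptd_example_* names are hypothetical.

/*
 * Minimal usage sketch for the cryptd ahash API kept by this patch.
 * cryptd_alloc_ahash() builds the "cryptd(sha256)" instance; requests
 * submitted through it are deferred to the per-CPU queues drained by
 * the cryptd workqueue created in cryptd_init().
 */
#include <crypto/cryptd.h>
#include <linux/err.h>
#include <linux/module.h>

static struct cryptd_ahash *example_tfm;

static int __init cryptd_example_init(void)
{
	/* type/mask of 0 requests a non-internal instance. */
	example_tfm = cryptd_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(example_tfm))
		return PTR_ERR(example_tfm);

	/* cryptd_ahash_child() exposes the wrapped synchronous shash. */
	return 0;
}

static void __exit cryptd_example_exit(void)
{
	/*
	 * Drops our reference; per the refcount_t scheme above, the tfm
	 * is only freed once any still-queued requests have completed.
	 */
	cryptd_free_ahash(example_tfm);
}

module_init(cryptd_example_init);
module_exit(cryptd_example_exit);
MODULE_LICENSE("GPL");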