| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Software async crypto daemon. |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 9 | 10 | * Gabriele Paoloni <gabriele.paoloni@intel.com> |
|---|
| 10 | 11 | * Aidan O'Mahony (aidan.o.mahony@intel.com) |
|---|
| 11 | 12 | * Copyright (c) 2010, Intel Corporation. |
|---|
| 12 | | - * |
|---|
| 13 | | - * This program is free software; you can redistribute it and/or modify it |
|---|
| 14 | | - * under the terms of the GNU General Public License as published by the Free |
|---|
| 15 | | - * Software Foundation; either version 2 of the License, or (at your option) |
|---|
| 16 | | - * any later version. |
|---|
| 17 | | - * |
|---|
| 18 | 13 | */ |
|---|
| 19 | 14 | |
|---|
| 20 | 15 | #include <crypto/internal/hash.h> |
|---|
| 21 | 16 | #include <crypto/internal/aead.h> |
|---|
| 22 | 17 | #include <crypto/internal/skcipher.h> |
|---|
| 23 | 18 | #include <crypto/cryptd.h> |
|---|
| 24 | | -#include <crypto/crypto_wq.h> |
|---|
| 25 | | -#include <linux/atomic.h> |
|---|
| 19 | +#include <linux/refcount.h> |
|---|
| 26 | 20 | #include <linux/err.h> |
|---|
| 27 | 21 | #include <linux/init.h> |
|---|
| 28 | 22 | #include <linux/kernel.h> |
|---|
| .. | .. |
|---|
| 31 | 25 | #include <linux/scatterlist.h> |
|---|
| 32 | 26 | #include <linux/sched.h> |
|---|
| 33 | 27 | #include <linux/slab.h> |
|---|
| 28 | +#include <linux/workqueue.h> |
|---|
| 34 | 29 | |
|---|
| 35 | 30 | static unsigned int cryptd_max_cpu_qlen = 1000; |
|---|
| 36 | 31 | module_param(cryptd_max_cpu_qlen, uint, 0); |
|---|
| 37 | 32 | MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); |
|---|
| 33 | + |
|---|
| 34 | +static struct workqueue_struct *cryptd_wq; |
|---|
| 38 | 35 | |
|---|
| 39 | 36 | struct cryptd_cpu_queue { |
|---|
| 40 | 37 | struct crypto_queue queue; |
|---|
| .. | .. |
|---|
| 42 | 39 | }; |
|---|
| 43 | 40 | |
|---|
| 44 | 41 | struct cryptd_queue { |
|---|
| 42 | + /* |
|---|
| 43 | + * Protected by disabling BH to allow enqueueing from softirq context and |
|---|
| 44 | + * dequeuing from kworker (cryptd_queue_worker()). |
|---|
| 45 | + */ |
|---|
| 45 | 46 | struct cryptd_cpu_queue __percpu *cpu_queue; |
|---|
| 46 | 47 | }; |
|---|
| 47 | 48 | |
|---|
| .. | .. |
|---|
| 65 | 66 | struct cryptd_queue *queue; |
|---|
| 66 | 67 | }; |
|---|
| 67 | 68 | |
|---|
| 68 | | -struct cryptd_blkcipher_ctx { |
|---|
| 69 | | - atomic_t refcnt; |
|---|
| 70 | | - struct crypto_blkcipher *child; |
|---|
| 71 | | -}; |
|---|
| 72 | | - |
|---|
| 73 | | -struct cryptd_blkcipher_request_ctx { |
|---|
| 74 | | - crypto_completion_t complete; |
|---|
| 75 | | -}; |
|---|
| 76 | | - |
|---|
| 77 | 69 | struct cryptd_skcipher_ctx { |
|---|
| 78 | | - atomic_t refcnt; |
|---|
| 70 | + refcount_t refcnt; |
|---|
| 79 | 71 | struct crypto_skcipher *child; |
|---|
| 80 | 72 | }; |
|---|
| 81 | 73 | |
|---|
| 82 | 74 | struct cryptd_skcipher_request_ctx { |
|---|
| 83 | 75 | crypto_completion_t complete; |
|---|
| 76 | + struct skcipher_request req; |
|---|
| 84 | 77 | }; |
|---|
| 85 | 78 | |
|---|
| 86 | 79 | struct cryptd_hash_ctx { |
|---|
| 87 | | - atomic_t refcnt; |
|---|
| 80 | + refcount_t refcnt; |
|---|
| 88 | 81 | struct crypto_shash *child; |
|---|
| 89 | 82 | }; |
|---|
| 90 | 83 | |
|---|
| .. | .. |
|---|
| 94 | 87 | }; |
|---|
| 95 | 88 | |
|---|
| 96 | 89 | struct cryptd_aead_ctx { |
|---|
| 97 | | - atomic_t refcnt; |
|---|
| 90 | + refcount_t refcnt; |
|---|
| 98 | 91 | struct crypto_aead *child; |
|---|
| 99 | 92 | }; |
|---|
| 100 | 93 | |
|---|
| .. | .. |
|---|
| 137 | 130 | static int cryptd_enqueue_request(struct cryptd_queue *queue, |
|---|
| 138 | 131 | struct crypto_async_request *request) |
|---|
| 139 | 132 | { |
|---|
| 140 | | - int cpu, err; |
|---|
| 133 | + int err; |
|---|
| 141 | 134 | struct cryptd_cpu_queue *cpu_queue; |
|---|
| 142 | | - atomic_t *refcnt; |
|---|
| 135 | + refcount_t *refcnt; |
|---|
| 143 | 136 | |
|---|
| 144 | | - cpu = get_cpu(); |
|---|
| 137 | + local_bh_disable(); |
|---|
| 145 | 138 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
|---|
| 146 | 139 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
|---|
| 147 | 140 | |
|---|
| 148 | 141 | refcnt = crypto_tfm_ctx(request->tfm); |
|---|
| 149 | 142 | |
|---|
| 150 | 143 | if (err == -ENOSPC) |
|---|
| 151 | | - goto out_put_cpu; |
|---|
| 144 | + goto out; |
|---|
| 152 | 145 | |
|---|
| 153 | | - queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
|---|
| 146 | + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); |
|---|
| 154 | 147 | |
|---|
| 155 | | - if (!atomic_read(refcnt)) |
|---|
| 156 | | - goto out_put_cpu; |
|---|
| 148 | + if (!refcount_read(refcnt)) |
|---|
| 149 | + goto out; |
|---|
| 157 | 150 | |
|---|
| 158 | | - atomic_inc(refcnt); |
|---|
| 151 | + refcount_inc(refcnt); |
|---|
| 159 | 152 | |
|---|
| 160 | | -out_put_cpu: |
|---|
| 161 | | - put_cpu(); |
|---|
| 153 | +out: |
|---|
| 154 | + local_bh_enable(); |
|---|
| 162 | 155 | |
|---|
| 163 | 156 | return err; |
|---|
| 164 | 157 | } |
|---|
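
This hunk swaps `atomic_t` for `refcount_t` and keeps the scheme where each in-flight request pins the transform, but only for contexts that opt in: the `cryptd_alloc_*()` helpers start the count at 1, while internally instantiated tfms leave it at 0 and skip the accounting entirely (hence the `refcount_read()` check before `refcount_inc()`). Below is a simplified userspace model of that lifetime using C11 atomics; the stand-in deliberately lacks the real `refcount_t`'s saturation and overflow/underflow warnings, and all names are invented for the sketch:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for refcount_t (the real one saturates and warns
 * on overflow/underflow instead of silently wrapping). */
typedef struct { atomic_uint v; } refcount_t;

static void refcount_set(refcount_t *r, unsigned int n) { atomic_store(&r->v, n); }
static unsigned int refcount_read(refcount_t *r) { return atomic_load(&r->v); }
static void refcount_inc(refcount_t *r) { atomic_fetch_add(&r->v, 1); }
static int refcount_dec_and_test(refcount_t *r) { return atomic_fetch_sub(&r->v, 1) == 1; }

struct ctx {
	refcount_t refcnt;	/* 1 = owned via cryptd_alloc_*(), 0 = opted out */
};

/* Enqueue: one reference per in-flight request, but only for contexts
 * that participate in refcounting (refcnt already nonzero). */
static void enqueue(struct ctx *c)
{
	if (refcount_read(&c->refcnt))
		refcount_inc(&c->refcnt);
}

/* Completion: drop the per-request reference; the last put frees. */
static void complete(struct ctx *c)
{
	if (refcount_read(&c->refcnt) && refcount_dec_and_test(&c->refcnt))
		free(c);
}

static void put(struct ctx *c)	/* analogue of cryptd_free_skcipher() */
{
	if (refcount_dec_and_test(&c->refcnt))
		free(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	refcount_set(&c->refcnt, 1);	/* the allocator's reference */
	enqueue(c);			/* 1 -> 2 */
	/* cryptd_*_queued() report refcount_read() - 1 for this reason: */
	printf("queued: %u\n", refcount_read(&c->refcnt) - 1);
	complete(c);			/* 2 -> 1 */
	put(c);				/* 1 -> 0, freed */
	return 0;
}
```
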
| .. | .. |
|---|
| 174 | 167 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); |
|---|
| 175 | 168 | /* |
|---|
| 176 | 169 | * Only handle one request at a time to avoid hogging crypto workqueue. |
|---|
| 177 | | - * preempt_disable/enable is used to prevent being preempted by |
|---|
| 178 | | - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent |
|---|
| 179 | | - * cryptd_enqueue_request() being accessed from software interrupts. |
|---|
| 180 | 170 | */ |
|---|
| 181 | 171 | local_bh_disable(); |
|---|
| 182 | | - preempt_disable(); |
|---|
| 183 | 172 | backlog = crypto_get_backlog(&cpu_queue->queue); |
|---|
| 184 | 173 | req = crypto_dequeue_request(&cpu_queue->queue); |
|---|
| 185 | | - preempt_enable(); |
|---|
| 186 | 174 | local_bh_enable(); |
|---|
| 187 | 175 | |
|---|
| 188 | 176 | if (!req) |
|---|
| .. | .. |
|---|
| 193 | 181 | req->complete(req, 0); |
|---|
| 194 | 182 | |
|---|
| 195 | 183 | if (cpu_queue->queue.qlen) |
|---|
| 196 | | - queue_work(kcrypto_wq, &cpu_queue->work); |
|---|
| 184 | + queue_work(cryptd_wq, &cpu_queue->work); |
|---|
| 197 | 185 | } |
|---|
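
The worker above pairs with `cryptd_enqueue_request()`: both now run with bottom halves disabled, and because the queue is per-CPU that alone serializes the softirq producer against the kworker consumer, replacing the old `get_cpu()`/`preempt_disable()` dance. Purely as a userspace analogy (a mutex standing in for "BH disabled on this CPU"; the kernel code takes no lock, and all names here are invented), the same produce/consume-one-at-a-time shape looks like this:

```c
#include <pthread.h>
#include <stdio.h>

#define QLEN 8

/* One per-"CPU" queue; the mutex models "BH disabled on this CPU". */
static struct {
	pthread_mutex_t lock;
	int buf[QLEN];
	int head, tail;
} q = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void enqueue(int v)			/* cf. cryptd_enqueue_request() */
{
	pthread_mutex_lock(&q.lock);		/* local_bh_disable() */
	q.buf[q.tail++ % QLEN] = v;
	pthread_mutex_unlock(&q.lock);		/* local_bh_enable() */
}

static int dequeue_one(void)			/* cf. cryptd_queue_worker() */
{
	int v = -1;

	pthread_mutex_lock(&q.lock);		/* local_bh_disable() */
	if (q.head != q.tail)			/* handle ONE request ... */
		v = q.buf[q.head++ % QLEN];
	pthread_mutex_unlock(&q.lock);		/* ... then let others run */
	return v;
}

static void *producer(void *arg)
{
	for (int i = 0; i < 4; i++)
		enqueue(i);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int v;

	pthread_create(&t, NULL, producer, NULL);
	pthread_join(t, NULL);
	while ((v = dequeue_one()) >= 0)
		printf("dequeued %d\n", v);
	return 0;
}
```
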
| 198 | 186 | |
|---|
| 199 | 187 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) |
|---|
| .. | .. |
|---|
| 203 | 191 | return ictx->queue; |
|---|
| 204 | 192 | } |
|---|
| 205 | 193 | |
|---|
| 206 | | -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, |
|---|
| 207 | | - u32 *mask) |
|---|
| 194 | +static void cryptd_type_and_mask(struct crypto_attr_type *algt, |
|---|
| 195 | + u32 *type, u32 *mask) |
|---|
| 208 | 196 | { |
|---|
| 209 | | - struct crypto_attr_type *algt; |
|---|
| 197 | + /* |
|---|
| 198 | + * cryptd is allowed to wrap internal algorithms, but in that case the |
|---|
| 199 | + * resulting cryptd instance will be marked as internal as well. |
|---|
| 200 | + */ |
|---|
| 201 | + *type = algt->type & CRYPTO_ALG_INTERNAL; |
|---|
| 202 | + *mask = algt->mask & CRYPTO_ALG_INTERNAL; |
|---|
| 210 | 203 | |
|---|
| 211 | | - algt = crypto_get_attr_type(tb); |
|---|
| 212 | | - if (IS_ERR(algt)) |
|---|
| 213 | | - return; |
|---|
| 204 | + /* No point in cryptd wrapping an algorithm that's already async. */ |
|---|
| 205 | + *mask |= CRYPTO_ALG_ASYNC; |
|---|
| 214 | 206 | |
|---|
| 215 | | - *type |= algt->type & CRYPTO_ALG_INTERNAL; |
|---|
| 216 | | - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; |
|---|
| 217 | | -} |
|---|
| 218 | | - |
|---|
| 219 | | -static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
|---|
| 220 | | - const u8 *key, unsigned int keylen) |
|---|
| 221 | | -{ |
|---|
| 222 | | - struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); |
|---|
| 223 | | - struct crypto_blkcipher *child = ctx->child; |
|---|
| 224 | | - int err; |
|---|
| 225 | | - |
|---|
| 226 | | - crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
|---|
| 227 | | - crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & |
|---|
| 228 | | - CRYPTO_TFM_REQ_MASK); |
|---|
| 229 | | - err = crypto_blkcipher_setkey(child, key, keylen); |
|---|
| 230 | | - crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & |
|---|
| 231 | | - CRYPTO_TFM_RES_MASK); |
|---|
| 232 | | - return err; |
|---|
| 233 | | -} |
|---|
| 234 | | - |
|---|
| 235 | | -static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, |
|---|
| 236 | | - struct crypto_blkcipher *child, |
|---|
| 237 | | - int err, |
|---|
| 238 | | - int (*crypt)(struct blkcipher_desc *desc, |
|---|
| 239 | | - struct scatterlist *dst, |
|---|
| 240 | | - struct scatterlist *src, |
|---|
| 241 | | - unsigned int len)) |
|---|
| 242 | | -{ |
|---|
| 243 | | - struct cryptd_blkcipher_request_ctx *rctx; |
|---|
| 244 | | - struct cryptd_blkcipher_ctx *ctx; |
|---|
| 245 | | - struct crypto_ablkcipher *tfm; |
|---|
| 246 | | - struct blkcipher_desc desc; |
|---|
| 247 | | - int refcnt; |
|---|
| 248 | | - |
|---|
| 249 | | - rctx = ablkcipher_request_ctx(req); |
|---|
| 250 | | - |
|---|
| 251 | | - if (unlikely(err == -EINPROGRESS)) |
|---|
| 252 | | - goto out; |
|---|
| 253 | | - |
|---|
| 254 | | - desc.tfm = child; |
|---|
| 255 | | - desc.info = req->info; |
|---|
| 256 | | - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
|---|
| 257 | | - |
|---|
| 258 | | - err = crypt(&desc, req->dst, req->src, req->nbytes); |
|---|
| 259 | | - |
|---|
| 260 | | - req->base.complete = rctx->complete; |
|---|
| 261 | | - |
|---|
| 262 | | -out: |
|---|
| 263 | | - tfm = crypto_ablkcipher_reqtfm(req); |
|---|
| 264 | | - ctx = crypto_ablkcipher_ctx(tfm); |
|---|
| 265 | | - refcnt = atomic_read(&ctx->refcnt); |
|---|
| 266 | | - |
|---|
| 267 | | - local_bh_disable(); |
|---|
| 268 | | - rctx->complete(&req->base, err); |
|---|
| 269 | | - local_bh_enable(); |
|---|
| 270 | | - |
|---|
| 271 | | - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 272 | | - crypto_free_ablkcipher(tfm); |
|---|
| 273 | | -} |
|---|
| 274 | | - |
|---|
| 275 | | -static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) |
|---|
| 276 | | -{ |
|---|
| 277 | | - struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
|---|
| 278 | | - struct crypto_blkcipher *child = ctx->child; |
|---|
| 279 | | - |
|---|
| 280 | | - cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
|---|
| 281 | | - crypto_blkcipher_crt(child)->encrypt); |
|---|
| 282 | | -} |
|---|
| 283 | | - |
|---|
| 284 | | -static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) |
|---|
| 285 | | -{ |
|---|
| 286 | | - struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); |
|---|
| 287 | | - struct crypto_blkcipher *child = ctx->child; |
|---|
| 288 | | - |
|---|
| 289 | | - cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, |
|---|
| 290 | | - crypto_blkcipher_crt(child)->decrypt); |
|---|
| 291 | | -} |
|---|
| 292 | | - |
|---|
| 293 | | -static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, |
|---|
| 294 | | - crypto_completion_t compl) |
|---|
| 295 | | -{ |
|---|
| 296 | | - struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); |
|---|
| 297 | | - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
|---|
| 298 | | - struct cryptd_queue *queue; |
|---|
| 299 | | - |
|---|
| 300 | | - queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
|---|
| 301 | | - rctx->complete = req->base.complete; |
|---|
| 302 | | - req->base.complete = compl; |
|---|
| 303 | | - |
|---|
| 304 | | - return cryptd_enqueue_request(queue, &req->base); |
|---|
| 305 | | -} |
|---|
| 306 | | - |
|---|
| 307 | | -static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) |
|---|
| 308 | | -{ |
|---|
| 309 | | - return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); |
|---|
| 310 | | -} |
|---|
| 311 | | - |
|---|
| 312 | | -static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) |
|---|
| 313 | | -{ |
|---|
| 314 | | - return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); |
|---|
| 315 | | -} |
|---|
| 316 | | - |
|---|
| 317 | | -static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) |
|---|
| 318 | | -{ |
|---|
| 319 | | - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
|---|
| 320 | | - struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); |
|---|
| 321 | | - struct crypto_spawn *spawn = &ictx->spawn; |
|---|
| 322 | | - struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
|---|
| 323 | | - struct crypto_blkcipher *cipher; |
|---|
| 324 | | - |
|---|
| 325 | | - cipher = crypto_spawn_blkcipher(spawn); |
|---|
| 326 | | - if (IS_ERR(cipher)) |
|---|
| 327 | | - return PTR_ERR(cipher); |
|---|
| 328 | | - |
|---|
| 329 | | - ctx->child = cipher; |
|---|
| 330 | | - tfm->crt_ablkcipher.reqsize = |
|---|
| 331 | | - sizeof(struct cryptd_blkcipher_request_ctx); |
|---|
| 332 | | - return 0; |
|---|
| 333 | | -} |
|---|
| 334 | | - |
|---|
| 335 | | -static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) |
|---|
| 336 | | -{ |
|---|
| 337 | | - struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
|---|
| 338 | | - |
|---|
| 339 | | - crypto_free_blkcipher(ctx->child); |
|---|
| 207 | + *mask |= crypto_algt_inherited_mask(algt); |
|---|
| 340 | 208 | } |
|---|
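
`cryptd_type_and_mask()` centralizes what `cryptd_check_internal()` and the per-type callers used to assemble piecemeal: pass the INTERNAL bit through so that wrapping an internal algorithm yields an internal instance, and force ASYNC into the mask so cryptd never wraps an already-asynchronous implementation. A minimal userspace rendering of the bit logic follows; the flag values are illustrative rather than the kernel's real definitions, and the `crypto_algt_inherited_mask()` fold-in is left out of the sketch:

```c
#include <stdio.h>

/* Illustrative values only -- the real flags live in <linux/crypto.h>. */
#define ALG_ASYNC    0x0080u
#define ALG_INTERNAL 0x2000u

static void type_and_mask(unsigned int attr_type, unsigned int attr_mask,
			  unsigned int *type, unsigned int *mask)
{
	/* Wrapping an internal alg produces an internal instance. */
	*type = attr_type & ALG_INTERNAL;
	*mask = attr_mask & ALG_INTERNAL;

	/* Never wrap something that is already async. */
	*mask |= ALG_ASYNC;
}

int main(void)
{
	unsigned int type, mask;

	/* Caller asked for an internal-only wrap. */
	type_and_mask(ALG_INTERNAL, ALG_INTERNAL, &type, &mask);
	printf("type=%#x mask=%#x\n", type, mask);
	return 0;
}
```
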
| 341 | 209 | |
|---|
| 342 | 210 | static int cryptd_init_instance(struct crypto_instance *inst, |
|---|
| .. | .. |
|---|
| 356 | 224 | return 0; |
|---|
| 357 | 225 | } |
|---|
| 358 | 226 | |
|---|
| 359 | | -static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
|---|
| 360 | | - unsigned int tail) |
|---|
| 361 | | -{ |
|---|
| 362 | | - char *p; |
|---|
| 363 | | - struct crypto_instance *inst; |
|---|
| 364 | | - int err; |
|---|
| 365 | | - |
|---|
| 366 | | - p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
|---|
| 367 | | - if (!p) |
|---|
| 368 | | - return ERR_PTR(-ENOMEM); |
|---|
| 369 | | - |
|---|
| 370 | | - inst = (void *)(p + head); |
|---|
| 371 | | - |
|---|
| 372 | | - err = cryptd_init_instance(inst, alg); |
|---|
| 373 | | - if (err) |
|---|
| 374 | | - goto out_free_inst; |
|---|
| 375 | | - |
|---|
| 376 | | -out: |
|---|
| 377 | | - return p; |
|---|
| 378 | | - |
|---|
| 379 | | -out_free_inst: |
|---|
| 380 | | - kfree(p); |
|---|
| 381 | | - p = ERR_PTR(err); |
|---|
| 382 | | - goto out; |
|---|
| 383 | | -} |
|---|
| 384 | | - |
|---|
| 385 | | -static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
|---|
| 386 | | - struct rtattr **tb, |
|---|
| 387 | | - struct cryptd_queue *queue) |
|---|
| 388 | | -{ |
|---|
| 389 | | - struct cryptd_instance_ctx *ctx; |
|---|
| 390 | | - struct crypto_instance *inst; |
|---|
| 391 | | - struct crypto_alg *alg; |
|---|
| 392 | | - u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; |
|---|
| 393 | | - u32 mask = CRYPTO_ALG_TYPE_MASK; |
|---|
| 394 | | - int err; |
|---|
| 395 | | - |
|---|
| 396 | | - cryptd_check_internal(tb, &type, &mask); |
|---|
| 397 | | - |
|---|
| 398 | | - alg = crypto_get_attr_alg(tb, type, mask); |
|---|
| 399 | | - if (IS_ERR(alg)) |
|---|
| 400 | | - return PTR_ERR(alg); |
|---|
| 401 | | - |
|---|
| 402 | | - inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
|---|
| 403 | | - err = PTR_ERR(inst); |
|---|
| 404 | | - if (IS_ERR(inst)) |
|---|
| 405 | | - goto out_put_alg; |
|---|
| 406 | | - |
|---|
| 407 | | - ctx = crypto_instance_ctx(inst); |
|---|
| 408 | | - ctx->queue = queue; |
|---|
| 409 | | - |
|---|
| 410 | | - err = crypto_init_spawn(&ctx->spawn, alg, inst, |
|---|
| 411 | | - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
|---|
| 412 | | - if (err) |
|---|
| 413 | | - goto out_free_inst; |
|---|
| 414 | | - |
|---|
| 415 | | - type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
|---|
| 416 | | - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) |
|---|
| 417 | | - type |= CRYPTO_ALG_INTERNAL; |
|---|
| 418 | | - inst->alg.cra_flags = type; |
|---|
| 419 | | - inst->alg.cra_type = &crypto_ablkcipher_type; |
|---|
| 420 | | - |
|---|
| 421 | | - inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; |
|---|
| 422 | | - inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; |
|---|
| 423 | | - inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; |
|---|
| 424 | | - |
|---|
| 425 | | - inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; |
|---|
| 426 | | - |
|---|
| 427 | | - inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); |
|---|
| 428 | | - |
|---|
| 429 | | - inst->alg.cra_init = cryptd_blkcipher_init_tfm; |
|---|
| 430 | | - inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; |
|---|
| 431 | | - |
|---|
| 432 | | - inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; |
|---|
| 433 | | - inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
|---|
| 434 | | - inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
|---|
| 435 | | - |
|---|
| 436 | | - err = crypto_register_instance(tmpl, inst); |
|---|
| 437 | | - if (err) { |
|---|
| 438 | | - crypto_drop_spawn(&ctx->spawn); |
|---|
| 439 | | -out_free_inst: |
|---|
| 440 | | - kfree(inst); |
|---|
| 441 | | - } |
|---|
| 442 | | - |
|---|
| 443 | | -out_put_alg: |
|---|
| 444 | | - crypto_mod_put(alg); |
|---|
| 445 | | - return err; |
|---|
| 446 | | -} |
|---|
| 447 | | - |
|---|
| 448 | 227 | static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, |
|---|
| 449 | 228 | const u8 *key, unsigned int keylen) |
|---|
| 450 | 229 | { |
|---|
| 451 | 230 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); |
|---|
| 452 | 231 | struct crypto_skcipher *child = ctx->child; |
|---|
| 453 | | - int err; |
|---|
| 454 | 232 | |
|---|
| 455 | 233 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
|---|
| 456 | | - crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
|---|
| 457 | | - CRYPTO_TFM_REQ_MASK); |
|---|
| 458 | | - err = crypto_skcipher_setkey(child, key, keylen); |
|---|
| 459 | | - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & |
|---|
| 460 | | - CRYPTO_TFM_RES_MASK); |
|---|
| 461 | | - return err; |
|---|
| 234 | + crypto_skcipher_set_flags(child, |
|---|
| 235 | + crypto_skcipher_get_flags(parent) & |
|---|
| 236 | + CRYPTO_TFM_REQ_MASK); |
|---|
| 237 | + return crypto_skcipher_setkey(child, key, keylen); |
|---|
| 462 | 238 | } |
|---|
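
The trimmed setkey keeps only the forward direction: the child's request flags are cleared and reseeded from the parent under `CRYPTO_TFM_REQ_MASK`, and the child's return value is passed straight through now that the `CRYPTO_TFM_RES_MASK` back-propagation is gone. The masking idiom in isolation, with an illustrative `REQ_MASK` value:

```c
#include <stdio.h>

#define REQ_MASK 0x0f00u	/* stand-in for CRYPTO_TFM_REQ_MASK */

int main(void)
{
	unsigned int parent = 0x0500, child = 0x0a03;

	child &= ~REQ_MASK;			/* crypto_skcipher_clear_flags() */
	child |= parent & REQ_MASK;		/* crypto_skcipher_set_flags() */
	printf("child flags: %#06x\n", child);	/* prints 0x0503 */
	return 0;
}
```
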
| 463 | 239 | |
|---|
| 464 | 240 | static void cryptd_skcipher_complete(struct skcipher_request *req, int err) |
|---|
| .. | .. |
|---|
| 466 | 242 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
|---|
| 467 | 243 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
|---|
| 468 | 244 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); |
|---|
| 469 | | - int refcnt = atomic_read(&ctx->refcnt); |
|---|
| 245 | + int refcnt = refcount_read(&ctx->refcnt); |
|---|
| 470 | 246 | |
|---|
| 471 | 247 | local_bh_disable(); |
|---|
| 472 | 248 | rctx->complete(&req->base, err); |
|---|
| 473 | 249 | local_bh_enable(); |
|---|
| 474 | 250 | |
|---|
| 475 | | - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 251 | + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 476 | 252 | crypto_free_skcipher(tfm); |
|---|
| 477 | 253 | } |
|---|
| 478 | 254 | |
|---|
| .. | .. |
|---|
| 483 | 259 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); |
|---|
| 484 | 260 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
|---|
| 485 | 261 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
|---|
| 262 | + struct skcipher_request *subreq = &rctx->req; |
|---|
| 486 | 263 | struct crypto_skcipher *child = ctx->child; |
|---|
| 487 | | - SKCIPHER_REQUEST_ON_STACK(subreq, child); |
|---|
| 488 | 264 | |
|---|
| 489 | 265 | if (unlikely(err == -EINPROGRESS)) |
|---|
| 490 | 266 | goto out; |
|---|
| .. | .. |
|---|
| 511 | 287 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); |
|---|
| 512 | 288 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
|---|
| 513 | 289 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
|---|
| 290 | + struct skcipher_request *subreq = &rctx->req; |
|---|
| 514 | 291 | struct crypto_skcipher *child = ctx->child; |
|---|
| 515 | | - SKCIPHER_REQUEST_ON_STACK(subreq, child); |
|---|
| 516 | 292 | |
|---|
| 517 | 293 | if (unlikely(err == -EINPROGRESS)) |
|---|
| 518 | 294 | goto out; |
|---|
| .. | .. |
|---|
| 570 | 346 | |
|---|
| 571 | 347 | ctx->child = cipher; |
|---|
| 572 | 348 | crypto_skcipher_set_reqsize( |
|---|
| 573 | | - tfm, sizeof(struct cryptd_skcipher_request_ctx)); |
|---|
| 349 | + tfm, sizeof(struct cryptd_skcipher_request_ctx) + |
|---|
| 350 | + crypto_skcipher_reqsize(cipher)); |
|---|
| 574 | 351 | return 0; |
|---|
| 575 | 352 | } |
|---|
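
Dropping `SKCIPHER_REQUEST_ON_STACK` in favor of a `struct skcipher_request req;` member works because the `init_tfm` change above grows the parent's request context by `crypto_skcipher_reqsize(cipher)`: the child request and its variable-size driver context are carved out of the parent's own allocation instead of living on the stack. A userspace sketch of that "child tail inside the parent allocation" layout, with made-up types and sizes:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Child request header; its driver context follows it in memory. */
struct child_req {
	size_t ctx_size;
	/* the child's private bytes live directly after this struct */
};

/* Parent request context, mirroring cryptd_skcipher_request_ctx: the
 * child request is the LAST member so its tail can extend past it. */
struct parent_rctx {
	void (*complete)(void *req, int err);
	struct child_req req;
};

int main(void)
{
	size_t child_ctx = 48;	/* stands in for crypto_skcipher_reqsize() */
	/* One allocation covers parent ctx + child ctx: no stack request. */
	struct parent_rctx *rctx = calloc(1, sizeof(*rctx) + child_ctx);

	rctx->req.ctx_size = child_ctx;
	memset(&rctx->req + 1, 0, child_ctx);	/* the child's private area */
	printf("total reqsize: %zu\n", sizeof(*rctx) + child_ctx);
	free(rctx);
	return 0;
}
```
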
| 576 | 353 | |
|---|
| .. | .. |
|---|
| 591 | 368 | |
|---|
| 592 | 369 | static int cryptd_create_skcipher(struct crypto_template *tmpl, |
|---|
| 593 | 370 | struct rtattr **tb, |
|---|
| 371 | + struct crypto_attr_type *algt, |
|---|
| 594 | 372 | struct cryptd_queue *queue) |
|---|
| 595 | 373 | { |
|---|
| 596 | 374 | struct skcipherd_instance_ctx *ctx; |
|---|
| 597 | 375 | struct skcipher_instance *inst; |
|---|
| 598 | 376 | struct skcipher_alg *alg; |
|---|
| 599 | | - const char *name; |
|---|
| 600 | 377 | u32 type; |
|---|
| 601 | 378 | u32 mask; |
|---|
| 602 | 379 | int err; |
|---|
| 603 | 380 | |
|---|
| 604 | | - type = 0; |
|---|
| 605 | | - mask = CRYPTO_ALG_ASYNC; |
|---|
| 606 | | - |
|---|
| 607 | | - cryptd_check_internal(tb, &type, &mask); |
|---|
| 608 | | - |
|---|
| 609 | | - name = crypto_attr_alg_name(tb[1]); |
|---|
| 610 | | - if (IS_ERR(name)) |
|---|
| 611 | | - return PTR_ERR(name); |
|---|
| 381 | + cryptd_type_and_mask(algt, &type, &mask); |
|---|
| 612 | 382 | |
|---|
| 613 | 383 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
|---|
| 614 | 384 | if (!inst) |
|---|
| .. | .. |
|---|
| 617 | 387 | ctx = skcipher_instance_ctx(inst); |
|---|
| 618 | 388 | ctx->queue = queue; |
|---|
| 619 | 389 | |
|---|
| 620 | | - crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); |
|---|
| 621 | | - err = crypto_grab_skcipher(&ctx->spawn, name, type, mask); |
|---|
| 390 | + err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), |
|---|
| 391 | + crypto_attr_alg_name(tb[1]), type, mask); |
|---|
| 622 | 392 | if (err) |
|---|
| 623 | | - goto out_free_inst; |
|---|
| 393 | + goto err_free_inst; |
|---|
| 624 | 394 | |
|---|
| 625 | 395 | alg = crypto_spawn_skcipher_alg(&ctx->spawn); |
|---|
| 626 | 396 | err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); |
|---|
| 627 | 397 | if (err) |
|---|
| 628 | | - goto out_drop_skcipher; |
|---|
| 398 | + goto err_free_inst; |
|---|
| 629 | 399 | |
|---|
| 630 | | - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | |
|---|
| 631 | | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); |
|---|
| 632 | | - |
|---|
| 400 | + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | |
|---|
| 401 | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); |
|---|
| 633 | 402 | inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); |
|---|
| 634 | 403 | inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); |
|---|
| 635 | 404 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); |
|---|
| .. | .. |
|---|
| 648 | 417 | |
|---|
| 649 | 418 | err = skcipher_register_instance(tmpl, inst); |
|---|
| 650 | 419 | if (err) { |
|---|
| 651 | | -out_drop_skcipher: |
|---|
| 652 | | - crypto_drop_skcipher(&ctx->spawn); |
|---|
| 653 | | -out_free_inst: |
|---|
| 654 | | - kfree(inst); |
|---|
| 420 | +err_free_inst: |
|---|
| 421 | + cryptd_skcipher_free(inst); |
|---|
| 655 | 422 | } |
|---|
| 656 | 423 | return err; |
|---|
| 657 | 424 | } |
|---|
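
The error paths collapse from one label per resource (`out_drop_skcipher`, `out_free_inst`) into a single `err_free_inst` that calls `cryptd_skcipher_free()`. That is safe even when `crypto_grab_skcipher()` never succeeded, because the instance was zero-allocated and dropping an unused spawn is a no-op. A small userspace illustration of the "one label, destructor tolerant of partial init" pattern (all names invented):

```c
#include <stdio.h>
#include <stdlib.h>

struct inst {
	char *spawn;	/* NULL until "grabbed" */
};

/* Safe on a partially initialized object: free(NULL) is a no-op, just
 * as dropping a never-grabbed, zero-initialized crypto spawn is. */
static void inst_free(struct inst *inst)
{
	free(inst->spawn);
	free(inst);
}

static int inst_create(int fail_grab)
{
	struct inst *inst = calloc(1, sizeof(*inst));

	if (!inst)
		return -1;

	if (fail_grab)			/* e.g. crypto_grab_skcipher() failing */
		goto err_free_inst;

	inst->spawn = malloc(16);	/* resource acquired */
	printf("created\n");
	inst_free(inst);		/* a real instance would stay registered */
	return 0;

err_free_inst:
	inst_free(inst);		/* one label covers every failure point */
	return -1;
}

int main(void)
{
	inst_create(1);		/* early failure: only the bare inst is freed */
	inst_create(0);		/* success path */
	return 0;
}
```
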
| .. | .. |
|---|
| 687 | 454 | { |
|---|
| 688 | 455 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
|---|
| 689 | 456 | struct crypto_shash *child = ctx->child; |
|---|
| 690 | | - int err; |
|---|
| 691 | 457 | |
|---|
| 692 | 458 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
|---|
| 693 | 459 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
|---|
| 694 | 460 | CRYPTO_TFM_REQ_MASK); |
|---|
| 695 | | - err = crypto_shash_setkey(child, key, keylen); |
|---|
| 696 | | - crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
|---|
| 697 | | - CRYPTO_TFM_RES_MASK); |
|---|
| 698 | | - return err; |
|---|
| 461 | + return crypto_shash_setkey(child, key, keylen); |
|---|
| 699 | 462 | } |
|---|
| 700 | 463 | |
|---|
| 701 | 464 | static int cryptd_hash_enqueue(struct ahash_request *req, |
|---|
| .. | .. |
|---|
| 717 | 480 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
|---|
| 718 | 481 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); |
|---|
| 719 | 482 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
|---|
| 720 | | - int refcnt = atomic_read(&ctx->refcnt); |
|---|
| 483 | + int refcnt = refcount_read(&ctx->refcnt); |
|---|
| 721 | 484 | |
|---|
| 722 | 485 | local_bh_disable(); |
|---|
| 723 | 486 | rctx->complete(&req->base, err); |
|---|
| 724 | 487 | local_bh_enable(); |
|---|
| 725 | 488 | |
|---|
| 726 | | - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 489 | + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 727 | 490 | crypto_free_ahash(tfm); |
|---|
| 728 | 491 | } |
|---|
| 729 | 492 | |
|---|
| .. | .. |
|---|
| 739 | 502 | goto out; |
|---|
| 740 | 503 | |
|---|
| 741 | 504 | desc->tfm = child; |
|---|
| 742 | | - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
|---|
| 743 | 505 | |
|---|
| 744 | 506 | err = crypto_shash_init(desc); |
|---|
| 745 | 507 | |
|---|
| .. | .. |
|---|
| 831 | 593 | goto out; |
|---|
| 832 | 594 | |
|---|
| 833 | 595 | desc->tfm = child; |
|---|
| 834 | | - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
|---|
| 835 | 596 | |
|---|
| 836 | 597 | err = shash_ahash_digest(req, desc); |
|---|
| 837 | 598 | |
|---|
| .. | .. |
|---|
| 860 | 621 | struct shash_desc *desc = cryptd_shash_desc(req); |
|---|
| 861 | 622 | |
|---|
| 862 | 623 | desc->tfm = ctx->child; |
|---|
| 863 | | - desc->flags = req->base.flags; |
|---|
| 864 | 624 | |
|---|
| 865 | 625 | return crypto_shash_import(desc, in); |
|---|
| 866 | 626 | } |
|---|
| 867 | 627 | |
|---|
| 628 | +static void cryptd_hash_free(struct ahash_instance *inst) |
|---|
| 629 | +{ |
|---|
| 630 | + struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst); |
|---|
| 631 | + |
|---|
| 632 | + crypto_drop_shash(&ctx->spawn); |
|---|
| 633 | + kfree(inst); |
|---|
| 634 | +} |
|---|
| 635 | + |
|---|
| 868 | 636 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
|---|
| 637 | + struct crypto_attr_type *algt, |
|---|
| 869 | 638 | struct cryptd_queue *queue) |
|---|
| 870 | 639 | { |
|---|
| 871 | 640 | struct hashd_instance_ctx *ctx; |
|---|
| 872 | 641 | struct ahash_instance *inst; |
|---|
| 873 | | - struct shash_alg *salg; |
|---|
| 874 | | - struct crypto_alg *alg; |
|---|
| 875 | | - u32 type = 0; |
|---|
| 876 | | - u32 mask = 0; |
|---|
| 642 | + struct shash_alg *alg; |
|---|
| 643 | + u32 type; |
|---|
| 644 | + u32 mask; |
|---|
| 877 | 645 | int err; |
|---|
| 878 | 646 | |
|---|
| 879 | | - cryptd_check_internal(tb, &type, &mask); |
|---|
| 647 | + cryptd_type_and_mask(algt, &type, &mask); |
|---|
| 880 | 648 | |
|---|
| 881 | | - salg = shash_attr_alg(tb[1], type, mask); |
|---|
| 882 | | - if (IS_ERR(salg)) |
|---|
| 883 | | - return PTR_ERR(salg); |
|---|
| 884 | | - |
|---|
| 885 | | - alg = &salg->base; |
|---|
| 886 | | - inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), |
|---|
| 887 | | - sizeof(*ctx)); |
|---|
| 888 | | - err = PTR_ERR(inst); |
|---|
| 889 | | - if (IS_ERR(inst)) |
|---|
| 890 | | - goto out_put_alg; |
|---|
| 649 | + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
|---|
| 650 | + if (!inst) |
|---|
| 651 | + return -ENOMEM; |
|---|
| 891 | 652 | |
|---|
| 892 | 653 | ctx = ahash_instance_ctx(inst); |
|---|
| 893 | 654 | ctx->queue = queue; |
|---|
| 894 | 655 | |
|---|
| 895 | | - err = crypto_init_shash_spawn(&ctx->spawn, salg, |
|---|
| 896 | | - ahash_crypto_instance(inst)); |
|---|
| 656 | + err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst), |
|---|
| 657 | + crypto_attr_alg_name(tb[1]), type, mask); |
|---|
| 897 | 658 | if (err) |
|---|
| 898 | | - goto out_free_inst; |
|---|
| 659 | + goto err_free_inst; |
|---|
| 660 | + alg = crypto_spawn_shash_alg(&ctx->spawn); |
|---|
| 899 | 661 | |
|---|
| 900 | | - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
|---|
| 901 | | - (alg->cra_flags & (CRYPTO_ALG_INTERNAL | |
|---|
| 902 | | - CRYPTO_ALG_OPTIONAL_KEY)); |
|---|
| 662 | + err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base); |
|---|
| 663 | + if (err) |
|---|
| 664 | + goto err_free_inst; |
|---|
| 903 | 665 | |
|---|
| 904 | | - inst->alg.halg.digestsize = salg->digestsize; |
|---|
| 905 | | - inst->alg.halg.statesize = salg->statesize; |
|---|
| 666 | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | |
|---|
| 667 | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | |
|---|
| 668 | + CRYPTO_ALG_OPTIONAL_KEY)); |
|---|
| 669 | + inst->alg.halg.digestsize = alg->digestsize; |
|---|
| 670 | + inst->alg.halg.statesize = alg->statesize; |
|---|
| 906 | 671 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
|---|
| 907 | 672 | |
|---|
| 908 | 673 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; |
|---|
| .. | .. |
|---|
| 914 | 679 | inst->alg.finup = cryptd_hash_finup_enqueue; |
|---|
| 915 | 680 | inst->alg.export = cryptd_hash_export; |
|---|
| 916 | 681 | inst->alg.import = cryptd_hash_import; |
|---|
| 917 | | - if (crypto_shash_alg_has_setkey(salg)) |
|---|
| 682 | + if (crypto_shash_alg_has_setkey(alg)) |
|---|
| 918 | 683 | inst->alg.setkey = cryptd_hash_setkey; |
|---|
| 919 | 684 | inst->alg.digest = cryptd_hash_digest_enqueue; |
|---|
| 920 | 685 | |
|---|
| 686 | + inst->free = cryptd_hash_free; |
|---|
| 687 | + |
|---|
| 921 | 688 | err = ahash_register_instance(tmpl, inst); |
|---|
| 922 | 689 | if (err) { |
|---|
| 923 | | - crypto_drop_shash(&ctx->spawn); |
|---|
| 924 | | -out_free_inst: |
|---|
| 925 | | - kfree(inst); |
|---|
| 690 | +err_free_inst: |
|---|
| 691 | + cryptd_hash_free(inst); |
|---|
| 926 | 692 | } |
|---|
| 927 | | - |
|---|
| 928 | | -out_put_alg: |
|---|
| 929 | | - crypto_mod_put(alg); |
|---|
| 930 | 693 | return err; |
|---|
| 931 | 694 | } |
|---|
| 932 | 695 | |
|---|
| .. | .. |
|---|
| 971 | 734 | |
|---|
| 972 | 735 | out: |
|---|
| 973 | 736 | ctx = crypto_aead_ctx(tfm); |
|---|
| 974 | | - refcnt = atomic_read(&ctx->refcnt); |
|---|
| 737 | + refcnt = refcount_read(&ctx->refcnt); |
|---|
| 975 | 738 | |
|---|
| 976 | 739 | local_bh_disable(); |
|---|
| 977 | 740 | compl(&req->base, err); |
|---|
| 978 | 741 | local_bh_enable(); |
|---|
| 979 | 742 | |
|---|
| 980 | | - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 743 | + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 981 | 744 | crypto_free_aead(tfm); |
|---|
| 982 | 745 | } |
|---|
| 983 | 746 | |
|---|
| .. | .. |
|---|
| 1048 | 811 | crypto_free_aead(ctx->child); |
|---|
| 1049 | 812 | } |
|---|
| 1050 | 813 | |
|---|
| 814 | +static void cryptd_aead_free(struct aead_instance *inst) |
|---|
| 815 | +{ |
|---|
| 816 | + struct aead_instance_ctx *ctx = aead_instance_ctx(inst); |
|---|
| 817 | + |
|---|
| 818 | + crypto_drop_aead(&ctx->aead_spawn); |
|---|
| 819 | + kfree(inst); |
|---|
| 820 | +} |
|---|
| 821 | + |
|---|
| 1051 | 822 | static int cryptd_create_aead(struct crypto_template *tmpl, |
|---|
| 1052 | 823 | struct rtattr **tb, |
|---|
| 824 | + struct crypto_attr_type *algt, |
|---|
| 1053 | 825 | struct cryptd_queue *queue) |
|---|
| 1054 | 826 | { |
|---|
| 1055 | 827 | struct aead_instance_ctx *ctx; |
|---|
| 1056 | 828 | struct aead_instance *inst; |
|---|
| 1057 | 829 | struct aead_alg *alg; |
|---|
| 1058 | | - const char *name; |
|---|
| 1059 | | - u32 type = 0; |
|---|
| 1060 | | - u32 mask = CRYPTO_ALG_ASYNC; |
|---|
| 830 | + u32 type; |
|---|
| 831 | + u32 mask; |
|---|
| 1061 | 832 | int err; |
|---|
| 1062 | 833 | |
|---|
| 1063 | | - cryptd_check_internal(tb, &type, &mask); |
|---|
| 1064 | | - |
|---|
| 1065 | | - name = crypto_attr_alg_name(tb[1]); |
|---|
| 1066 | | - if (IS_ERR(name)) |
|---|
| 1067 | | - return PTR_ERR(name); |
|---|
| 834 | + cryptd_type_and_mask(algt, &type, &mask); |
|---|
| 1068 | 835 | |
|---|
| 1069 | 836 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
|---|
| 1070 | 837 | if (!inst) |
|---|
| .. | .. |
|---|
| 1073 | 840 | ctx = aead_instance_ctx(inst); |
|---|
| 1074 | 841 | ctx->queue = queue; |
|---|
| 1075 | 842 | |
|---|
| 1076 | | - crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst)); |
|---|
| 1077 | | - err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask); |
|---|
| 843 | + err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst), |
|---|
| 844 | + crypto_attr_alg_name(tb[1]), type, mask); |
|---|
| 1078 | 845 | if (err) |
|---|
| 1079 | | - goto out_free_inst; |
|---|
| 846 | + goto err_free_inst; |
|---|
| 1080 | 847 | |
|---|
| 1081 | 848 | alg = crypto_spawn_aead_alg(&ctx->aead_spawn); |
|---|
| 1082 | 849 | err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base); |
|---|
| 1083 | 850 | if (err) |
|---|
| 1084 | | - goto out_drop_aead; |
|---|
| 851 | + goto err_free_inst; |
|---|
| 1085 | 852 | |
|---|
| 1086 | | - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | |
|---|
| 1087 | | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); |
|---|
| 853 | + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | |
|---|
| 854 | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); |
|---|
| 1088 | 855 | inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); |
|---|
| 1089 | 856 | |
|---|
| 1090 | 857 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); |
|---|
| .. | .. |
|---|
| 1097 | 864 | inst->alg.encrypt = cryptd_aead_encrypt_enqueue; |
|---|
| 1098 | 865 | inst->alg.decrypt = cryptd_aead_decrypt_enqueue; |
|---|
| 1099 | 866 | |
|---|
| 867 | + inst->free = cryptd_aead_free; |
|---|
| 868 | + |
|---|
| 1100 | 869 | err = aead_register_instance(tmpl, inst); |
|---|
| 1101 | 870 | if (err) { |
|---|
| 1102 | | -out_drop_aead: |
|---|
| 1103 | | - crypto_drop_aead(&ctx->aead_spawn); |
|---|
| 1104 | | -out_free_inst: |
|---|
| 1105 | | - kfree(inst); |
|---|
| 871 | +err_free_inst: |
|---|
| 872 | + cryptd_aead_free(inst); |
|---|
| 1106 | 873 | } |
|---|
| 1107 | 874 | return err; |
|---|
| 1108 | 875 | } |
|---|
| .. | .. |
|---|
| 1118 | 885 | return PTR_ERR(algt); |
|---|
| 1119 | 886 | |
|---|
| 1120 | 887 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
|---|
| 1121 | | - case CRYPTO_ALG_TYPE_BLKCIPHER: |
|---|
| 1122 | | - if ((algt->type & CRYPTO_ALG_TYPE_MASK) == |
|---|
| 1123 | | - CRYPTO_ALG_TYPE_BLKCIPHER) |
|---|
| 1124 | | - return cryptd_create_blkcipher(tmpl, tb, &queue); |
|---|
| 1125 | | - |
|---|
| 1126 | | - return cryptd_create_skcipher(tmpl, tb, &queue); |
|---|
| 1127 | | - case CRYPTO_ALG_TYPE_DIGEST: |
|---|
| 1128 | | - return cryptd_create_hash(tmpl, tb, &queue); |
|---|
| 888 | + case CRYPTO_ALG_TYPE_SKCIPHER: |
|---|
| 889 | + return cryptd_create_skcipher(tmpl, tb, algt, &queue); |
|---|
| 890 | + case CRYPTO_ALG_TYPE_HASH: |
|---|
| 891 | + return cryptd_create_hash(tmpl, tb, algt, &queue); |
|---|
| 1129 | 892 | case CRYPTO_ALG_TYPE_AEAD: |
|---|
| 1130 | | - return cryptd_create_aead(tmpl, tb, &queue); |
|---|
| 893 | + return cryptd_create_aead(tmpl, tb, algt, &queue); |
|---|
| 1131 | 894 | } |
|---|
| 1132 | 895 | |
|---|
| 1133 | 896 | return -EINVAL; |
|---|
| 1134 | 897 | } |
|---|
| 1135 | 898 | |
|---|
| 1136 | | -static void cryptd_free(struct crypto_instance *inst) |
|---|
| 1137 | | -{ |
|---|
| 1138 | | - struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
|---|
| 1139 | | - struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); |
|---|
| 1140 | | - struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst); |
|---|
| 1141 | | - |
|---|
| 1142 | | - switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { |
|---|
| 1143 | | - case CRYPTO_ALG_TYPE_AHASH: |
|---|
| 1144 | | - crypto_drop_shash(&hctx->spawn); |
|---|
| 1145 | | - kfree(ahash_instance(inst)); |
|---|
| 1146 | | - return; |
|---|
| 1147 | | - case CRYPTO_ALG_TYPE_AEAD: |
|---|
| 1148 | | - crypto_drop_aead(&aead_ctx->aead_spawn); |
|---|
| 1149 | | - kfree(aead_instance(inst)); |
|---|
| 1150 | | - return; |
|---|
| 1151 | | - default: |
|---|
| 1152 | | - crypto_drop_spawn(&ctx->spawn); |
|---|
| 1153 | | - kfree(inst); |
|---|
| 1154 | | - } |
|---|
| 1155 | | -} |
|---|
| 1156 | | - |
|---|
| 1157 | 899 | static struct crypto_template cryptd_tmpl = { |
|---|
| 1158 | 900 | .name = "cryptd", |
|---|
| 1159 | 901 | .create = cryptd_create, |
|---|
| 1160 | | - .free = cryptd_free, |
|---|
| 1161 | 902 | .module = THIS_MODULE, |
|---|
| 1162 | 903 | }; |
|---|
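
With per-type free callbacks (`cryptd_hash_free()`, `cryptd_aead_free()`, and the skcipher equivalent) installed through `inst->free`, the template-level `cryptd_free()` and its flag-based type switch disappear. The underlying pattern is just a destructor stored as a function pointer on the object itself; a hypothetical miniature:

```c
#include <stdio.h>
#include <stdlib.h>

struct instance {
	void (*free)(struct instance *inst);	/* set by the creator */
	void *payload;
};

static void hash_free(struct instance *inst)
{
	printf("dropping shash spawn\n");
	free(inst->payload);
	free(inst);
}

int main(void)
{
	struct instance *inst = calloc(1, sizeof(*inst));

	inst->payload = malloc(8);
	inst->free = hash_free;	/* as cryptd_create_hash() sets inst->free */

	/* Whoever tears the instance down needs no type switch: */
	inst->free(inst);
	return 0;
}
```
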
| 1163 | | - |
|---|
| 1164 | | -struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, |
|---|
| 1165 | | - u32 type, u32 mask) |
|---|
| 1166 | | -{ |
|---|
| 1167 | | - char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; |
|---|
| 1168 | | - struct cryptd_blkcipher_ctx *ctx; |
|---|
| 1169 | | - struct crypto_tfm *tfm; |
|---|
| 1170 | | - |
|---|
| 1171 | | - if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, |
|---|
| 1172 | | - "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) |
|---|
| 1173 | | - return ERR_PTR(-EINVAL); |
|---|
| 1174 | | - type = crypto_skcipher_type(type); |
|---|
| 1175 | | - mask &= ~CRYPTO_ALG_TYPE_MASK; |
|---|
| 1176 | | - mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); |
|---|
| 1177 | | - tfm = crypto_alloc_base(cryptd_alg_name, type, mask); |
|---|
| 1178 | | - if (IS_ERR(tfm)) |
|---|
| 1179 | | - return ERR_CAST(tfm); |
|---|
| 1180 | | - if (tfm->__crt_alg->cra_module != THIS_MODULE) { |
|---|
| 1181 | | - crypto_free_tfm(tfm); |
|---|
| 1182 | | - return ERR_PTR(-EINVAL); |
|---|
| 1183 | | - } |
|---|
| 1184 | | - |
|---|
| 1185 | | - ctx = crypto_tfm_ctx(tfm); |
|---|
| 1186 | | - atomic_set(&ctx->refcnt, 1); |
|---|
| 1187 | | - |
|---|
| 1188 | | - return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); |
|---|
| 1189 | | -} |
|---|
| 1190 | | -EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); |
|---|
| 1191 | | - |
|---|
| 1192 | | -struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) |
|---|
| 1193 | | -{ |
|---|
| 1194 | | - struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
|---|
| 1195 | | - return ctx->child; |
|---|
| 1196 | | -} |
|---|
| 1197 | | -EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); |
|---|
| 1198 | | - |
|---|
| 1199 | | -bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm) |
|---|
| 1200 | | -{ |
|---|
| 1201 | | - struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
|---|
| 1202 | | - |
|---|
| 1203 | | - return atomic_read(&ctx->refcnt) - 1; |
|---|
| 1204 | | -} |
|---|
| 1205 | | -EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued); |
|---|
| 1206 | | - |
|---|
| 1207 | | -void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) |
|---|
| 1208 | | -{ |
|---|
| 1209 | | - struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
|---|
| 1210 | | - |
|---|
| 1211 | | - if (atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 1212 | | - crypto_free_ablkcipher(&tfm->base); |
|---|
| 1213 | | -} |
|---|
| 1214 | | -EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
|---|
| 1215 | 904 | |
|---|
| 1216 | 905 | struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, |
|---|
| 1217 | 906 | u32 type, u32 mask) |
|---|
| .. | .. |
|---|
| 1234 | 923 | } |
|---|
| 1235 | 924 | |
|---|
| 1236 | 925 | ctx = crypto_skcipher_ctx(tfm); |
|---|
| 1237 | | - atomic_set(&ctx->refcnt, 1); |
|---|
| 926 | + refcount_set(&ctx->refcnt, 1); |
|---|
| 1238 | 927 | |
|---|
| 1239 | 928 | return container_of(tfm, struct cryptd_skcipher, base); |
|---|
| 1240 | 929 | } |
|---|
| .. | .. |
|---|
| 1252 | 941 | { |
|---|
| 1253 | 942 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); |
|---|
| 1254 | 943 | |
|---|
| 1255 | | - return atomic_read(&ctx->refcnt) - 1; |
|---|
| 944 | + return refcount_read(&ctx->refcnt) - 1; |
|---|
| 1256 | 945 | } |
|---|
| 1257 | 946 | EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); |
|---|
| 1258 | 947 | |
|---|
| .. | .. |
|---|
| 1260 | 949 | { |
|---|
| 1261 | 950 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); |
|---|
| 1262 | 951 | |
|---|
| 1263 | | - if (atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 952 | + if (refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 1264 | 953 | crypto_free_skcipher(&tfm->base); |
|---|
| 1265 | 954 | } |
|---|
| 1266 | 955 | EXPORT_SYMBOL_GPL(cryptd_free_skcipher); |
|---|
| .. | .. |
|---|
| 1284 | 973 | } |
|---|
| 1285 | 974 | |
|---|
| 1286 | 975 | ctx = crypto_ahash_ctx(tfm); |
|---|
| 1287 | | - atomic_set(&ctx->refcnt, 1); |
|---|
| 976 | + refcount_set(&ctx->refcnt, 1); |
|---|
| 1288 | 977 | |
|---|
| 1289 | 978 | return __cryptd_ahash_cast(tfm); |
|---|
| 1290 | 979 | } |
|---|
| .. | .. |
|---|
| 1309 | 998 | { |
|---|
| 1310 | 999 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); |
|---|
| 1311 | 1000 | |
|---|
| 1312 | | - return atomic_read(&ctx->refcnt) - 1; |
|---|
| 1001 | + return refcount_read(&ctx->refcnt) - 1; |
|---|
| 1313 | 1002 | } |
|---|
| 1314 | 1003 | EXPORT_SYMBOL_GPL(cryptd_ahash_queued); |
|---|
| 1315 | 1004 | |
|---|
| .. | .. |
|---|
| 1317 | 1006 | { |
|---|
| 1318 | 1007 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); |
|---|
| 1319 | 1008 | |
|---|
| 1320 | | - if (atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 1009 | + if (refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 1321 | 1010 | crypto_free_ahash(&tfm->base); |
|---|
| 1322 | 1011 | } |
|---|
| 1323 | 1012 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); |
|---|
| .. | .. |
|---|
| 1341 | 1030 | } |
|---|
| 1342 | 1031 | |
|---|
| 1343 | 1032 | ctx = crypto_aead_ctx(tfm); |
|---|
| 1344 | | - atomic_set(&ctx->refcnt, 1); |
|---|
| 1033 | + refcount_set(&ctx->refcnt, 1); |
|---|
| 1345 | 1034 | |
|---|
| 1346 | 1035 | return __cryptd_aead_cast(tfm); |
|---|
| 1347 | 1036 | } |
|---|
| .. | .. |
|---|
| 1359 | 1048 | { |
|---|
| 1360 | 1049 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); |
|---|
| 1361 | 1050 | |
|---|
| 1362 | | - return atomic_read(&ctx->refcnt) - 1; |
|---|
| 1051 | + return refcount_read(&ctx->refcnt) - 1; |
|---|
| 1363 | 1052 | } |
|---|
| 1364 | 1053 | EXPORT_SYMBOL_GPL(cryptd_aead_queued); |
|---|
| 1365 | 1054 | |
|---|
| .. | .. |
|---|
| 1367 | 1056 | { |
|---|
| 1368 | 1057 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); |
|---|
| 1369 | 1058 | |
|---|
| 1370 | | - if (atomic_dec_and_test(&ctx->refcnt)) |
|---|
| 1059 | + if (refcount_dec_and_test(&ctx->refcnt)) |
|---|
| 1371 | 1060 | crypto_free_aead(&tfm->base); |
|---|
| 1372 | 1061 | } |
|---|
| 1373 | 1062 | EXPORT_SYMBOL_GPL(cryptd_free_aead); |
|---|
| .. | .. |
|---|
| 1376 | 1065 | { |
|---|
| 1377 | 1066 | int err; |
|---|
| 1378 | 1067 | |
|---|
| 1068 | + cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, |
|---|
| 1069 | + 1); |
|---|
| 1070 | + if (!cryptd_wq) |
|---|
| 1071 | + return -ENOMEM; |
|---|
| 1072 | + |
|---|
| 1379 | 1073 | err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); |
|---|
| 1380 | 1074 | if (err) |
|---|
| 1381 | | - return err; |
|---|
| 1075 | + goto err_destroy_wq; |
|---|
| 1382 | 1076 | |
|---|
| 1383 | 1077 | err = crypto_register_template(&cryptd_tmpl); |
|---|
| 1384 | 1078 | if (err) |
|---|
| 1385 | | - cryptd_fini_queue(&queue); |
|---|
| 1079 | + goto err_fini_queue; |
|---|
| 1386 | 1080 | |
|---|
| 1081 | + return 0; |
|---|
| 1082 | + |
|---|
| 1083 | +err_fini_queue: |
|---|
| 1084 | + cryptd_fini_queue(&queue); |
|---|
| 1085 | +err_destroy_wq: |
|---|
| 1086 | + destroy_workqueue(cryptd_wq); |
|---|
| 1387 | 1087 | return err; |
|---|
| 1388 | 1088 | } |
|---|
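
`cryptd_init()` now owns a dedicated workqueue instead of sharing `kcrypto_wq`, so its failure handling becomes the usual reverse-order unwind ladder: each acquisition gets a label that releases everything acquired before it. (On the exit side, the workqueue is destroyed first, which flushes any pending work before the queue and template go away.) A freestanding sketch of the same shape, with invented resource names:

```c
#include <stdio.h>

static int alloc_wq(void)      { puts("wq up");     return 0; }
static void destroy_wq(void)   { puts("wq down"); }
static int init_queue(void)    { puts("queue up");  return 0; }
static void fini_queue(void)   { puts("queue down"); }
static int register_tmpl(void) { puts("tmpl up");   return -1; /* force failure */ }

static int mod_init(void)
{
	int err;

	if (alloc_wq())
		return -1;

	err = init_queue();
	if (err)
		goto err_destroy_wq;

	err = register_tmpl();
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:		/* unwind in reverse acquisition order */
	fini_queue();
err_destroy_wq:
	destroy_wq();
	return err;
}

int main(void)
{
	return mod_init() ? 1 : 0;
}
```
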
| 1389 | 1089 | |
|---|
| 1390 | 1090 | static void __exit cryptd_exit(void) |
|---|
| 1391 | 1091 | { |
|---|
| 1092 | + destroy_workqueue(cryptd_wq); |
|---|
| 1392 | 1093 | cryptd_fini_queue(&queue); |
|---|
| 1393 | 1094 | crypto_unregister_template(&cryptd_tmpl); |
|---|
| 1394 | 1095 | } |
|---|