@@ ... @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Software async crypto daemon.
  *
@@ ... @@
  * Gabriele Paoloni <gabriele.paoloni@intel.com>
  * Aidan O'Mahony (aidan.o.mahony@intel.com)
  * Copyright (c) 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/cryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ ... @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 static unsigned int cryptd_max_cpu_qlen = 1000;
 module_param(cryptd_max_cpu_qlen, uint, 0);
 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
+
+static struct workqueue_struct *cryptd_wq;
 
 struct cryptd_cpu_queue {
 	struct crypto_queue queue;
@@ ... @@
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
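The new comment codifies the locking rule this patch introduces: producers run in softirq context, the consumer runs in a kworker, and both serialize on the per-CPU queue purely by disabling bottom halves on the local CPU. A minimal sketch of the pattern, with hypothetical names (my_queue, my_enqueue) rather than code from this patch:

	#include <linux/percpu.h>
	#include <linux/list.h>
	#include <linux/bottom_half.h>

	struct my_queue {
		struct list_head items;		/* only touched with BH disabled */
	};

	static DEFINE_PER_CPU(struct my_queue, my_queues);

	static void my_enqueue(struct list_head *item)
	{
		struct my_queue *q;

		local_bh_disable();		/* no softirq runs on this CPU now,  */
		q = this_cpu_ptr(&my_queues);	/* and we cannot migrate, so the     */
		list_add_tail(item, &q->items);	/* per-CPU queue is exclusively ours */
		local_bh_enable();
	}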
@@ ... @@
 	struct cryptd_queue *queue;
 };
 
-struct cryptd_blkcipher_ctx {
-	atomic_t refcnt;
-	struct crypto_blkcipher *child;
-};
-
-struct cryptd_blkcipher_request_ctx {
-	crypto_completion_t complete;
-};
-
 struct cryptd_skcipher_ctx {
-	atomic_t refcnt;
-	struct crypto_skcipher *child;
+	refcount_t refcnt;
+	struct crypto_sync_skcipher *child;
 };
 
 struct cryptd_skcipher_request_ctx {
@@ ... @@
 };
 
 struct cryptd_hash_ctx {
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct crypto_shash *child;
 };
 
@@ ... @@
 };
 
 struct cryptd_aead_ctx {
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct crypto_aead *child;
 };
 
@@ ... @@
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
-	atomic_t *refcnt;
+	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
-	if (!atomic_read(refcnt))
-		goto out_put_cpu;
+	if (!refcount_read(refcnt))
+		goto out;
 
-	atomic_inc(refcnt);
+	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
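The refcnt handling is easier to follow with the allocation side in mind: handles returned by the cryptd_alloc_*() helpers start with refcnt == 1, while instances created behind the caller's back keep refcnt == 0, which is why the refcount_read() guard skips the increment for them. Unlike atomic_t, refcount_t saturates and warns on overflow instead of wrapping, which is the point of the conversion. A condensed sketch of the lifetime rule (ctx and tfm stand in for the per-type variants):

	/* enqueue side: pin the tfm for every request in flight */
	if (refcount_read(&ctx->refcnt))	/* 0 means "not user-managed" */
		refcount_inc(&ctx->refcnt);

	/*
	 * completion side: drop the pin; the user's cryptd_free_*() drops the
	 * initial reference, so whichever side reaches zero last frees the tfm
	 */
	if (err != -EINPROGRESS && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);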
@@ ... @@
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)
@@ ... @@
 		req->complete(req, 0);
 
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work(cryptd_wq, &cpu_queue->work);
 }
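With the preempt_disable() pairing gone, the worker's contract is simple: pop exactly one request under BH protection, complete it in task context (where it may sleep), and requeue itself if work remains so other items on the same workqueue get a turn. Schematically, with hypothetical helpers (my_pop, my_pending):

	static void my_worker(struct work_struct *work)
	{
		struct my_req *req;	/* hypothetical request type */

		local_bh_disable();	/* same rule as the enqueue side */
		req = my_pop(work);	/* hypothetical: take one request */
		local_bh_enable();

		if (req)
			my_process(req);	/* task context: sleeping is fine */

		if (my_pending(work))		/* hypothetical: queue not empty? */
			queue_work(cryptd_wq, work);	/* run again later */
	}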
 
 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
@@ ... @@
 	return ictx->queue;
 }
 
-static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
-					 u32 *mask)
+static void cryptd_type_and_mask(struct crypto_attr_type *algt,
+				 u32 *type, u32 *mask)
 {
-	struct crypto_attr_type *algt;
+	/*
+	 * cryptd is allowed to wrap internal algorithms, but in that case the
+	 * resulting cryptd instance will be marked as internal as well.
+	 */
+	*type = algt->type & CRYPTO_ALG_INTERNAL;
+	*mask = algt->mask & CRYPTO_ALG_INTERNAL;
 
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return;
+	/* No point in cryptd wrapping an algorithm that's already async. */
+	*mask |= CRYPTO_ALG_ASYNC;
 
-	*type |= algt->type & CRYPTO_ALG_INTERNAL;
-	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-}
-
-static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
-				   const u8 *key, unsigned int keylen)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
-	struct crypto_blkcipher *child = ctx->child;
-	int err;
-
-	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
-					  CRYPTO_TFM_REQ_MASK);
-	err = crypto_blkcipher_setkey(child, key, keylen);
-	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
-					    CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
-static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
-				   struct crypto_blkcipher *child,
-				   int err,
-				   int (*crypt)(struct blkcipher_desc *desc,
-						struct scatterlist *dst,
-						struct scatterlist *src,
-						unsigned int len))
-{
-	struct cryptd_blkcipher_request_ctx *rctx;
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_ablkcipher *tfm;
-	struct blkcipher_desc desc;
-	int refcnt;
-
-	rctx = ablkcipher_request_ctx(req);
-
-	if (unlikely(err == -EINPROGRESS))
-		goto out;
-
-	desc.tfm = child;
-	desc.info = req->info;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = crypt(&desc, req->dst, req->src, req->nbytes);
-
-	req->base.complete = rctx->complete;
-
-out:
-	tfm = crypto_ablkcipher_reqtfm(req);
-	ctx = crypto_ablkcipher_ctx(tfm);
-	refcnt = atomic_read(&ctx->refcnt);
-
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
-
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(tfm);
-}
-
-static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->encrypt);
-}
-
-static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->decrypt);
-}
-
-static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-				    crypto_completion_t compl)
-{
-	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_queue *queue;
-
-	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
-	rctx->complete = req->base.complete;
-	req->base.complete = compl;
-
-	return cryptd_enqueue_request(queue, &req->base);
-}
-
-static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
-}
-
-static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
-}
-
-static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_spawn *spawn = &ictx->spawn;
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_blkcipher *cipher;
-
-	cipher = crypto_spawn_blkcipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	tfm->crt_ablkcipher.reqsize =
-		sizeof(struct cryptd_blkcipher_request_ctx);
-	return 0;
-}
-
-static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_blkcipher(ctx->child);
+	*mask |= crypto_algt_inherited_mask(algt);
 }
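Folding the flag policy into cryptd_type_and_mask() also makes the lookup semantics explicit. As I understand the crypto API's matching rule, a candidate algorithm matches when ((alg->cra_flags ^ type) & mask) == 0, so setting CRYPTO_ALG_ASYNC in the mask while leaving it clear in the type restricts the search to synchronous children, the only ones worth wrapping. A sketch of the resulting values for the common case:

	u32 type, mask;

	cryptd_type_and_mask(algt, &type, &mask);
	/*
	 * Wrapping an ordinary (non-internal) child algorithm:
	 *	type == 0
	 *	mask == CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC | <inherited bits>
	 * i.e. "find me a non-internal, synchronous implementation".
	 */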
 
 static int cryptd_init_instance(struct crypto_instance *inst,
@@ ... @@
 	return 0;
 }
 
-static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
-				   unsigned int tail)
-{
-	char *p;
-	struct crypto_instance *inst;
-	int err;
-
-	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
-	if (!p)
-		return ERR_PTR(-ENOMEM);
-
-	inst = (void *)(p + head);
-
-	err = cryptd_init_instance(inst, alg);
-	if (err)
-		goto out_free_inst;
-
-out:
-	return p;
-
-out_free_inst:
-	kfree(p);
-	p = ERR_PTR(err);
-	goto out;
-}
-
-static int cryptd_create_blkcipher(struct crypto_template *tmpl,
-				   struct rtattr **tb,
-				   struct cryptd_queue *queue)
-{
-	struct cryptd_instance_ctx *ctx;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
-	u32 mask = CRYPTO_ALG_TYPE_MASK;
-	int err;
-
-	cryptd_check_internal(tb, &type, &mask);
-
-	alg = crypto_get_attr_alg(tb, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	ctx = crypto_instance_ctx(inst);
-	ctx->queue = queue;
-
-	err = crypto_init_spawn(&ctx->spawn, alg, inst,
-				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-	if (err)
-		goto out_free_inst;
-
-	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
-		type |= CRYPTO_ALG_INTERNAL;
-	inst->alg.cra_flags = type;
-	inst->alg.cra_type = &crypto_ablkcipher_type;
-
-	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
-	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
-	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-
-	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
-
-	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
-
-	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
-	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
-
-	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
-	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
-	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		crypto_drop_spawn(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
-	}
-
-out_put_alg:
-	crypto_mod_put(alg);
-	return err;
-}
-
 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 				  const u8 *key, unsigned int keylen)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_skcipher *child = ctx->child;
-	int err;
+	struct crypto_sync_skcipher *child = ctx->child;
 
-	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(child,
+				       crypto_skcipher_get_flags(parent) &
 				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-				  CRYPTO_TFM_RES_MASK);
-	return err;
+	return crypto_sync_skcipher_setkey(child, key, keylen);
 }
 
 static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
@@ ... @@
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
-	int refcnt = atomic_read(&ctx->refcnt);
+	int refcnt = refcount_read(&ctx->refcnt);
 
 	local_bh_disable();
 	rctx->complete(&req->base, err);
 	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_skcipher(tfm);
 }
 
@@ ... @@
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct crypto_sync_skcipher *child = ctx->child;
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_sync_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
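The switch from SKCIPHER_REQUEST_ON_STACK to SYNC_SKCIPHER_REQUEST_ON_STACK is what makes the crypto_sync_skcipher child type pay off: a sync skcipher has a bounded request size, so the sub-request can safely live on the stack and the completion path never allocates. The shape of the pattern, condensed from the two call sites (the trailing skcipher_request_zero() is my addition, a common courtesy when the request held sensitive material, not something shown in this hunk):

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);
	err = crypto_skcipher_encrypt(subreq);	/* runs synchronously */
	skcipher_request_zero(subreq);		/* wipe the on-stack request */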
@@ ... @@
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct crypto_sync_skcipher *child = ctx->child;
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_sync_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ ... @@
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctx->child = cipher;
+	ctx->child = (struct crypto_sync_skcipher *)cipher;
 	crypto_skcipher_set_reqsize(
 		tfm, sizeof(struct cryptd_skcipher_request_ctx));
 	return 0;
@@ ... @@
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_skcipher(ctx->child);
+	crypto_free_sync_skcipher(ctx->child);
 }
 
 static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ ... @@
 
 static int cryptd_create_skcipher(struct crypto_template *tmpl,
 				  struct rtattr **tb,
+				  struct crypto_attr_type *algt,
 				  struct cryptd_queue *queue)
 {
 	struct skcipherd_instance_ctx *ctx;
 	struct skcipher_instance *inst;
 	struct skcipher_alg *alg;
-	const char *name;
 	u32 type;
 	u32 mask;
 	int err;
 
-	type = 0;
-	mask = CRYPTO_ALG_ASYNC;
-
-	cryptd_check_internal(tb, &type, &mask);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return PTR_ERR(name);
+	cryptd_type_and_mask(algt, &type, &mask);
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
@@ ... @@
 	ctx = skcipher_instance_ctx(inst);
 	ctx->queue = queue;
 
-	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+				   crypto_attr_alg_name(tb[1]), type, mask);
 	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
 
 	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
 	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
 	if (err)
-		goto out_drop_skcipher;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
-				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
-
+	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
 	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
@@ ... @@
 
 	err = skcipher_register_instance(tmpl, inst);
 	if (err) {
-out_drop_skcipher:
-		crypto_drop_skcipher(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_skcipher_free(inst);
 	}
 	return err;
 }
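The error handling above collapses two unwind labels into one because cryptd_skcipher_free() is written to tolerate a partially initialized instance: to the best of my knowledge, dropping a spawn that was never grabbed is a no-op in the modern spawn API, so one label serves every failure point. The resulting shape, which the hash and AEAD paths below repeat (first_step/free_it are hypothetical names):

	err = first_step(inst);
	if (err)
		goto err_free_inst;
	err = second_step(inst);
	if (err)
		goto err_free_inst;

	err = register_it(inst);
	if (err) {
err_free_inst:
		free_it(inst);	/* must cope with partial setup */
	}
	return err;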
@@ ... @@
 {
 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
 	struct crypto_shash *child = ctx->child;
-	int err;
 
 	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
 			       CRYPTO_TFM_REQ_MASK);
-	err = crypto_shash_setkey(child, key, keylen);
-	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
-			       CRYPTO_TFM_RES_MASK);
-	return err;
+	return crypto_shash_setkey(child, key, keylen);
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
@@ ... @@
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-	int refcnt = atomic_read(&ctx->refcnt);
+	int refcnt = refcount_read(&ctx->refcnt);
 
 	local_bh_disable();
 	rctx->complete(&req->base, err);
 	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_ahash(tfm);
 }
 
@@ ... @@
 		goto out;
 
 	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_init(desc);
 
@@ ... @@
 		goto out;
 
 	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = shash_ahash_digest(req, desc);
 
@@ ... @@
 	struct shash_desc *desc = cryptd_shash_desc(req);
 
 	desc->tfm = ctx->child;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
 
+static void cryptd_hash_free(struct ahash_instance *inst)
+{
+	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
+
+	crypto_drop_shash(&ctx->spawn);
+	kfree(inst);
+}
+
 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+			      struct crypto_attr_type *algt,
 			      struct cryptd_queue *queue)
 {
 	struct hashd_instance_ctx *ctx;
 	struct ahash_instance *inst;
-	struct shash_alg *salg;
-	struct crypto_alg *alg;
-	u32 type = 0;
-	u32 mask = 0;
+	struct shash_alg *alg;
+	u32 type;
+	u32 mask;
 	int err;
 
-	cryptd_check_internal(tb, &type, &mask);
+	cryptd_type_and_mask(algt, &type, &mask);
 
-	salg = shash_attr_alg(tb[1], type, mask);
-	if (IS_ERR(salg))
-		return PTR_ERR(salg);
-
-	alg = &salg->base;
-	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
-				     sizeof(*ctx));
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
 
 	ctx = ahash_instance_ctx(inst);
 	ctx->queue = queue;
 
-	err = crypto_init_shash_spawn(&ctx->spawn, salg,
-				      ahash_crypto_instance(inst));
+	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
+				crypto_attr_alg_name(tb[1]), type, mask);
 	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
+	alg = crypto_spawn_shash_alg(&ctx->spawn);
 
-	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
-		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
-				   CRYPTO_ALG_OPTIONAL_KEY));
+	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
+	if (err)
+		goto err_free_inst;
 
-	inst->alg.halg.digestsize = salg->digestsize;
-	inst->alg.halg.statesize = salg->statesize;
+	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
+					CRYPTO_ALG_OPTIONAL_KEY));
+	inst->alg.halg.digestsize = alg->digestsize;
+	inst->alg.halg.statesize = alg->statesize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 
 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ ... @@
 	inst->alg.finup = cryptd_hash_finup_enqueue;
 	inst->alg.export = cryptd_hash_export;
 	inst->alg.import = cryptd_hash_import;
-	if (crypto_shash_alg_has_setkey(salg))
+	if (crypto_shash_alg_has_setkey(alg))
 		inst->alg.setkey = cryptd_hash_setkey;
 	inst->alg.digest = cryptd_hash_digest_enqueue;
 
+	inst->free = cryptd_hash_free;
+
 	err = ahash_register_instance(tmpl, inst);
 	if (err) {
-		crypto_drop_shash(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_hash_free(inst);
 	}
-
-out_put_alg:
-	crypto_mod_put(alg);
 	return err;
 }
 
@@ ... @@
 
 out:
 	ctx = crypto_aead_ctx(tfm);
-	refcnt = atomic_read(&ctx->refcnt);
+	refcnt = refcount_read(&ctx->refcnt);
 
 	local_bh_disable();
 	compl(&req->base, err);
 	local_bh_enable();
 
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_aead(tfm);
 }
 
@@ ... @@
 	crypto_free_aead(ctx->child);
 }
 
+static void cryptd_aead_free(struct aead_instance *inst)
+{
+	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->aead_spawn);
+	kfree(inst);
+}
+
 static int cryptd_create_aead(struct crypto_template *tmpl,
 			      struct rtattr **tb,
+			      struct crypto_attr_type *algt,
 			      struct cryptd_queue *queue)
 {
 	struct aead_instance_ctx *ctx;
 	struct aead_instance *inst;
 	struct aead_alg *alg;
-	const char *name;
-	u32 type = 0;
-	u32 mask = CRYPTO_ALG_ASYNC;
+	u32 type;
+	u32 mask;
 	int err;
 
-	cryptd_check_internal(tb, &type, &mask);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return PTR_ERR(name);
+	cryptd_type_and_mask(algt, &type, &mask);
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
@@ ... @@
 	ctx = aead_instance_ctx(inst);
 	ctx->queue = queue;
 
-	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
-	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
+	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
+			       crypto_attr_alg_name(tb[1]), type, mask);
 	if (err)
-		goto out_free_inst;
+		goto err_free_inst;
 
 	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
 	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
 	if (err)
-		goto out_drop_aead;
+		goto err_free_inst;
 
-	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
-				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 
 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
@@ ... @@
 	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
 	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
 
+	inst->free = cryptd_aead_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err) {
-out_drop_aead:
-		crypto_drop_aead(&ctx->aead_spawn);
-out_free_inst:
-		kfree(inst);
+err_free_inst:
+		cryptd_aead_free(inst);
 	}
 	return err;
 }
@@ ... @@
 		return PTR_ERR(algt);
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
-		    CRYPTO_ALG_TYPE_BLKCIPHER)
-			return cryptd_create_blkcipher(tmpl, tb, &queue);
-
-		return cryptd_create_skcipher(tmpl, tb, &queue);
-	case CRYPTO_ALG_TYPE_DIGEST:
-		return cryptd_create_hash(tmpl, tb, &queue);
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
+	case CRYPTO_ALG_TYPE_HASH:
+		return cryptd_create_hash(tmpl, tb, algt, &queue);
 	case CRYPTO_ALG_TYPE_AEAD:
-		return cryptd_create_aead(tmpl, tb, &queue);
+		return cryptd_create_aead(tmpl, tb, algt, &queue);
 	}
 
 	return -EINVAL;
 }
 
-static void cryptd_free(struct crypto_instance *inst)
-{
-	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
-	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
-
-	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		crypto_drop_shash(&hctx->spawn);
-		kfree(ahash_instance(inst));
-		return;
-	case CRYPTO_ALG_TYPE_AEAD:
-		crypto_drop_aead(&aead_ctx->aead_spawn);
-		kfree(aead_instance(inst));
-		return;
-	default:
-		crypto_drop_spawn(&ctx->spawn);
-		kfree(inst);
-	}
-}
-
 static struct crypto_template cryptd_tmpl = {
 	.name = "cryptd",
 	.create = cryptd_create,
-	.free = cryptd_free,
 	.module = THIS_MODULE,
 };
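With per-type .free callbacks attached to each instance, the template itself shrinks to name/create/module and the type-dispatching cryptd_free() disappears. For reference, this is roughly how a caller obtains a cryptd-backed transform; cryptd_alloc_skcipher() builds the "cryptd(...)" name internally, as the removed ablkcipher variant below illustrates ("cbc(aes)" is just an example child algorithm):

	struct cryptd_skcipher *tfm;

	tfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... use &tfm->base as a normal crypto_skcipher ... */

	cryptd_free_skcipher(tfm);	/* drops the initial reference */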
-
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask)
-{
-	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_tfm *tfm;
-
-	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
-		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-EINVAL);
-	type = crypto_skcipher_type(type);
-	mask &= ~CRYPTO_ALG_TYPE_MASK;
-	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
-	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
-	if (IS_ERR(tfm))
-		return ERR_CAST(tfm);
-	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
-		crypto_free_tfm(tfm);
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctx = crypto_tfm_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
-
-	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-	return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	return atomic_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
-
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	if (atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
 					      u32 type, u32 mask)
@@ ... @@
 }
 
 	ctx = crypto_skcipher_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
 	return container_of(tfm, struct cryptd_skcipher, base);
 }
@@ ... @@
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	return ctx->child;
+	return &ctx->child->base;
 }
 EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
 
@@ ... @@
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
 
@@ ... @@
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_skcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
@@ ... @@
 }
 
 	ctx = crypto_ahash_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
 	return __cryptd_ahash_cast(tfm);
 }
@@ ... @@
 {
 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
 
@@ ... @@
 {
 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
@@ ... @@
 }
 
 	ctx = crypto_aead_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
+	refcount_set(&ctx->refcnt, 1);
 
 	return __cryptd_aead_cast(tfm);
 }
@@ ... @@
 {
 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
 
-	return atomic_read(&ctx->refcnt) - 1;
+	return refcount_read(&ctx->refcnt) - 1;
 }
 EXPORT_SYMBOL_GPL(cryptd_aead_queued);
 
@@ ... @@
 {
 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
 
-	if (atomic_dec_and_test(&ctx->refcnt))
+	if (refcount_dec_and_test(&ctx->refcnt))
 		crypto_free_aead(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_aead);
@@ ... @@
 {
 	int err;
 
+	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
+				    1);
+	if (!cryptd_wq)
+		return -ENOMEM;
+
 	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
 	if (err)
-		return err;
+		goto err_destroy_wq;
 
 	err = crypto_register_template(&cryptd_tmpl);
 	if (err)
-		cryptd_fini_queue(&queue);
+		goto err_fini_queue;
 
+	return 0;
+
+err_fini_queue:
+	cryptd_fini_queue(&queue);
+err_destroy_wq:
+	destroy_workqueue(cryptd_wq);
 	return err;
 }
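The dedicated queue replaces the shared kcrypto_wq, and the flag choice is worth spelling out (my reading of the workqueue API, not text from the patch):

	/*
	 * alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	 *
	 * WQ_MEM_RECLAIM:   a rescuer thread guarantees forward progress even
	 *                   under memory pressure (crypto work may sit on the
	 *                   memory-reclaim/writeback path).
	 * WQ_CPU_INTENSIVE: CPU-bound work items do not block other work on
	 *                   the per-CPU concurrency-managed pool.
	 * max_active = 1:   at most one cryptd work item runs per CPU at a
	 *                   time.
	 */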
 
 static void __exit cryptd_exit(void)
 {
+	destroy_workqueue(cryptd_wq);
 	cryptd_fini_queue(&queue);
 	crypto_unregister_template(&cryptd_tmpl);
 }