2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -1,25 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* Algorithms supported by virtio crypto device
  *
  * Authors: Gonglei <arei.gonglei@huawei.com>
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/scatterlist.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <crypto/scatterwalk.h>
 #include <linux/atomic.h>
@@ -28,10 +17,10 @@
 #include "virtio_crypto_common.h"


-struct virtio_crypto_ablkcipher_ctx {
+struct virtio_crypto_skcipher_ctx {
         struct crypto_engine_ctx enginectx;
         struct virtio_crypto *vcrypto;
-        struct crypto_tfm *tfm;
+        struct crypto_skcipher *tfm;

         struct virtio_crypto_sym_session_info enc_sess_info;
         struct virtio_crypto_sym_session_info dec_sess_info;
@@ -42,8 +31,8 @@

         /* Cipher or aead */
         uint32_t type;
-        struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
-        struct ablkcipher_request *ablkcipher_req;
+        struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+        struct skcipher_request *skcipher_req;
         uint8_t *iv;
         /* Encryption? */
         bool encrypt;
@@ -53,7 +42,7 @@
         uint32_t algonum;
         uint32_t service;
         unsigned int active_devs;
-        struct crypto_alg algo;
+        struct skcipher_alg algo;
 };

 /*
@@ -61,9 +50,9 @@
  * and crypto algorithms registion.
  */
 static DEFINE_MUTEX(algs_lock);
-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
         struct virtio_crypto_sym_request *vc_sym_req,
-        struct ablkcipher_request *req,
+        struct skcipher_request *req,
         int err);

 static void virtio_crypto_dataq_sym_callback
@@ -71,7 +60,7 @@
 {
         struct virtio_crypto_sym_request *vc_sym_req =
                 container_of(vc_req, struct virtio_crypto_sym_request, base);
-        struct ablkcipher_request *ablk_req;
+        struct skcipher_request *ablk_req;
         int error;

         /* Finish the encrypt or decrypt process */
@@ -91,8 +80,8 @@
                         error = -EIO;
                         break;
                 }
-                ablk_req = vc_sym_req->ablkcipher_req;
-                virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+                ablk_req = vc_sym_req->skcipher_req;
+                virtio_crypto_skcipher_finalize_req(vc_sym_req,
                                                 ablk_req, error);
         }
 }
@@ -122,8 +111,8 @@
         return 0;
 }

-static int virtio_crypto_alg_ablkcipher_init_session(
-                struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_session(
+                struct virtio_crypto_skcipher_ctx *ctx,
                 uint32_t alg, const uint8_t *key,
                 unsigned int keylen,
                 int encrypt)
@@ -139,12 +128,10 @@
          * Avoid to do DMA from the stack, switch to using
          * dynamically-allocated for the key
          */
-        uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+        uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

         if (!cipher_key)
                 return -ENOMEM;
-
-        memcpy(cipher_key, key, keylen);

         spin_lock(&vcrypto->ctrl_lock);
         /* Pad ctrl header */
@@ -180,7 +167,7 @@
                                 num_in, vcrypto, GFP_ATOMIC);
         if (err < 0) {
                 spin_unlock(&vcrypto->ctrl_lock);
-                kzfree(cipher_key);
+                kfree_sensitive(cipher_key);
                 return err;
         }
         virtqueue_kick(vcrypto->ctrl_vq);
@@ -197,7 +184,7 @@
                 spin_unlock(&vcrypto->ctrl_lock);
                 pr_err("virtio_crypto: Create session failed status: %u\n",
                         le32_to_cpu(vcrypto->input.status));
-                kzfree(cipher_key);
+                kfree_sensitive(cipher_key);
                 return -EINVAL;
         }

@@ -210,12 +197,12 @@

         spin_unlock(&vcrypto->ctrl_lock);

-        kzfree(cipher_key);
+        kfree_sensitive(cipher_key);
         return 0;
 }

-static int virtio_crypto_alg_ablkcipher_close_session(
-                struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_close_session(
+                struct virtio_crypto_skcipher_ctx *ctx,
                 int encrypt)
 {
         struct scatterlist outhdr, status_sg, *sgs[2];
@@ -275,8 +262,8 @@
         return 0;
 }

-static int virtio_crypto_alg_ablkcipher_init_sessions(
-                struct virtio_crypto_ablkcipher_ctx *ctx,
+static int virtio_crypto_alg_skcipher_init_sessions(
+                struct virtio_crypto_skcipher_ctx *ctx,
                 const uint8_t *key, unsigned int keylen)
 {
         uint32_t alg;
@@ -285,37 +272,33 @@

         if (keylen > vcrypto->max_cipher_key_len) {
                 pr_err("virtio_crypto: the key is too long\n");
-                goto bad_key;
+                return -EINVAL;
         }

         if (virtio_crypto_alg_validate_key(keylen, &alg))
-                goto bad_key;
+                return -EINVAL;

         /* Create encryption session */
-        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                         alg, key, keylen, 1);
         if (ret)
                 return ret;
         /* Create decryption session */
-        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                         alg, key, keylen, 0);
         if (ret) {
-                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+                virtio_crypto_alg_skcipher_close_session(ctx, 1);
                 return ret;
         }
         return 0;
-
-bad_key:
-        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-        return -EINVAL;
 }

 /* Note: kernel crypto API realization */
-static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
                                          const uint8_t *key,
                                          unsigned int keylen)
 {
-        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
         uint32_t alg;
         int ret;

@@ -337,11 +320,11 @@
                 ctx->vcrypto = vcrypto;
         } else {
                 /* Rekeying, we should close the created sessions previously */
-                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
-                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+                virtio_crypto_alg_skcipher_close_session(ctx, 1);
+                virtio_crypto_alg_skcipher_close_session(ctx, 0);
         }

-        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+        ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
         if (ret) {
                 virtcrypto_dev_put(ctx->vcrypto);
                 ctx->vcrypto = NULL;
@@ -353,14 +336,14 @@
 }

 static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
-                struct ablkcipher_request *req,
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+                struct skcipher_request *req,
                 struct data_queue *data_vq)
 {
-        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-        struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
         struct virtio_crypto_request *vc_req = &vc_sym_req->base;
-        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
         struct virtio_crypto *vcrypto = ctx->vcrypto;
         struct virtio_crypto_op_data_req *req_data;
         int src_nents, dst_nents;
@@ -373,7 +356,7 @@
         uint8_t *iv;
         struct scatterlist *sg;

-        src_nents = sg_nents_for_len(req->src, req->nbytes);
+        src_nents = sg_nents_for_len(req->src, req->cryptlen);
         if (src_nents < 0) {
                 pr_err("Invalid number of src SG.\n");
                 return src_nents;
@@ -409,13 +392,13 @@
         } else {
                 req_data->header.session_id =
                         cpu_to_le64(ctx->dec_sess_info.session_id);
-                req_data->header.opcode =
+                req_data->header.opcode =
                         cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
         }
         req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
         req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
         req_data->u.sym_req.u.cipher.para.src_data_len =
-                cpu_to_le32(req->nbytes);
+                cpu_to_le32(req->cryptlen);

         dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
         if (unlikely(dst_len > U32_MAX)) {
@@ -424,11 +407,11 @@
                 goto free;
         }

-        dst_len = min_t(unsigned int, req->nbytes, dst_len);
+        dst_len = min_t(unsigned int, req->cryptlen, dst_len);
         pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
-                        req->nbytes, dst_len);
+                        req->cryptlen, dst_len);

-        if (unlikely(req->nbytes + dst_len + ivsize +
+        if (unlikely(req->cryptlen + dst_len + ivsize +
                 sizeof(vc_req->status) > vcrypto->max_size)) {
                 pr_err("virtio_crypto: The length is too big\n");
                 err = -EINVAL;
@@ -454,10 +437,10 @@
                 err = -ENOMEM;
                 goto free;
         }
-        memcpy(iv, req->info, ivsize);
+        memcpy(iv, req->iv, ivsize);
         if (!vc_sym_req->encrypt)
-                scatterwalk_map_and_copy(req->info, req->src,
-                                         req->nbytes - AES_BLOCK_SIZE,
+                scatterwalk_map_and_copy(req->iv, req->src,
+                                         req->cryptlen - AES_BLOCK_SIZE,
                                          AES_BLOCK_SIZE, 0);

         sg_init_one(&iv_sg, iv, ivsize);
@@ -489,100 +472,100 @@
         return 0;

 free_iv:
-        kzfree(iv);
+        kfree_sensitive(iv);
 free:
-        kzfree(req_data);
+        kfree_sensitive(req_data);
         kfree(sgs);
         return err;
 }

-static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
 {
-        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
-        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
         struct virtio_crypto_sym_request *vc_sym_req =
-                                ablkcipher_request_ctx(req);
+                                skcipher_request_ctx(req);
         struct virtio_crypto_request *vc_req = &vc_sym_req->base;
         struct virtio_crypto *vcrypto = ctx->vcrypto;
         /* Use the first data virtqueue as default */
         struct data_queue *data_vq = &vcrypto->data_vq[0];

-        if (!req->nbytes)
+        if (!req->cryptlen)
                 return 0;
-        if (req->nbytes % AES_BLOCK_SIZE)
+        if (req->cryptlen % AES_BLOCK_SIZE)
                 return -EINVAL;

         vc_req->dataq = data_vq;
         vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
-        vc_sym_req->ablkcipher_ctx = ctx;
-        vc_sym_req->ablkcipher_req = req;
+        vc_sym_req->skcipher_ctx = ctx;
+        vc_sym_req->skcipher_req = req;
         vc_sym_req->encrypt = true;

-        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
 }

-static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
 {
-        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
-        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
         struct virtio_crypto_sym_request *vc_sym_req =
-                                ablkcipher_request_ctx(req);
+                                skcipher_request_ctx(req);
         struct virtio_crypto_request *vc_req = &vc_sym_req->base;
         struct virtio_crypto *vcrypto = ctx->vcrypto;
         /* Use the first data virtqueue as default */
         struct data_queue *data_vq = &vcrypto->data_vq[0];

-        if (!req->nbytes)
+        if (!req->cryptlen)
                 return 0;
-        if (req->nbytes % AES_BLOCK_SIZE)
+        if (req->cryptlen % AES_BLOCK_SIZE)
                 return -EINVAL;

         vc_req->dataq = data_vq;
         vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
-        vc_sym_req->ablkcipher_ctx = ctx;
-        vc_sym_req->ablkcipher_req = req;
+        vc_sym_req->skcipher_ctx = ctx;
+        vc_sym_req->skcipher_req = req;
         vc_sym_req->encrypt = false;

-        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
 }

-static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
 {
-        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

-        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
+        crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
         ctx->tfm = tfm;

-        ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+        ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
         ctx->enginectx.op.prepare_request = NULL;
         ctx->enginectx.op.unprepare_request = NULL;
         return 0;
 }

-static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
 {
-        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

         if (!ctx->vcrypto)
                 return;

-        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
-        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+        virtio_crypto_alg_skcipher_close_session(ctx, 1);
+        virtio_crypto_alg_skcipher_close_session(ctx, 0);
         virtcrypto_dev_put(ctx->vcrypto);
         ctx->vcrypto = NULL;
 }

-int virtio_crypto_ablkcipher_crypt_req(
+int virtio_crypto_skcipher_crypt_req(
         struct crypto_engine *engine, void *vreq)
 {
-        struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
+        struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
         struct virtio_crypto_sym_request *vc_sym_req =
-                                ablkcipher_request_ctx(req);
+                                skcipher_request_ctx(req);
         struct virtio_crypto_request *vc_req = &vc_sym_req->base;
         struct data_queue *data_vq = vc_req->dataq;
         int ret;

-        ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
+        ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
         if (ret < 0)
                 return ret;

@@ -591,19 +574,19 @@
         return 0;
 }

-static void virtio_crypto_ablkcipher_finalize_req(
+static void virtio_crypto_skcipher_finalize_req(
         struct virtio_crypto_sym_request *vc_sym_req,
-        struct ablkcipher_request *req,
+        struct skcipher_request *req,
         int err)
 {
         if (vc_sym_req->encrypt)
-                scatterwalk_map_and_copy(req->info, req->dst,
-                                         req->nbytes - AES_BLOCK_SIZE,
+                scatterwalk_map_and_copy(req->iv, req->dst,
+                                         req->cryptlen - AES_BLOCK_SIZE,
                                          AES_BLOCK_SIZE, 0);
-        kzfree(vc_sym_req->iv);
+        kfree_sensitive(vc_sym_req->iv);
         virtcrypto_clear_request(&vc_sym_req->base);

-        crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+        crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
                                         req, err);
 }

@@ -611,27 +594,22 @@
         .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
         .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
         .algo = {
-                .cra_name = "cbc(aes)",
-                .cra_driver_name = "virtio_crypto_aes_cbc",
-                .cra_priority = 150,
-                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-                .cra_blocksize = AES_BLOCK_SIZE,
-                .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
-                .cra_alignmask = 0,
-                .cra_module = THIS_MODULE,
-                .cra_type = &crypto_ablkcipher_type,
-                .cra_init = virtio_crypto_ablkcipher_init,
-                .cra_exit = virtio_crypto_ablkcipher_exit,
-                .cra_u = {
-                        .ablkcipher = {
-                                .setkey = virtio_crypto_ablkcipher_setkey,
-                                .decrypt = virtio_crypto_ablkcipher_decrypt,
-                                .encrypt = virtio_crypto_ablkcipher_encrypt,
-                                .min_keysize = AES_MIN_KEY_SIZE,
-                                .max_keysize = AES_MAX_KEY_SIZE,
-                                .ivsize = AES_BLOCK_SIZE,
-                        },
-                },
+                .base.cra_name = "cbc(aes)",
+                .base.cra_driver_name = "virtio_crypto_aes_cbc",
+                .base.cra_priority = 150,
+                .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                  CRYPTO_ALG_ALLOCATES_MEMORY,
+                .base.cra_blocksize = AES_BLOCK_SIZE,
+                .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
+                .base.cra_module = THIS_MODULE,
+                .init = virtio_crypto_skcipher_init,
+                .exit = virtio_crypto_skcipher_exit,
+                .setkey = virtio_crypto_skcipher_setkey,
+                .decrypt = virtio_crypto_skcipher_decrypt,
+                .encrypt = virtio_crypto_skcipher_encrypt,
+                .min_keysize = AES_MIN_KEY_SIZE,
+                .max_keysize = AES_MAX_KEY_SIZE,
+                .ivsize = AES_BLOCK_SIZE,
         },
 } };

@@ -651,14 +629,14 @@
                         continue;

                 if (virtio_crypto_algs[i].active_devs == 0) {
-                        ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+                        ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
                         if (ret)
                                 goto unlock;
                 }

                 virtio_crypto_algs[i].active_devs++;
                 dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
-                         virtio_crypto_algs[i].algo.cra_name);
+                         virtio_crypto_algs[i].algo.base.cra_name);
         }

 unlock:
@@ -682,7 +660,7 @@
                         continue;

                 if (virtio_crypto_algs[i].active_devs == 1)
-                        crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+                        crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

                 virtio_crypto_algs[i].active_devs--;
         }