forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/crypto/s5p-sss.c
....@@ -232,6 +232,7 @@
232232 * struct samsung_aes_variant - platform specific SSS driver data
233233 * @aes_offset: AES register offset from SSS module's base.
234234 * @hash_offset: HASH register offset from SSS module's base.
235
+ * @clk_names: names of clocks needed to run SSS IP
235236 *
236237 * Specifies platform specific configuration of SSS module.
237238 * Note: A structure for driver specific platform data is used for future
....@@ -240,6 +241,7 @@
240241 struct samsung_aes_variant {
241242 unsigned int aes_offset;
242243 unsigned int hash_offset;
244
+ const char *clk_names[2];
243245 };
244246
245247 struct s5p_aes_reqctx {
....@@ -249,8 +251,8 @@
249251 struct s5p_aes_ctx {
250252 struct s5p_aes_dev *dev;
251253
252
- uint8_t aes_key[AES_MAX_KEY_SIZE];
253
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
254
+ u8 aes_key[AES_MAX_KEY_SIZE];
255
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
254256 int keylen;
255257 };
256258
....@@ -258,6 +260,7 @@
258260 * struct s5p_aes_dev - Crypto device state container
259261 * @dev: Associated device
260262 * @clk: Clock for accessing hardware
263
+ * @pclk: APB bus clock necessary to access the hardware
261264 * @ioaddr: Mapped IO memory region
262265 * @aes_ioaddr: Per-variant offset for AES block IO memory
263266 * @irq_fc: Feed control interrupt line
....@@ -296,11 +299,12 @@
296299 struct s5p_aes_dev {
297300 struct device *dev;
298301 struct clk *clk;
302
+ struct clk *pclk;
299303 void __iomem *ioaddr;
300304 void __iomem *aes_ioaddr;
301305 int irq_fc;
302306
303
- struct ablkcipher_request *req;
307
+ struct skcipher_request *req;
304308 struct s5p_aes_ctx *ctx;
305309 struct scatterlist *sg_src;
306310 struct scatterlist *sg_dst;
....@@ -339,13 +343,13 @@
339343 * @engine: Bits for selecting type of HASH in SSS block
340344 * @sg: sg for DMA transfer
341345 * @sg_len: Length of sg for DMA transfer
342
- * @sgl[]: sg for joining buffer and req->src scatterlist
346
+ * @sgl: sg for joining buffer and req->src scatterlist
343347 * @skip: Skip offset in req->src for current op
344348 * @total: Total number of bytes for current request
345349 * @finup: Keep state for finup or final.
346350 * @error: Keep track of error.
347351 * @bufcnt: Number of bytes held in buffer[]
348
- * @buffer[]: For byte(s) from end of req->src in UPDATE op
352
+ * @buffer: For byte(s) from end of req->src in UPDATE op
349353 */
350354 struct s5p_hash_reqctx {
351355 struct s5p_aes_dev *dd;
....@@ -366,7 +370,7 @@
366370 bool error;
367371
368372 u32 bufcnt;
369
- u8 buffer[0];
373
+ u8 buffer[];
370374 };
371375
372376 /**
....@@ -384,11 +388,19 @@
384388 static const struct samsung_aes_variant s5p_aes_data = {
385389 .aes_offset = 0x4000,
386390 .hash_offset = 0x6000,
391
+ .clk_names = { "secss", },
387392 };
388393
389394 static const struct samsung_aes_variant exynos_aes_data = {
390395 .aes_offset = 0x200,
391396 .hash_offset = 0x400,
397
+ .clk_names = { "secss", },
398
+};
399
+
400
+static const struct samsung_aes_variant exynos5433_slim_aes_data = {
401
+ .aes_offset = 0x400,
402
+ .hash_offset = 0x800,
403
+ .clk_names = { "pclk", "aclk", },
392404 };
393405
394406 static const struct of_device_id s5p_sss_dt_match[] = {
....@@ -399,6 +411,10 @@
399411 {
400412 .compatible = "samsung,exynos4210-secss",
401413 .data = &exynos_aes_data,
414
+ },
415
+ {
416
+ .compatible = "samsung,exynos5433-slim-sss",
417
+ .data = &exynos5433_slim_aes_data,
402418 },
403419 { },
404420 };
....@@ -441,7 +457,7 @@
441457 if (!*sg)
442458 return;
443459
444
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
460
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
445461 free_pages((unsigned long)sg_virt(*sg), get_order(len));
446462
447463 kfree(*sg);
....@@ -463,19 +479,27 @@
463479
464480 static void s5p_sg_done(struct s5p_aes_dev *dev)
465481 {
482
+ struct skcipher_request *req = dev->req;
483
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
484
+
466485 if (dev->sg_dst_cpy) {
467486 dev_dbg(dev->dev,
468487 "Copying %d bytes of output data back to original place\n",
469
- dev->req->nbytes);
488
+ dev->req->cryptlen);
470489 s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
471
- dev->req->nbytes, 1);
490
+ dev->req->cryptlen, 1);
472491 }
473492 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
474493 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
494
+ if (reqctx->mode & FLAGS_AES_CBC)
495
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
496
+
497
+ else if (reqctx->mode & FLAGS_AES_CTR)
498
+ memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
475499 }
476500
477501 /* Calls the completion. Cannot be called with dev->lock hold. */
478
-static void s5p_aes_complete(struct ablkcipher_request *req, int err)
502
+static void s5p_aes_complete(struct skcipher_request *req, int err)
479503 {
480504 req->base.complete(&req->base, err);
481505 }
....@@ -500,7 +524,7 @@
500524 if (!*dst)
501525 return -ENOMEM;
502526
503
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
527
+ len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
504528 pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
505529 if (!pages) {
506530 kfree(*dst);
....@@ -508,7 +532,7 @@
508532 return -ENOMEM;
509533 }
510534
511
- s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
535
+ s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
512536
513537 sg_init_table(*dst, 1);
514538 sg_set_buf(*dst, pages, len);
....@@ -518,46 +542,28 @@
518542
519543 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
520544 {
521
- int err;
545
+ if (!sg->length)
546
+ return -EINVAL;
522547
523
- if (!sg->length) {
524
- err = -EINVAL;
525
- goto exit;
526
- }
527
-
528
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
529
- if (!err) {
530
- err = -ENOMEM;
531
- goto exit;
532
- }
548
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
549
+ return -ENOMEM;
533550
534551 dev->sg_dst = sg;
535
- err = 0;
536552
537
-exit:
538
- return err;
553
+ return 0;
539554 }
540555
541556 static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
542557 {
543
- int err;
558
+ if (!sg->length)
559
+ return -EINVAL;
544560
545
- if (!sg->length) {
546
- err = -EINVAL;
547
- goto exit;
548
- }
549
-
550
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
551
- if (!err) {
552
- err = -ENOMEM;
553
- goto exit;
554
- }
561
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
562
+ return -ENOMEM;
555563
556564 dev->sg_src = sg;
557
- err = 0;
558565
559
-exit:
560
- return err;
566
+ return 0;
561567 }
562568
563569 /*
....@@ -655,15 +661,14 @@
655661 {
656662 struct platform_device *pdev = dev_id;
657663 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
658
- struct ablkcipher_request *req;
664
+ struct skcipher_request *req;
659665 int err_dma_tx = 0;
660666 int err_dma_rx = 0;
661667 int err_dma_hx = 0;
662668 bool tx_end = false;
663669 bool hx_end = false;
664670 unsigned long flags;
665
- uint32_t status;
666
- u32 st_bits;
671
+ u32 status, st_bits;
667672 int err;
668673
669674 spin_lock_irqsave(&dev->lock, flags);
....@@ -1121,7 +1126,7 @@
11211126 * s5p_hash_prepare_sgs() - prepare sg for processing
11221127 * @ctx: request context
11231128 * @sg: source scatterlist request
1124
- * @nbytes: number of bytes to process from sg
1129
+ * @new_len: number of bytes to process from sg
11251130 * @final: final flag
11261131 *
11271132 * Check two conditions: (1) if buffers in sg have len aligned data, and (2)
....@@ -1517,38 +1522,6 @@
15171522 }
15181523
15191524 /**
1520
- * s5p_hash_shash_digest() - calculate shash digest
1521
- * @tfm: crypto transformation
1522
- * @flags: tfm flags
1523
- * @data: input data
1524
- * @len: length of data
1525
- * @out: output buffer
1526
- */
1527
-static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
1528
- const u8 *data, unsigned int len, u8 *out)
1529
-{
1530
- SHASH_DESC_ON_STACK(shash, tfm);
1531
-
1532
- shash->tfm = tfm;
1533
- shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
1534
-
1535
- return crypto_shash_digest(shash, data, len, out);
1536
-}
1537
-
1538
-/**
1539
- * s5p_hash_final_shash() - calculate shash digest
1540
- * @req: AHASH request
1541
- */
1542
-static int s5p_hash_final_shash(struct ahash_request *req)
1543
-{
1544
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1545
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1546
-
1547
- return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
1548
- ctx->buffer, ctx->bufcnt, req->result);
1549
-}
1550
-
1551
-/**
15521525 * s5p_hash_final() - close up hash and calculate digest
15531526 * @req: AHASH request
15541527 *
....@@ -1579,8 +1552,12 @@
15791552 if (ctx->error)
15801553 return -EINVAL; /* uncompleted hash is not needed */
15811554
1582
- if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
1583
- return s5p_hash_final_shash(req);
1555
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
1556
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1557
+
1558
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
1559
+ ctx->bufcnt, req->result);
1560
+ }
15841561
15851562 return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
15861563 }
....@@ -1832,13 +1809,18 @@
18321809 };
18331810
18341811 static void s5p_set_aes(struct s5p_aes_dev *dev,
1835
- const uint8_t *key, const uint8_t *iv,
1812
+ const u8 *key, const u8 *iv, const u8 *ctr,
18361813 unsigned int keylen)
18371814 {
18381815 void __iomem *keystart;
18391816
18401817 if (iv)
1841
- memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
1818
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
1819
+ AES_BLOCK_SIZE);
1820
+
1821
+ if (ctr)
1822
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
1823
+ AES_BLOCK_SIZE);
18421824
18431825 if (keylen == AES_KEYSIZE_256)
18441826 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
....@@ -1862,7 +1844,7 @@
18621844 }
18631845
18641846 static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1865
- struct ablkcipher_request *req)
1847
+ struct skcipher_request *req)
18661848 {
18671849 struct scatterlist *sg;
18681850 int err;
....@@ -1889,7 +1871,7 @@
18891871 }
18901872
18911873 static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1892
- struct ablkcipher_request *req)
1874
+ struct skcipher_request *req)
18931875 {
18941876 struct scatterlist *sg;
18951877 int err;
....@@ -1917,24 +1899,28 @@
19171899
19181900 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
19191901 {
1920
- struct ablkcipher_request *req = dev->req;
1921
- uint32_t aes_control;
1902
+ struct skcipher_request *req = dev->req;
1903
+ u32 aes_control;
19221904 unsigned long flags;
19231905 int err;
1924
- u8 *iv;
1906
+ u8 *iv, *ctr;
19251907
1908
+ /* This sets bit [13:12] to 00, which selects 128-bit counter */
19261909 aes_control = SSS_AES_KEY_CHANGE_MODE;
19271910 if (mode & FLAGS_AES_DECRYPT)
19281911 aes_control |= SSS_AES_MODE_DECRYPT;
19291912
19301913 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
19311914 aes_control |= SSS_AES_CHAIN_MODE_CBC;
1932
- iv = req->info;
1915
+ iv = req->iv;
1916
+ ctr = NULL;
19331917 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
19341918 aes_control |= SSS_AES_CHAIN_MODE_CTR;
1935
- iv = req->info;
1919
+ iv = NULL;
1920
+ ctr = req->iv;
19361921 } else {
19371922 iv = NULL; /* AES_ECB */
1923
+ ctr = NULL;
19381924 }
19391925
19401926 if (dev->ctx->keylen == AES_KEYSIZE_192)
....@@ -1966,7 +1952,7 @@
19661952 goto outdata_error;
19671953
19681954 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1969
- s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
1955
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
19701956
19711957 s5p_set_dma_indata(dev, dev->sg_src);
19721958 s5p_set_dma_outdata(dev, dev->sg_dst);
....@@ -2009,24 +1995,24 @@
20091995 if (backlog)
20101996 backlog->complete(backlog, -EINPROGRESS);
20111997
2012
- dev->req = ablkcipher_request_cast(async_req);
1998
+ dev->req = skcipher_request_cast(async_req);
20131999 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
2014
- reqctx = ablkcipher_request_ctx(dev->req);
2000
+ reqctx = skcipher_request_ctx(dev->req);
20152001
20162002 s5p_aes_crypt_start(dev, reqctx->mode);
20172003 }
20182004
20192005 static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2020
- struct ablkcipher_request *req)
2006
+ struct skcipher_request *req)
20212007 {
20222008 unsigned long flags;
20232009 int err;
20242010
20252011 spin_lock_irqsave(&dev->lock, flags);
2026
- err = ablkcipher_enqueue_request(&dev->queue, req);
2012
+ err = crypto_enqueue_request(&dev->queue, &req->base);
20272013 if (dev->busy) {
20282014 spin_unlock_irqrestore(&dev->lock, flags);
2029
- goto exit;
2015
+ return err;
20302016 }
20312017 dev->busy = true;
20322018
....@@ -2034,19 +2020,22 @@
20342020
20352021 tasklet_schedule(&dev->tasklet);
20362022
2037
-exit:
20382023 return err;
20392024 }
20402025
2041
-static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2026
+static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
20422027 {
2043
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2044
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
2045
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2028
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2029
+ struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
2030
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
20462031 struct s5p_aes_dev *dev = ctx->dev;
20472032
2048
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
2049
- dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
2033
+ if (!req->cryptlen)
2034
+ return 0;
2035
+
2036
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
2037
+ ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
2038
+ dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
20502039 return -EINVAL;
20512040 }
20522041
....@@ -2055,10 +2044,10 @@
20552044 return s5p_aes_handle_req(dev, req);
20562045 }
20572046
2058
-static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
2059
- const uint8_t *key, unsigned int keylen)
2047
+static int s5p_aes_setkey(struct crypto_skcipher *cipher,
2048
+ const u8 *key, unsigned int keylen)
20602049 {
2061
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2050
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
20622051 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
20632052
20642053 if (keylen != AES_KEYSIZE_128 &&
....@@ -2072,79 +2061,97 @@
20722061 return 0;
20732062 }
20742063
2075
-static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
2064
+static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
20762065 {
20772066 return s5p_aes_crypt(req, 0);
20782067 }
20792068
2080
-static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
2069
+static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
20812070 {
20822071 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
20832072 }
20842073
2085
-static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
2074
+static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
20862075 {
20872076 return s5p_aes_crypt(req, FLAGS_AES_CBC);
20882077 }
20892078
2090
-static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
2079
+static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
20912080 {
20922081 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
20932082 }
20942083
2095
-static int s5p_aes_cra_init(struct crypto_tfm *tfm)
2084
+static int s5p_aes_ctr_crypt(struct skcipher_request *req)
20962085 {
2097
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2086
+ return s5p_aes_crypt(req, FLAGS_AES_CTR);
2087
+}
2088
+
2089
+static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
2090
+{
2091
+ struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
20982092
20992093 ctx->dev = s5p_dev;
2100
- tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
2094
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
21012095
21022096 return 0;
21032097 }
21042098
2105
-static struct crypto_alg algs[] = {
2099
+static struct skcipher_alg algs[] = {
21062100 {
2107
- .cra_name = "ecb(aes)",
2108
- .cra_driver_name = "ecb-aes-s5p",
2109
- .cra_priority = 100,
2110
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2111
- CRYPTO_ALG_ASYNC |
2101
+ .base.cra_name = "ecb(aes)",
2102
+ .base.cra_driver_name = "ecb-aes-s5p",
2103
+ .base.cra_priority = 100,
2104
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
21122105 CRYPTO_ALG_KERN_DRIVER_ONLY,
2113
- .cra_blocksize = AES_BLOCK_SIZE,
2114
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2115
- .cra_alignmask = 0x0f,
2116
- .cra_type = &crypto_ablkcipher_type,
2117
- .cra_module = THIS_MODULE,
2118
- .cra_init = s5p_aes_cra_init,
2119
- .cra_u.ablkcipher = {
2120
- .min_keysize = AES_MIN_KEY_SIZE,
2121
- .max_keysize = AES_MAX_KEY_SIZE,
2122
- .setkey = s5p_aes_setkey,
2123
- .encrypt = s5p_aes_ecb_encrypt,
2124
- .decrypt = s5p_aes_ecb_decrypt,
2125
- }
2106
+ .base.cra_blocksize = AES_BLOCK_SIZE,
2107
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2108
+ .base.cra_alignmask = 0x0f,
2109
+ .base.cra_module = THIS_MODULE,
2110
+
2111
+ .min_keysize = AES_MIN_KEY_SIZE,
2112
+ .max_keysize = AES_MAX_KEY_SIZE,
2113
+ .setkey = s5p_aes_setkey,
2114
+ .encrypt = s5p_aes_ecb_encrypt,
2115
+ .decrypt = s5p_aes_ecb_decrypt,
2116
+ .init = s5p_aes_init_tfm,
21262117 },
21272118 {
2128
- .cra_name = "cbc(aes)",
2129
- .cra_driver_name = "cbc-aes-s5p",
2130
- .cra_priority = 100,
2131
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2132
- CRYPTO_ALG_ASYNC |
2119
+ .base.cra_name = "cbc(aes)",
2120
+ .base.cra_driver_name = "cbc-aes-s5p",
2121
+ .base.cra_priority = 100,
2122
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
21332123 CRYPTO_ALG_KERN_DRIVER_ONLY,
2134
- .cra_blocksize = AES_BLOCK_SIZE,
2135
- .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2136
- .cra_alignmask = 0x0f,
2137
- .cra_type = &crypto_ablkcipher_type,
2138
- .cra_module = THIS_MODULE,
2139
- .cra_init = s5p_aes_cra_init,
2140
- .cra_u.ablkcipher = {
2141
- .min_keysize = AES_MIN_KEY_SIZE,
2142
- .max_keysize = AES_MAX_KEY_SIZE,
2143
- .ivsize = AES_BLOCK_SIZE,
2144
- .setkey = s5p_aes_setkey,
2145
- .encrypt = s5p_aes_cbc_encrypt,
2146
- .decrypt = s5p_aes_cbc_decrypt,
2147
- }
2124
+ .base.cra_blocksize = AES_BLOCK_SIZE,
2125
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2126
+ .base.cra_alignmask = 0x0f,
2127
+ .base.cra_module = THIS_MODULE,
2128
+
2129
+ .min_keysize = AES_MIN_KEY_SIZE,
2130
+ .max_keysize = AES_MAX_KEY_SIZE,
2131
+ .ivsize = AES_BLOCK_SIZE,
2132
+ .setkey = s5p_aes_setkey,
2133
+ .encrypt = s5p_aes_cbc_encrypt,
2134
+ .decrypt = s5p_aes_cbc_decrypt,
2135
+ .init = s5p_aes_init_tfm,
2136
+ },
2137
+ {
2138
+ .base.cra_name = "ctr(aes)",
2139
+ .base.cra_driver_name = "ctr-aes-s5p",
2140
+ .base.cra_priority = 100,
2141
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
2142
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
2143
+ .base.cra_blocksize = 1,
2144
+ .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2145
+ .base.cra_alignmask = 0x0f,
2146
+ .base.cra_module = THIS_MODULE,
2147
+
2148
+ .min_keysize = AES_MIN_KEY_SIZE,
2149
+ .max_keysize = AES_MAX_KEY_SIZE,
2150
+ .ivsize = AES_BLOCK_SIZE,
2151
+ .setkey = s5p_aes_setkey,
2152
+ .encrypt = s5p_aes_ctr_crypt,
2153
+ .decrypt = s5p_aes_ctr_crypt,
2154
+ .init = s5p_aes_init_tfm,
21482155 },
21492156 };
21502157
....@@ -2195,16 +2202,36 @@
21952202 return PTR_ERR(pdata->ioaddr);
21962203 }
21972204
2198
- pdata->clk = devm_clk_get(dev, "secss");
2199
- if (IS_ERR(pdata->clk)) {
2200
- dev_err(dev, "failed to find secss clock source\n");
2201
- return -ENOENT;
2202
- }
2205
+ pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
2206
+ if (IS_ERR(pdata->clk))
2207
+ return dev_err_probe(dev, PTR_ERR(pdata->clk),
2208
+ "failed to find secss clock %s\n",
2209
+ variant->clk_names[0]);
22032210
22042211 err = clk_prepare_enable(pdata->clk);
22052212 if (err < 0) {
2206
- dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
2213
+ dev_err(dev, "Enabling clock %s failed, err %d\n",
2214
+ variant->clk_names[0], err);
22072215 return err;
2216
+ }
2217
+
2218
+ if (variant->clk_names[1]) {
2219
+ pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
2220
+ if (IS_ERR(pdata->pclk)) {
2221
+ err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
2222
+ "failed to find clock %s\n",
2223
+ variant->clk_names[1]);
2224
+ goto err_clk;
2225
+ }
2226
+
2227
+ err = clk_prepare_enable(pdata->pclk);
2228
+ if (err < 0) {
2229
+ dev_err(dev, "Enabling clock %s failed, err %d\n",
2230
+ variant->clk_names[0], err);
2231
+ goto err_clk;
2232
+ }
2233
+ } else {
2234
+ pdata->pclk = NULL;
22082235 }
22092236
22102237 spin_lock_init(&pdata->lock);
....@@ -2236,7 +2263,7 @@
22362263 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
22372264
22382265 for (i = 0; i < ARRAY_SIZE(algs); i++) {
2239
- err = crypto_register_alg(&algs[i]);
2266
+ err = crypto_register_skcipher(&algs[i]);
22402267 if (err)
22412268 goto err_algs;
22422269 }
....@@ -2273,17 +2300,19 @@
22732300
22742301 err_algs:
22752302 if (i < ARRAY_SIZE(algs))
2276
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
2303
+ dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
22772304 err);
22782305
22792306 for (j = 0; j < i; j++)
2280
- crypto_unregister_alg(&algs[j]);
2307
+ crypto_unregister_skcipher(&algs[j]);
22812308
22822309 tasklet_kill(&pdata->tasklet);
22832310
22842311 err_irq:
2285
- clk_disable_unprepare(pdata->clk);
2312
+ clk_disable_unprepare(pdata->pclk);
22862313
2314
+err_clk:
2315
+ clk_disable_unprepare(pdata->clk);
22872316 s5p_dev = NULL;
22882317
22892318 return err;
....@@ -2298,7 +2327,7 @@
22982327 return -ENODEV;
22992328
23002329 for (i = 0; i < ARRAY_SIZE(algs); i++)
2301
- crypto_unregister_alg(&algs[i]);
2330
+ crypto_unregister_skcipher(&algs[i]);
23022331
23032332 tasklet_kill(&pdata->tasklet);
23042333 if (pdata->use_hash) {
....@@ -2310,6 +2339,8 @@
23102339 pdata->use_hash = false;
23112340 }
23122341
2342
+ clk_disable_unprepare(pdata->pclk);
2343
+
23132344 clk_disable_unprepare(pdata->clk);
23142345 s5p_dev = NULL;
23152346