2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/caam/caampkc.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
  * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
  *
  * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
  * all the desired key parameters, input and output pointers.
@@ -15,19 +17,36 @@
 #include "sg_sw_sec4.h"
 #include "caampkc.h"
 
-#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
 #define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
-				 sizeof(struct rsa_priv_f1_pdb))
+				 SIZEOF_RSA_PRIV_F1_PDB)
 #define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
-				 sizeof(struct rsa_priv_f2_pdb))
+				 SIZEOF_RSA_PRIV_F2_PDB)
 #define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
-				 sizeof(struct rsa_priv_f3_pdb))
+				 SIZEOF_RSA_PRIV_F3_PDB)
+#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
+
+/* buffer filled with zeros, used for padding */
+static u8 *zero_buffer;
+
+/*
+ * variable used to avoid double free of resources in case
+ * algorithm registration was unsuccessful
+ */
+static bool init_done;
+
+struct caam_akcipher_alg {
+	struct akcipher_alg akcipher;
+	bool registered;
+};
 
 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
 			 struct akcipher_request *req)
 {
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
-	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
 
 	if (edesc->sec4_sg_bytes)
 		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
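
The zero_buffer and CAAM_RSA_MAX_INPUT_SIZE added above exist because the RSA primitive wants an input exactly as long as the modulus: a request shorter than n_sz is left-padded with zero bytes through an extra hardware S/G entry rather than by copying the caller's buffer. A minimal user-space sketch of the length bookkeeping involved (the helper name is illustrative, not part of the driver):

    #include <stddef.h>

    /* Illustrative only: zero padding needed for a short RSA input. */
    static size_t rsa_left_pad_len(size_t src_len, size_t n_sz)
    {
        return (src_len < n_sz) ? n_sz - src_len : 0;
    }

Assuming at least one byte of input, a 4096-bit (512-byte) modulus never needs more than 511 bytes of padding, which is presumably why one shared buffer of CAAM_RSA_MAX_INPUT_SIZE - 1 zero bytes is enough for every request.
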
@@ -98,74 +117,82 @@
 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
 {
 	struct akcipher_request *req = context;
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	struct rsa_edesc *edesc;
+	int ecode = 0;
+	bool has_bklog;
 
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 
-	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+	edesc = req_ctx->edesc;
+	has_bklog = edesc->bklog;
 
 	rsa_pub_unmap(dev, edesc, req);
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
 
-	akcipher_request_complete(req, err);
+	/*
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
+	 */
+	if (!has_bklog)
+		akcipher_request_complete(req, ecode);
+	else
+		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
 }
 
-static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
-			     void *context)
+static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
+			    void *context)
 {
 	struct akcipher_request *req = context;
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 	struct rsa_edesc *edesc;
+	int ecode = 0;
+	bool has_bklog;
 
 	if (err)
-		caam_jr_strstatus(dev, err);
+		ecode = caam_jr_strstatus(dev, err);
 
-	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+	edesc = req_ctx->edesc;
+	has_bklog = edesc->bklog;
 
-	rsa_priv_f1_unmap(dev, edesc, req);
+	switch (key->priv_form) {
+	case FORM1:
+		rsa_priv_f1_unmap(dev, edesc, req);
+		break;
+	case FORM2:
+		rsa_priv_f2_unmap(dev, edesc, req);
+		break;
+	case FORM3:
+		rsa_priv_f3_unmap(dev, edesc, req);
+	}
+
 	rsa_io_unmap(dev, edesc, req);
 	kfree(edesc);
 
-	akcipher_request_complete(req, err);
+	/*
+	 * If no backlog flag, the completion of the request is done
+	 * by CAAM, not crypto engine.
+	 */
+	if (!has_bklog)
+		akcipher_request_complete(req, ecode);
+	else
+		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
 }
 
-static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
-			     void *context)
-{
-	struct akcipher_request *req = context;
-	struct rsa_edesc *edesc;
-
-	if (err)
-		caam_jr_strstatus(dev, err);
-
-	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
-	rsa_priv_f2_unmap(dev, edesc, req);
-	rsa_io_unmap(dev, edesc, req);
-	kfree(edesc);
-
-	akcipher_request_complete(req, err);
-}
-
-static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
-			     void *context)
-{
-	struct akcipher_request *req = context;
-	struct rsa_edesc *edesc;
-
-	if (err)
-		caam_jr_strstatus(dev, err);
-
-	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
-	rsa_priv_f3_unmap(dev, edesc, req);
-	rsa_io_unmap(dev, edesc, req);
-	kfree(edesc);
-
-	akcipher_request_complete(req, err);
-}
-
+/**
+ * Count leading zeros, need it to strip, from a given scatterlist
+ *
+ * @sgl : scatterlist to count zeros from
+ * @nbytes: number of zeros, in bytes, to strip
+ * @flags : operation flags
+ */
 static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
 					unsigned int nbytes,
 					unsigned int flags)
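
The three nearly identical rsa_priv_f{1,2,3}_done callbacks above collapse into a single rsa_priv_f_done that looks up the private-key form in the transform context, picks the matching unmap helper, and then either completes the request directly or hands it back to crypto-engine depending on whether it was backlogged. A stand-alone sketch of that shape (the enum, helper name and callbacks are illustrative, not the kernel API):

    #include <stdbool.h>

    enum priv_form_sketch { FORM1_S, FORM2_S, FORM3_S };

    static void priv_done_sketch(enum priv_form_sketch form, int ecode,
                                 bool has_bklog,
                                 void (*complete)(int),
                                 void (*finalize_via_engine)(int))
    {
        switch (form) {
        case FORM1_S: /* rsa_priv_f1_unmap() */ break;
        case FORM2_S: /* rsa_priv_f2_unmap() */ break;
        case FORM3_S: /* rsa_priv_f3_unmap() */ break;
        }

        if (!has_bklog)
            complete(ecode);            /* CAAM completed the request */
        else
            finalize_via_engine(ecode); /* crypto-engine finalizes it */
    }
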
@@ -185,7 +212,8 @@
 	lzeros = 0;
 	len = 0;
 	while (nbytes > 0) {
-		while (len && !*buff) {
+		/* do not strip more than given bytes */
+		while (len && !*buff && lzeros < nbytes) {
 			lzeros++;
 			len--;
 			buff++;
@@ -194,7 +222,9 @@
 		if (len && *buff)
 			break;
 
-		sg_miter_next(&miter);
+		if (!sg_miter_next(&miter))
+			break;
+
 		buff = miter.addr;
 		len = miter.length;
 
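
Because the request is no longer shortened in place, the zero counting above is now capped: only bytes in excess of the modulus size may be stripped, and the walk stops cleanly if the scatterlist ends early. The same capped count over a flat buffer, as a self-contained sketch (no scatterlist handling):

    #include <stddef.h>
    #include <stdint.h>

    /* Count leading zero bytes, but never strip more than max_strip. */
    static size_t count_leading_zeros_capped(const uint8_t *buf, size_t len,
                                             size_t max_strip)
    {
        size_t lzeros = 0;

        while (lzeros < len && lzeros < max_strip && buf[lzeros] == 0)
            lzeros++;

        return lzeros;
    }
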
@@ -216,29 +246,67 @@
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct device *dev = ctx->dev;
 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct caam_rsa_key *key = &ctx->key;
 	struct rsa_edesc *edesc;
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
 	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
-	int sgc;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 	int src_nents, dst_nents;
+	int mapped_src_nents, mapped_dst_nents;
+	unsigned int diff_size = 0;
 	int lzeros;
 
-	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
-	if (lzeros < 0)
-		return ERR_PTR(lzeros);
+	if (req->src_len > key->n_sz) {
+		/*
+		 * strip leading zeros and
+		 * return the number of zeros to skip
+		 */
+		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
+						      key->n_sz, sg_flags);
+		if (lzeros < 0)
+			return ERR_PTR(lzeros);
 
-	req->src_len -= lzeros;
-	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+						      lzeros);
+		req_ctx->fixup_src_len = req->src_len - lzeros;
+	} else {
+		/*
+		 * input src is less then n key modulus,
+		 * so there will be zero padding
+		 */
+		diff_size = key->n_sz - req->src_len;
+		req_ctx->fixup_src = req->src;
+		req_ctx->fixup_src_len = req->src_len;
+	}
 
-	src_nents = sg_nents_for_len(req->src, req->src_len);
+	src_nents = sg_nents_for_len(req_ctx->fixup_src,
+				     req_ctx->fixup_src_len);
 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
 
-	if (src_nents > 1)
-		sec4_sg_len = src_nents;
-	if (dst_nents > 1)
-		sec4_sg_len += dst_nents;
+	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
+	if (unlikely(!mapped_src_nents)) {
+		dev_err(dev, "unable to map source\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+				      DMA_FROM_DEVICE);
+	if (unlikely(!mapped_dst_nents)) {
+		dev_err(dev, "unable to map destination\n");
+		goto src_fail;
+	}
+
+	if (!diff_size && mapped_src_nents == 1)
+		sec4_sg_len = 0; /* no need for an input hw s/g table */
+	else
+		sec4_sg_len = mapped_src_nents + !!diff_size;
+	sec4_sg_index = sec4_sg_len;
+
+	if (mapped_dst_nents > 1)
+		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+	else
+		sec4_sg_len = pad_sg_nents(sec4_sg_len);
 
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
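
The sizing above only builds an input hardware S/G table when the source is fragmented or a padding entry has to be prepended; the !!diff_size term reserves exactly one extra entry for the zero padding. A compact sketch of the same arithmetic, assuming pad_sg_nents() rounds an entry count up to the multiple the hardware requires (4 is used below purely for illustration):

    #include <stdbool.h>

    /* Placeholder for the driver's pad_sg_nents(); the multiple is assumed. */
    static int pad_sg_nents_sketch(int nents)
    {
        return (nents + 3) & ~3;
    }

    static int sec4_sg_len_sketch(int mapped_src_nents, int mapped_dst_nents,
                                  bool need_padding)
    {
        int len;

        if (!need_padding && mapped_src_nents == 1)
            len = 0;                         /* single segment, no table */
        else
            len = mapped_src_nents + (need_padding ? 1 : 0);

        if (mapped_dst_nents > 1)
            len += pad_sg_nents_sketch(mapped_dst_nents);
        else
            len = pad_sg_nents_sketch(len);

        return len;
    }
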
@@ -246,37 +314,32 @@
 	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
 	if (!edesc)
-		return ERR_PTR(-ENOMEM);
-
-	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-	if (unlikely(!sgc)) {
-		dev_err(dev, "unable to map source\n");
-		goto src_fail;
-	}
-
-	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
-	if (unlikely(!sgc)) {
-		dev_err(dev, "unable to map destination\n");
 		goto dst_fail;
-	}
 
 	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+	if (diff_size)
+		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
+				   0);
 
-	sec4_sg_index = 0;
-	if (src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
-		sec4_sg_index += src_nents;
-	}
-	if (dst_nents > 1)
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+	if (sec4_sg_index)
+		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+				   edesc->sec4_sg + !!diff_size, 0);
+
+	if (mapped_dst_nents > 1)
+		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
 	/* Save nents for later use in Job Descriptor */
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 
+	req_ctx->edesc = edesc;
+
 	if (!sec4_sg_bytes)
 		return edesc;
+
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
 
 	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
@@ -287,21 +350,53 @@
 
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 
+	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+			     edesc->sec4_sg_bytes, 1);
+
 	return edesc;
 
 sec4_sg_fail:
-	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
-dst_fail:
-	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-src_fail:
 	kfree(edesc);
+dst_fail:
+	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+src_fail:
+	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
 	return ERR_PTR(-ENOMEM);
+}
+
+static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
+{
+	struct akcipher_request *req = container_of(areq,
+						    struct akcipher_request,
+						    base);
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	u32 *desc = req_ctx->edesc->hw_desc;
+	int ret;
+
+	req_ctx->edesc->bklog = true;
+
+	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+
+	if (ret != -EINPROGRESS) {
+		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
+		rsa_io_unmap(jrdev, req_ctx->edesc, req);
+		kfree(req_ctx->edesc);
+	} else {
+		ret = 0;
+	}
+
+	return ret;
 }
 
 static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct caam_rsa_key *key = &ctx->key;
 	struct device *dev = ctx->dev;
@@ -321,15 +416,15 @@
 		return -ENOMEM;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
-		pdb->f_dma = sg_dma_address(req->src);
+		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -338,7 +433,7 @@
 	}
 
 	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
-	pdb->f_len = req->src_len;
+	pdb->f_len = req_ctx->fixup_src_len;
 
 	return 0;
 }
@@ -366,15 +461,18 @@
 		return -ENOMEM;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
+
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -429,15 +527,17 @@
 		goto unmap_tmp1;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -516,15 +616,17 @@
 		goto unmap_tmp1;
 	}
 
-	if (edesc->src_nents > 1) {
+	if (edesc->mapped_src_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
 		pdb->g_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
-	if (edesc->dst_nents > 1) {
+	if (edesc->mapped_dst_nents > 1) {
 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
 		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
@@ -551,6 +653,53 @@
 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
 
 	return -ENOMEM;
+}
+
+static int akcipher_enqueue_req(struct device *jrdev,
+				void (*cbk)(struct device *jrdev, u32 *desc,
+					    u32 err, void *context),
+				struct akcipher_request *req)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct rsa_edesc *edesc = req_ctx->edesc;
+	u32 *desc = edesc->hw_desc;
+	int ret;
+
+	req_ctx->akcipher_op_done = cbk;
+	/*
+	 * Only the backlog request are sent to crypto-engine since the others
+	 * can be handled by CAAM, if free, especially since JR has up to 1024
+	 * entries (more than the 10 entries from crypto-engine).
+	 */
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
+								 req);
+	else
+		ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+		switch (key->priv_form) {
+		case FORM1:
+			rsa_priv_f1_unmap(jrdev, edesc, req);
+			break;
+		case FORM2:
+			rsa_priv_f2_unmap(jrdev, edesc, req);
+			break;
+		case FORM3:
+			rsa_priv_f3_unmap(jrdev, edesc, req);
+			break;
+		default:
+			rsa_pub_unmap(jrdev, edesc, req);
+		}
+		rsa_io_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
 }
 
 static int caam_rsa_enc(struct akcipher_request *req)
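
akcipher_enqueue_req() centralizes submission: only requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG are routed through crypto-engine, everything else still goes straight to the job ring, which can absorb far more outstanding jobs on its own (the comment above cites 1024 job-ring entries versus roughly 10 in crypto-engine). A stripped-down sketch of that routing decision, with illustrative types and return codes rather than the kernel API:

    #include <stdbool.h>

    enum { RET_IN_PROGRESS = -115, RET_BUSY = -16 };  /* illustrative values */

    static int enqueue_sketch(bool may_backlog,
                              int (*to_engine)(void *req),
                              int (*to_job_ring)(void *req),
                              void *req)
    {
        int ret = may_backlog ? to_engine(req) : to_job_ring(req);

        if (ret != RET_IN_PROGRESS && ret != RET_BUSY) {
            /* the job was never queued: unmap the PDB and I/O, free it */
        }

        return ret;
    }
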
@@ -584,11 +733,7 @@
 	/* Initialize Job Descriptor */
 	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
 
-	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
-	if (!ret)
-		return -EINPROGRESS;
-
-	rsa_pub_unmap(jrdev, edesc, req);
+	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
 
 init_fail:
 	rsa_io_unmap(jrdev, edesc, req);
@@ -617,11 +762,7 @@
 	/* Initialize Job Descriptor */
 	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
 
-	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
-	if (!ret)
-		return -EINPROGRESS;
-
-	rsa_priv_f1_unmap(jrdev, edesc, req);
+	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
 
 init_fail:
 	rsa_io_unmap(jrdev, edesc, req);
@@ -650,11 +791,7 @@
 	/* Initialize Job Descriptor */
 	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
 
-	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
-	if (!ret)
-		return -EINPROGRESS;
-
-	rsa_priv_f2_unmap(jrdev, edesc, req);
+	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
 
 init_fail:
 	rsa_io_unmap(jrdev, edesc, req);
@@ -683,11 +820,7 @@
 	/* Initialize Job Descriptor */
 	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
 
-	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
-	if (!ret)
-		return -EINPROGRESS;
-
-	rsa_priv_f3_unmap(jrdev, edesc, req);
+	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
 
 init_fail:
 	rsa_io_unmap(jrdev, edesc, req);
@@ -723,14 +856,14 @@
 
 static void caam_rsa_free_key(struct caam_rsa_key *key)
 {
-	kzfree(key->d);
-	kzfree(key->p);
-	kzfree(key->q);
-	kzfree(key->dp);
-	kzfree(key->dq);
-	kzfree(key->qinv);
-	kzfree(key->tmp1);
-	kzfree(key->tmp2);
+	kfree_sensitive(key->d);
+	kfree_sensitive(key->p);
+	kfree_sensitive(key->q);
+	kfree_sensitive(key->dp);
+	kfree_sensitive(key->dq);
+	kfree_sensitive(key->qinv);
+	kfree_sensitive(key->tmp1);
+	kfree_sensitive(key->tmp2);
 	kfree(key->e);
 	kfree(key->n);
 	memset(key, 0, sizeof(*key));
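
kzfree() was renamed to kfree_sensitive() upstream; the behaviour is unchanged, the buffer is wiped before being returned to the allocator so key material does not linger in freed memory. A user-space analogue of what the helper does (the kernel version needs no length argument and uses memzero_explicit() so the wipe cannot be optimized away):

    #include <stdlib.h>
    #include <string.h>

    /* User-space analogue of kfree_sensitive(): wipe, then free. */
    static void free_sensitive(void *p, size_t len)
    {
        if (!p)
            return;
        memset(p, 0, len);  /* a plain memset may be elided; see note above */
        free(p);
    }
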
@@ -814,7 +947,7 @@
 		return ret;
 
 	/* Copy key in DMA zone */
-	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
 	if (!rsa_key->e)
 		goto err;
 
@@ -835,8 +968,6 @@
 
 	rsa_key->e_sz = raw_key.e_sz;
 	rsa_key->n_sz = raw_key.n_sz;
-
-	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
 
 	return 0;
 err:
@@ -889,17 +1020,17 @@
 	return;
 
 free_dq:
-	kzfree(rsa_key->dq);
+	kfree_sensitive(rsa_key->dq);
 free_dp:
-	kzfree(rsa_key->dp);
+	kfree_sensitive(rsa_key->dp);
 free_tmp2:
-	kzfree(rsa_key->tmp2);
+	kfree_sensitive(rsa_key->tmp2);
 free_tmp1:
-	kzfree(rsa_key->tmp1);
+	kfree_sensitive(rsa_key->tmp1);
 free_q:
-	kzfree(rsa_key->q);
+	kfree_sensitive(rsa_key->q);
 free_p:
-	kzfree(rsa_key->p);
+	kfree_sensitive(rsa_key->p);
 }
 
 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
@@ -918,11 +1049,11 @@
 		return ret;
 
 	/* Copy key in DMA zone */
-	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
 	if (!rsa_key->d)
 		goto err;
 
-	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
 	if (!rsa_key->e)
 		goto err;
 
@@ -944,9 +1075,6 @@
 	rsa_key->d_sz = raw_key.d_sz;
 	rsa_key->e_sz = raw_key.e_sz;
 	rsa_key->n_sz = raw_key.n_sz;
-
-	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
-	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
 
 	caam_rsa_set_priv_key_form(ctx, &raw_key);
 
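
The kzalloc() + memcpy() pairs for the key material collapse into kmemdup(), which allocates and copies in one call and returns NULL on failure; that is why the standalone memcpy() lines further down are deleted. The two forms are equivalent:

    /* before: allocate, check, then copy the raw exponent */
    rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
    if (!rsa_key->e)
        goto err;
    memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

    /* after: one call that allocates and copies */
    rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
    if (!rsa_key->e)
        goto err;
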
@@ -976,6 +1104,17 @@
 		return PTR_ERR(ctx->dev);
 	}
 
+	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
+					  CAAM_RSA_MAX_INPUT_SIZE - 1,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
+		dev_err(ctx->dev, "unable to map padding\n");
+		caam_jr_free(ctx->dev);
+		return -ENOMEM;
+	}
+
+	ctx->enginectx.op.do_one_request = akcipher_do_one_req;
+
 	return 0;
 }
 
@@ -985,90 +1124,90 @@
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct caam_rsa_key *key = &ctx->key;
 
+	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
+			 1, DMA_TO_DEVICE);
 	caam_rsa_free_key(key);
 	caam_jr_free(ctx->dev);
 }
 
-static struct akcipher_alg caam_rsa = {
-	.encrypt = caam_rsa_enc,
-	.decrypt = caam_rsa_dec,
-	.sign = caam_rsa_dec,
-	.verify = caam_rsa_enc,
-	.set_pub_key = caam_rsa_set_pub_key,
-	.set_priv_key = caam_rsa_set_priv_key,
-	.max_size = caam_rsa_max_size,
-	.init = caam_rsa_init_tfm,
-	.exit = caam_rsa_exit_tfm,
-	.reqsize = sizeof(struct caam_rsa_req_ctx),
-	.base = {
-		.cra_name = "rsa",
-		.cra_driver_name = "rsa-caam",
-		.cra_priority = 3000,
-		.cra_module = THIS_MODULE,
-		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
-	},
+static struct caam_akcipher_alg caam_rsa = {
+	.akcipher = {
+		.encrypt = caam_rsa_enc,
+		.decrypt = caam_rsa_dec,
+		.set_pub_key = caam_rsa_set_pub_key,
+		.set_priv_key = caam_rsa_set_priv_key,
+		.max_size = caam_rsa_max_size,
+		.init = caam_rsa_init_tfm,
+		.exit = caam_rsa_exit_tfm,
+		.reqsize = sizeof(struct caam_rsa_req_ctx),
+		.base = {
+			.cra_name = "rsa",
+			.cra_driver_name = "rsa-caam",
+			.cra_priority = 3000,
+			.cra_module = THIS_MODULE,
+			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
+		},
+	}
 };
 
 /* Public Key Cryptography module initialization handler */
-static int __init caam_pkc_init(void)
+int caam_pkc_init(struct device *ctrldev)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
-	u32 cha_inst, pk_inst;
+	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+	u32 pk_inst, pkha;
 	int err;
-
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev) {
-		of_node_put(dev_node);
-		return -ENODEV;
-	}
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	/*
-	 * If priv is NULL, it's probably because the caam driver wasn't
-	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-	 */
-	if (!priv)
-		return -ENODEV;
+	init_done = false;
 
 	/* Determine public key hardware accelerator presence. */
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+	if (priv->era < 10) {
+		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+	} else {
+		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+		pk_inst = pkha & CHA_VER_NUM_MASK;
+
+		/*
+		 * Newer CAAMs support partially disabled functionality. If this is the
+		 * case, the number is non-zero, but this bit is set to indicate that
+		 * no encryption or decryption is supported. Only signing and verifying
+		 * is supported.
+		 */
+		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
+			pk_inst = 0;
+	}
 
 	/* Do not register algorithms if PKHA is not present. */
 	if (!pk_inst)
-		return -ENODEV;
+		return 0;
 
-	err = crypto_register_akcipher(&caam_rsa);
-	if (err)
+	/* allocate zero buffer, used for padding input */
+	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
+			      GFP_KERNEL);
+	if (!zero_buffer)
+		return -ENOMEM;
+
+	err = crypto_register_akcipher(&caam_rsa.akcipher);
+
+	if (err) {
+		kfree(zero_buffer);
 		dev_warn(ctrldev, "%s alg registration failed\n",
-			 caam_rsa.base.cra_driver_name);
-	else
+			 caam_rsa.akcipher.base.cra_driver_name);
+	} else {
+		init_done = true;
+		caam_rsa.registered = true;
 		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+	}
 
 	return err;
 }
 
-static void __exit caam_pkc_exit(void)
+void caam_pkc_exit(void)
 {
-	crypto_unregister_akcipher(&caam_rsa);
+	if (!init_done)
+		return;
+
+	if (caam_rsa.registered)
+		crypto_unregister_akcipher(&caam_rsa.akcipher);
+
+	kfree(zero_buffer);
 }
-
-module_init(caam_pkc_init);
-module_exit(caam_pkc_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor");
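
caam_pkc_init() now takes the controller device from its caller instead of looking it up via the device tree, and it also copes with newer (era 10 and later) parts where the CHA instantiation counts live in per-accelerator version registers and where PKHA can be present but restricted to sign/verify; in that case pk_inst is forced to 0 and registration is quietly skipped rather than failing with -ENODEV. A compact sketch of that decision; the mask values below are placeholders, the real ones come from the CAAM register headers:

    #include <stdbool.h>
    #include <stdint.h>

    #define PKHA_NUM_MASK_SKETCH      0xffu        /* placeholder */
    #define PKHA_NO_CRYPT_SKETCH      (1u << 8)    /* placeholder bit */

    /* Decide whether the rsa-caam algorithm should be registered at all. */
    static bool pkha_usable_sketch(unsigned int era, uint32_t cha_num_ls,
                                   uint32_t pkha_vreg)
    {
        uint32_t pk_inst;

        if (era < 10)
            pk_inst = cha_num_ls & PKHA_NUM_MASK_SKETCH;   /* shift omitted */
        else if (pkha_vreg & PKHA_NO_CRYPT_SKETCH)
            pk_inst = 0;    /* PKHA present, but encrypt/decrypt disabled */
        else
            pk_inst = pkha_vreg & PKHA_NUM_MASK_SKETCH;

        return pk_inst != 0;
    }
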