| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * caam - Freescale FSL CAAM support for Public Key Cryptography |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright 2016 Freescale Semiconductor, Inc. |
|---|
| 6 | + * Copyright 2018-2019 NXP |
|---|
| 5 | 7 | * |
|---|
| 6 | 8 | * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
|---|
| 7 | 9 | * all the desired key parameters, input and output pointers. |
|---|
| .. | .. |
|---|
| 15 | 17 | #include "sg_sw_sec4.h" |
|---|
| 16 | 18 | #include "caampkc.h" |
|---|
| 17 | 19 | |
|---|
| 18 | | -#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) |
|---|
| 20 | +#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) |
|---|
| 19 | 21 | #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ |
|---|
| 20 | | - sizeof(struct rsa_priv_f1_pdb)) |
|---|
| 22 | + SIZEOF_RSA_PRIV_F1_PDB) |
|---|
| 21 | 23 | #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ |
|---|
| 22 | | - sizeof(struct rsa_priv_f2_pdb)) |
|---|
| 24 | + SIZEOF_RSA_PRIV_F2_PDB) |
|---|
| 23 | 25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ |
|---|
| 24 | | - sizeof(struct rsa_priv_f3_pdb)) |
|---|
| 26 | + SIZEOF_RSA_PRIV_F3_PDB) |
|---|
| 27 | +#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ |
|---|
| 28 | + |
|---|
| 29 | +/* buffer filled with zeros, used for padding */ |
|---|
| 30 | +static u8 *zero_buffer; |
|---|
| 31 | + |
|---|
| 32 | +/* |
|---|
| 33 | + * flag used to avoid a double free of resources in case
|---|
| 34 | + * algorithm registration was unsuccessful |
|---|
| 35 | + */ |
|---|
| 36 | +static bool init_done; |
|---|
| 37 | + |
|---|
| 38 | +struct caam_akcipher_alg { |
|---|
| 39 | + struct akcipher_alg akcipher; |
|---|
| 40 | + bool registered; |
|---|
| 41 | +}; |
|---|
| 25 | 42 | |
|---|
| 26 | 43 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, |
|---|
| 27 | 44 | struct akcipher_request *req) |
|---|
| 28 | 45 | { |
|---|
| 46 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 47 | + |
|---|
| 29 | 48 | dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); |
|---|
| 30 | | - dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); |
|---|
| 49 | + dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); |
|---|
| 31 | 50 | |
|---|
| 32 | 51 | if (edesc->sec4_sg_bytes) |
|---|
| 33 | 52 | dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, |
|---|
| .. | .. |
|---|
| 98 | 117 | static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) |
|---|
| 99 | 118 | { |
|---|
| 100 | 119 | struct akcipher_request *req = context; |
|---|
| 120 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 121 | + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
|---|
| 101 | 122 | struct rsa_edesc *edesc; |
|---|
| 123 | + int ecode = 0; |
|---|
| 124 | + bool has_bklog; |
|---|
| 102 | 125 | |
|---|
| 103 | 126 | if (err) |
|---|
| 104 | | - caam_jr_strstatus(dev, err); |
|---|
| 127 | + ecode = caam_jr_strstatus(dev, err); |
|---|
| 105 | 128 | |
|---|
| 106 | | - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
|---|
| 129 | + edesc = req_ctx->edesc; |
|---|
| 130 | + has_bklog = edesc->bklog; |
|---|
| 107 | 131 | |
|---|
| 108 | 132 | rsa_pub_unmap(dev, edesc, req); |
|---|
| 109 | 133 | rsa_io_unmap(dev, edesc, req); |
|---|
| 110 | 134 | kfree(edesc); |
|---|
| 111 | 135 | |
|---|
| 112 | | - akcipher_request_complete(req, err); |
|---|
| 136 | + /* |
|---|
| 137 | + * If there is no backlog flag, the request is completed
|---|
| 138 | + * by CAAM directly, not by the crypto engine.
|---|
| 139 | + */ |
|---|
| 140 | + if (!has_bklog) |
|---|
| 141 | + akcipher_request_complete(req, ecode); |
|---|
| 142 | + else |
|---|
| 143 | + crypto_finalize_akcipher_request(jrp->engine, req, ecode); |
|---|
| 113 | 144 | } |
|---|
| 114 | 145 | |
|---|
| 115 | | -static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, |
|---|
| 116 | | - void *context) |
|---|
| 146 | +static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, |
|---|
| 147 | + void *context) |
|---|
| 117 | 148 | { |
|---|
| 118 | 149 | struct akcipher_request *req = context; |
|---|
| 150 | + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
|---|
| 151 | + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
|---|
| 152 | + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 153 | + struct caam_rsa_key *key = &ctx->key; |
|---|
| 154 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 119 | 155 | struct rsa_edesc *edesc; |
|---|
| 156 | + int ecode = 0; |
|---|
| 157 | + bool has_bklog; |
|---|
| 120 | 158 | |
|---|
| 121 | 159 | if (err) |
|---|
| 122 | | - caam_jr_strstatus(dev, err); |
|---|
| 160 | + ecode = caam_jr_strstatus(dev, err); |
|---|
| 123 | 161 | |
|---|
| 124 | | - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
|---|
| 162 | + edesc = req_ctx->edesc; |
|---|
| 163 | + has_bklog = edesc->bklog; |
|---|
| 125 | 164 | |
|---|
| 126 | | - rsa_priv_f1_unmap(dev, edesc, req); |
|---|
| 165 | + switch (key->priv_form) { |
|---|
| 166 | + case FORM1: |
|---|
| 167 | + rsa_priv_f1_unmap(dev, edesc, req); |
|---|
| 168 | + break; |
|---|
| 169 | + case FORM2: |
|---|
| 170 | + rsa_priv_f2_unmap(dev, edesc, req); |
|---|
| 171 | + break; |
|---|
| 172 | + case FORM3: |
|---|
| 173 | + rsa_priv_f3_unmap(dev, edesc, req); |
|---|
| 174 | + } |
|---|
| 175 | + |
|---|
| 127 | 176 | rsa_io_unmap(dev, edesc, req); |
|---|
| 128 | 177 | kfree(edesc); |
|---|
| 129 | 178 | |
|---|
| 130 | | - akcipher_request_complete(req, err); |
|---|
| 179 | + /* |
|---|
| 180 | + * If there is no backlog flag, the request is completed
|---|
| 181 | + * by CAAM directly, not by the crypto engine.
|---|
| 182 | + */ |
|---|
| 183 | + if (!has_bklog) |
|---|
| 184 | + akcipher_request_complete(req, ecode); |
|---|
| 185 | + else |
|---|
| 186 | + crypto_finalize_akcipher_request(jrp->engine, req, ecode); |
|---|
| 131 | 187 | } |
|---|
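
Both completion callbacks above end with the same dispatch: a request that went through the crypto engine's backlog queue must be finalized by the engine, while anything submitted directly to the Job Ring is completed in place. A minimal sketch of the pattern (the helper name is illustrative, not part of the driver):

```c
/*
 * Completion dispatch shared by rsa_pub_done() and rsa_priv_f_done().
 * 'engine' corresponds to jrp->engine in the driver.
 */
static void pkc_complete(struct crypto_engine *engine,
			 struct akcipher_request *req,
			 bool has_bklog, int ecode)
{
	if (!has_bklog)
		akcipher_request_complete(req, ecode);	/* direct JR path */
	else
		crypto_finalize_akcipher_request(engine, req, ecode);
}
```
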
| 132 | 188 | |
|---|
| 133 | | -static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, |
|---|
| 134 | | - void *context) |
|---|
| 135 | | -{ |
|---|
| 136 | | - struct akcipher_request *req = context; |
|---|
| 137 | | - struct rsa_edesc *edesc; |
|---|
| 138 | | - |
|---|
| 139 | | - if (err) |
|---|
| 140 | | - caam_jr_strstatus(dev, err); |
|---|
| 141 | | - |
|---|
| 142 | | - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
|---|
| 143 | | - |
|---|
| 144 | | - rsa_priv_f2_unmap(dev, edesc, req); |
|---|
| 145 | | - rsa_io_unmap(dev, edesc, req); |
|---|
| 146 | | - kfree(edesc); |
|---|
| 147 | | - |
|---|
| 148 | | - akcipher_request_complete(req, err); |
|---|
| 149 | | -} |
|---|
| 150 | | - |
|---|
| 151 | | -static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, |
|---|
| 152 | | - void *context) |
|---|
| 153 | | -{ |
|---|
| 154 | | - struct akcipher_request *req = context; |
|---|
| 155 | | - struct rsa_edesc *edesc; |
|---|
| 156 | | - |
|---|
| 157 | | - if (err) |
|---|
| 158 | | - caam_jr_strstatus(dev, err); |
|---|
| 159 | | - |
|---|
| 160 | | - edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); |
|---|
| 161 | | - |
|---|
| 162 | | - rsa_priv_f3_unmap(dev, edesc, req); |
|---|
| 163 | | - rsa_io_unmap(dev, edesc, req); |
|---|
| 164 | | - kfree(edesc); |
|---|
| 165 | | - |
|---|
| 166 | | - akcipher_request_complete(req, err); |
|---|
| 167 | | -} |
|---|
| 168 | | - |
|---|
| 189 | +/** |
|---|
| 190 | + * caam_rsa_count_leading_zeros - count leading zero bytes to be stripped
|---|
| 191 | + *
|---|
| 192 | + * @sgl   : scatterlist to count zeros from
|---|
| 193 | + * @nbytes: maximum number of zero bytes to strip
|---|
| 194 | + * @flags : operation flags |
|---|
| 195 | + */ |
|---|
| 169 | 196 | static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, |
|---|
| 170 | 197 | unsigned int nbytes, |
|---|
| 171 | 198 | unsigned int flags) |
|---|
| .. | .. |
|---|
| 185 | 212 | lzeros = 0; |
|---|
| 186 | 213 | len = 0; |
|---|
| 187 | 214 | while (nbytes > 0) { |
|---|
| 188 | | - while (len && !*buff) { |
|---|
| 215 | + /* do not strip more than given bytes */ |
|---|
| 216 | + while (len && !*buff && lzeros < nbytes) { |
|---|
| 189 | 217 | lzeros++; |
|---|
| 190 | 218 | len--; |
|---|
| 191 | 219 | buff++; |
|---|
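
On a flat buffer the same computation is just a bounded scan for zero bytes; a sketch (hypothetical helper, not from the driver) that shows what the scatterlist walk above computes:

```c
/*
 * Hypothetical flat-buffer equivalent of caam_rsa_count_leading_zeros():
 * count leading zero bytes, but never more than 'max' of them.
 */
static size_t count_leading_zeros_flat(const u8 *buf, size_t len, size_t max)
{
	size_t lzeros = 0;

	while (lzeros < len && lzeros < max && !buf[lzeros])
		lzeros++;

	return lzeros;
}
```
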
| .. | .. |
|---|
| 216 | 244 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 217 | 245 | struct device *dev = ctx->dev; |
|---|
| 218 | 246 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 247 | + struct caam_rsa_key *key = &ctx->key; |
|---|
| 219 | 248 | struct rsa_edesc *edesc; |
|---|
| 220 | 249 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
|---|
| 221 | 250 | GFP_KERNEL : GFP_ATOMIC; |
|---|
| 222 | 251 | int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0; |
|---|
| 223 | | - int sgc; |
|---|
| 224 | 252 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
|---|
| 225 | 253 | int src_nents, dst_nents; |
|---|
| 254 | + int mapped_src_nents, mapped_dst_nents; |
|---|
| 255 | + unsigned int diff_size = 0; |
|---|
| 226 | 256 | int lzeros; |
|---|
| 227 | 257 | |
|---|
| 228 | | - lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); |
|---|
| 229 | | - if (lzeros < 0) |
|---|
| 230 | | - return ERR_PTR(lzeros); |
|---|
| 258 | + if (req->src_len > key->n_sz) { |
|---|
| 259 | + /* |
|---|
| 260 | + * strip leading zeros and |
|---|
| 261 | + * return the number of zeros to skip |
|---|
| 262 | + */ |
|---|
| 263 | + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - |
|---|
| 264 | + key->n_sz, sg_flags); |
|---|
| 265 | + if (lzeros < 0) |
|---|
| 266 | + return ERR_PTR(lzeros); |
|---|
| 231 | 267 | |
|---|
| 232 | | - req->src_len -= lzeros; |
|---|
| 233 | | - req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); |
|---|
| 268 | + req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, |
|---|
| 269 | + lzeros); |
|---|
| 270 | + req_ctx->fixup_src_len = req->src_len - lzeros; |
|---|
| 271 | + } else { |
|---|
| 272 | + /* |
|---|
| 273 | + * input src is less than the key modulus (n),
|---|
| 274 | + * so there will be zero padding |
|---|
| 275 | + */ |
|---|
| 276 | + diff_size = key->n_sz - req->src_len; |
|---|
| 277 | + req_ctx->fixup_src = req->src; |
|---|
| 278 | + req_ctx->fixup_src_len = req->src_len; |
|---|
| 279 | + } |
|---|
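
When the source is shorter than the modulus, CAAM still needs exactly key->n_sz input bytes, so the driver left-pads with the pre-mapped zero buffer. Conceptually (an illustration of the resulting hardware S/G layout, not driver code):

```c
/*
 * Input layout seen by CAAM when diff_size > 0:
 *
 *   sec4_sg[0]    -> zero_buffer, diff_size bytes   (left padding)
 *   sec4_sg[1..n] -> req->src segments              (actual input)
 *
 * total length = diff_size + req->src_len = key->n_sz
 */
```
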
| 234 | 280 | |
|---|
| 235 | | - src_nents = sg_nents_for_len(req->src, req->src_len); |
|---|
| 281 | + src_nents = sg_nents_for_len(req_ctx->fixup_src, |
|---|
| 282 | + req_ctx->fixup_src_len); |
|---|
| 236 | 283 | dst_nents = sg_nents_for_len(req->dst, req->dst_len); |
|---|
| 237 | 284 | |
|---|
| 238 | | - if (src_nents > 1) |
|---|
| 239 | | - sec4_sg_len = src_nents; |
|---|
| 240 | | - if (dst_nents > 1) |
|---|
| 241 | | - sec4_sg_len += dst_nents; |
|---|
| 285 | + mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents, |
|---|
| 286 | + DMA_TO_DEVICE); |
|---|
| 287 | + if (unlikely(!mapped_src_nents)) { |
|---|
| 288 | + dev_err(dev, "unable to map source\n"); |
|---|
| 289 | + return ERR_PTR(-ENOMEM); |
|---|
| 290 | + } |
|---|
| 291 | + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, |
|---|
| 292 | + DMA_FROM_DEVICE); |
|---|
| 293 | + if (unlikely(!mapped_dst_nents)) { |
|---|
| 294 | + dev_err(dev, "unable to map destination\n"); |
|---|
| 295 | + goto src_fail; |
|---|
| 296 | + } |
|---|
| 297 | + |
|---|
| 298 | + if (!diff_size && mapped_src_nents == 1) |
|---|
| 299 | + sec4_sg_len = 0; /* no need for an input hw s/g table */ |
|---|
| 300 | + else |
|---|
| 301 | + sec4_sg_len = mapped_src_nents + !!diff_size; |
|---|
| 302 | + sec4_sg_index = sec4_sg_len; |
|---|
| 303 | + |
|---|
| 304 | + if (mapped_dst_nents > 1) |
|---|
| 305 | + sec4_sg_len += pad_sg_nents(mapped_dst_nents); |
|---|
| 306 | + else |
|---|
| 307 | + sec4_sg_len = pad_sg_nents(sec4_sg_len); |
|---|
| 242 | 308 | |
|---|
| 243 | 309 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
|---|
| 244 | 310 | |
|---|
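
The S/G table sizing above can be read as three expressions (a sketch; it assumes pad_sg_nents() rounds a count up to a multiple of 4, as defined in the driver's sg_sw_sec4.h):

```c
/* sketch of the sec4_sg sizing logic, written as expressions */
int in  = (diff_size || mapped_src_nents > 1) ?
	  mapped_src_nents + !!diff_size : 0;	/* input entries */
int out = (mapped_dst_nents > 1) ?
	  pad_sg_nents(mapped_dst_nents) : 0;	/* output entries */
int len = out ? in + out : pad_sg_nents(in);	/* total, padded */
/* sec4_sg_index == in: where the output entries start */
```
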
| .. | .. |
|---|
| 246 | 312 | edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, |
|---|
| 247 | 313 | GFP_DMA | flags); |
|---|
| 248 | 314 | if (!edesc) |
|---|
| 249 | | - return ERR_PTR(-ENOMEM); |
|---|
| 250 | | - |
|---|
| 251 | | - sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); |
|---|
| 252 | | - if (unlikely(!sgc)) { |
|---|
| 253 | | - dev_err(dev, "unable to map source\n"); |
|---|
| 254 | | - goto src_fail; |
|---|
| 255 | | - } |
|---|
| 256 | | - |
|---|
| 257 | | - sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); |
|---|
| 258 | | - if (unlikely(!sgc)) { |
|---|
| 259 | | - dev_err(dev, "unable to map destination\n"); |
|---|
| 260 | 315 | goto dst_fail; |
|---|
| 261 | | - } |
|---|
| 262 | 316 | |
|---|
| 263 | 317 | edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; |
|---|
| 318 | + if (diff_size) |
|---|
| 319 | + dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, |
|---|
| 320 | + 0); |
|---|
| 264 | 321 | |
|---|
| 265 | | - sec4_sg_index = 0; |
|---|
| 266 | | - if (src_nents > 1) { |
|---|
| 267 | | - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); |
|---|
| 268 | | - sec4_sg_index += src_nents; |
|---|
| 269 | | - } |
|---|
| 270 | | - if (dst_nents > 1) |
|---|
| 271 | | - sg_to_sec4_sg_last(req->dst, dst_nents, |
|---|
| 322 | + if (sec4_sg_index) |
|---|
| 323 | + sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, |
|---|
| 324 | + edesc->sec4_sg + !!diff_size, 0); |
|---|
| 325 | + |
|---|
| 326 | + if (mapped_dst_nents > 1) |
|---|
| 327 | + sg_to_sec4_sg_last(req->dst, req->dst_len, |
|---|
| 272 | 328 | edesc->sec4_sg + sec4_sg_index, 0); |
|---|
| 273 | 329 | |
|---|
| 274 | 330 | /* Save nents for later use in Job Descriptor */ |
|---|
| 275 | 331 | edesc->src_nents = src_nents; |
|---|
| 276 | 332 | edesc->dst_nents = dst_nents; |
|---|
| 277 | 333 | |
|---|
| 334 | + req_ctx->edesc = edesc; |
|---|
| 335 | + |
|---|
| 278 | 336 | if (!sec4_sg_bytes) |
|---|
| 279 | 337 | return edesc; |
|---|
| 338 | + |
|---|
| 339 | + edesc->mapped_src_nents = mapped_src_nents; |
|---|
| 340 | + edesc->mapped_dst_nents = mapped_dst_nents; |
|---|
| 280 | 341 | |
|---|
| 281 | 342 | edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, |
|---|
| 282 | 343 | sec4_sg_bytes, DMA_TO_DEVICE); |
|---|
| .. | .. |
|---|
| 287 | 348 | |
|---|
| 288 | 349 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
|---|
| 289 | 350 | |
|---|
| 351 | + print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", |
|---|
| 352 | + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
|---|
| 353 | + edesc->sec4_sg_bytes, 1); |
|---|
| 354 | + |
|---|
| 290 | 355 | return edesc; |
|---|
| 291 | 356 | |
|---|
| 292 | 357 | sec4_sg_fail: |
|---|
| 293 | | - dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); |
|---|
| 294 | | -dst_fail: |
|---|
| 295 | | - dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); |
|---|
| 296 | | -src_fail: |
|---|
| 297 | 358 | kfree(edesc); |
|---|
| 359 | +dst_fail: |
|---|
| 360 | + dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); |
|---|
| 361 | +src_fail: |
|---|
| 362 | + dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); |
|---|
| 298 | 363 | return ERR_PTR(-ENOMEM); |
|---|
| 364 | +} |
|---|
| 365 | + |
|---|
| 366 | +static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) |
|---|
| 367 | +{ |
|---|
| 368 | + struct akcipher_request *req = container_of(areq, |
|---|
| 369 | + struct akcipher_request, |
|---|
| 370 | + base); |
|---|
| 371 | + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
|---|
| 372 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 373 | + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 374 | + struct device *jrdev = ctx->dev; |
|---|
| 375 | + u32 *desc = req_ctx->edesc->hw_desc; |
|---|
| 376 | + int ret; |
|---|
| 377 | + |
|---|
| 378 | + req_ctx->edesc->bklog = true; |
|---|
| 379 | + |
|---|
| 380 | + ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req); |
|---|
| 381 | + |
|---|
| 382 | + if (ret != -EINPROGRESS) { |
|---|
| 383 | + rsa_pub_unmap(jrdev, req_ctx->edesc, req); |
|---|
| 384 | + rsa_io_unmap(jrdev, req_ctx->edesc, req); |
|---|
| 385 | + kfree(req_ctx->edesc); |
|---|
| 386 | + } else { |
|---|
| 387 | + ret = 0; |
|---|
| 388 | + } |
|---|
| 389 | + |
|---|
| 390 | + return ret; |
|---|
| 299 | 391 | } |
|---|
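
One detail worth noting in akcipher_do_one_req(): caam_jr_enqueue() signals a successful submission with -EINPROGRESS, while the crypto engine expects 0 from a do_one_request hook, hence the normalization at the end. In isolation (illustrative fragment mirroring the code above):

```c
ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
if (ret == -EINPROGRESS)
	return 0;	/* accepted; completion arrives via the callback */
/* any other value: unmap, free the edesc, then propagate the error */
```
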
| 300 | 392 | |
|---|
| 301 | 393 | static int set_rsa_pub_pdb(struct akcipher_request *req, |
|---|
| 302 | 394 | struct rsa_edesc *edesc) |
|---|
| 303 | 395 | { |
|---|
| 304 | 396 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
|---|
| 397 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 305 | 398 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 306 | 399 | struct caam_rsa_key *key = &ctx->key; |
|---|
| 307 | 400 | struct device *dev = ctx->dev; |
|---|
| .. | .. |
|---|
| 321 | 414 | return -ENOMEM; |
|---|
| 322 | 415 | } |
|---|
| 323 | 416 | |
|---|
| 324 | | - if (edesc->src_nents > 1) { |
|---|
| 417 | + if (edesc->mapped_src_nents > 1) { |
|---|
| 325 | 418 | pdb->sgf |= RSA_PDB_SGF_F; |
|---|
| 326 | 419 | pdb->f_dma = edesc->sec4_sg_dma; |
|---|
| 327 | | - sec4_sg_index += edesc->src_nents; |
|---|
| 420 | + sec4_sg_index += edesc->mapped_src_nents; |
|---|
| 328 | 421 | } else { |
|---|
| 329 | | - pdb->f_dma = sg_dma_address(req->src); |
|---|
| 422 | + pdb->f_dma = sg_dma_address(req_ctx->fixup_src); |
|---|
| 330 | 423 | } |
|---|
| 331 | 424 | |
|---|
| 332 | | - if (edesc->dst_nents > 1) { |
|---|
| 425 | + if (edesc->mapped_dst_nents > 1) { |
|---|
| 333 | 426 | pdb->sgf |= RSA_PDB_SGF_G; |
|---|
| 334 | 427 | pdb->g_dma = edesc->sec4_sg_dma + |
|---|
| 335 | 428 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
|---|
| .. | .. |
|---|
| 338 | 431 | } |
|---|
| 339 | 432 | |
|---|
| 340 | 433 | pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; |
|---|
| 341 | | - pdb->f_len = req->src_len; |
|---|
| 434 | + pdb->f_len = req_ctx->fixup_src_len; |
|---|
| 342 | 435 | |
|---|
| 343 | 436 | return 0; |
|---|
| 344 | 437 | } |
|---|
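
The final assignment packs both operand lengths into the sgf word alongside the S/G flags; a worked example (key sizes assumed for illustration):

```c
/*
 * Example: 2048-bit key with the common 65537 (0x10001) exponent.
 *   key->n_sz = 256 (modulus bytes), key->e_sz = 3 (exponent bytes)
 *   pdb->sgf |= (3 << RSA_PDB_E_SHIFT) | 256;
 * so e_sz and n_sz travel in one word next to RSA_PDB_SGF_{F,G}.
 */
```
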
| .. | .. |
|---|
| 366 | 459 | return -ENOMEM; |
|---|
| 367 | 460 | } |
|---|
| 368 | 461 | |
|---|
| 369 | | - if (edesc->src_nents > 1) { |
|---|
| 462 | + if (edesc->mapped_src_nents > 1) { |
|---|
| 370 | 463 | pdb->sgf |= RSA_PRIV_PDB_SGF_G; |
|---|
| 371 | 464 | pdb->g_dma = edesc->sec4_sg_dma; |
|---|
| 372 | | - sec4_sg_index += edesc->src_nents; |
|---|
| 465 | + sec4_sg_index += edesc->mapped_src_nents; |
|---|
| 466 | + |
|---|
| 373 | 467 | } else { |
|---|
| 374 | | - pdb->g_dma = sg_dma_address(req->src); |
|---|
| 468 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 469 | + |
|---|
| 470 | + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); |
|---|
| 375 | 471 | } |
|---|
| 376 | 472 | |
|---|
| 377 | | - if (edesc->dst_nents > 1) { |
|---|
| 473 | + if (edesc->mapped_dst_nents > 1) { |
|---|
| 378 | 474 | pdb->sgf |= RSA_PRIV_PDB_SGF_F; |
|---|
| 379 | 475 | pdb->f_dma = edesc->sec4_sg_dma + |
|---|
| 380 | 476 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
|---|
| .. | .. |
|---|
| 429 | 525 | goto unmap_tmp1; |
|---|
| 430 | 526 | } |
|---|
| 431 | 527 | |
|---|
| 432 | | - if (edesc->src_nents > 1) { |
|---|
| 528 | + if (edesc->mapped_src_nents > 1) { |
|---|
| 433 | 529 | pdb->sgf |= RSA_PRIV_PDB_SGF_G; |
|---|
| 434 | 530 | pdb->g_dma = edesc->sec4_sg_dma; |
|---|
| 435 | | - sec4_sg_index += edesc->src_nents; |
|---|
| 531 | + sec4_sg_index += edesc->mapped_src_nents; |
|---|
| 436 | 532 | } else { |
|---|
| 437 | | - pdb->g_dma = sg_dma_address(req->src); |
|---|
| 533 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 534 | + |
|---|
| 535 | + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); |
|---|
| 438 | 536 | } |
|---|
| 439 | 537 | |
|---|
| 440 | | - if (edesc->dst_nents > 1) { |
|---|
| 538 | + if (edesc->mapped_dst_nents > 1) { |
|---|
| 441 | 539 | pdb->sgf |= RSA_PRIV_PDB_SGF_F; |
|---|
| 442 | 540 | pdb->f_dma = edesc->sec4_sg_dma + |
|---|
| 443 | 541 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
|---|
| .. | .. |
|---|
| 516 | 614 | goto unmap_tmp1; |
|---|
| 517 | 615 | } |
|---|
| 518 | 616 | |
|---|
| 519 | | - if (edesc->src_nents > 1) { |
|---|
| 617 | + if (edesc->mapped_src_nents > 1) { |
|---|
| 520 | 618 | pdb->sgf |= RSA_PRIV_PDB_SGF_G; |
|---|
| 521 | 619 | pdb->g_dma = edesc->sec4_sg_dma; |
|---|
| 522 | | - sec4_sg_index += edesc->src_nents; |
|---|
| 620 | + sec4_sg_index += edesc->mapped_src_nents; |
|---|
| 523 | 621 | } else { |
|---|
| 524 | | - pdb->g_dma = sg_dma_address(req->src); |
|---|
| 622 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 623 | + |
|---|
| 624 | + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); |
|---|
| 525 | 625 | } |
|---|
| 526 | 626 | |
|---|
| 527 | | - if (edesc->dst_nents > 1) { |
|---|
| 627 | + if (edesc->mapped_dst_nents > 1) { |
|---|
| 528 | 628 | pdb->sgf |= RSA_PRIV_PDB_SGF_F; |
|---|
| 529 | 629 | pdb->f_dma = edesc->sec4_sg_dma + |
|---|
| 530 | 630 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
|---|
| .. | .. |
|---|
| 551 | 651 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
|---|
| 552 | 652 | |
|---|
| 553 | 653 | return -ENOMEM; |
|---|
| 654 | +} |
|---|
| 655 | + |
|---|
| 656 | +static int akcipher_enqueue_req(struct device *jrdev, |
|---|
| 657 | + void (*cbk)(struct device *jrdev, u32 *desc, |
|---|
| 658 | + u32 err, void *context), |
|---|
| 659 | + struct akcipher_request *req) |
|---|
| 660 | +{ |
|---|
| 661 | + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); |
|---|
| 662 | + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
|---|
| 663 | + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 664 | + struct caam_rsa_key *key = &ctx->key; |
|---|
| 665 | + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
|---|
| 666 | + struct rsa_edesc *edesc = req_ctx->edesc; |
|---|
| 667 | + u32 *desc = edesc->hw_desc; |
|---|
| 668 | + int ret; |
|---|
| 669 | + |
|---|
| 670 | + req_ctx->akcipher_op_done = cbk; |
|---|
| 671 | + /* |
|---|
| 672 | + * Only the backlog request are sent to crypto-engine since the others |
|---|
| 673 | + * can be handled by CAAM, if free, especially since JR has up to 1024 |
|---|
| 674 | + * entries (more than the 10 entries from crypto-engine). |
|---|
| 675 | + */ |
|---|
| 676 | + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) |
|---|
| 677 | + ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine, |
|---|
| 678 | + req); |
|---|
| 679 | + else |
|---|
| 680 | + ret = caam_jr_enqueue(jrdev, desc, cbk, req); |
|---|
| 681 | + |
|---|
| 682 | + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { |
|---|
| 683 | + switch (key->priv_form) { |
|---|
| 684 | + case FORM1: |
|---|
| 685 | + rsa_priv_f1_unmap(jrdev, edesc, req); |
|---|
| 686 | + break; |
|---|
| 687 | + case FORM2: |
|---|
| 688 | + rsa_priv_f2_unmap(jrdev, edesc, req); |
|---|
| 689 | + break; |
|---|
| 690 | + case FORM3: |
|---|
| 691 | + rsa_priv_f3_unmap(jrdev, edesc, req); |
|---|
| 692 | + break; |
|---|
| 693 | + default: |
|---|
| 694 | + rsa_pub_unmap(jrdev, edesc, req); |
|---|
| 695 | + } |
|---|
| 696 | + rsa_io_unmap(jrdev, edesc, req); |
|---|
| 697 | + kfree(edesc); |
|---|
| 698 | + } |
|---|
| 699 | + |
|---|
| 700 | + return ret; |
|---|
| 554 | 701 | } |
|---|
| 555 | 702 | |
|---|
| 556 | 703 | static int caam_rsa_enc(struct akcipher_request *req) |
|---|
| .. | .. |
|---|
| 584 | 731 | /* Initialize Job Descriptor */ |
|---|
| 585 | 732 | init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); |
|---|
| 586 | 733 | |
|---|
| 587 | | - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req); |
|---|
| 588 | | - if (!ret) |
|---|
| 589 | | - return -EINPROGRESS; |
|---|
| 590 | | - |
|---|
| 591 | | - rsa_pub_unmap(jrdev, edesc, req); |
|---|
| 734 | + return akcipher_enqueue_req(jrdev, rsa_pub_done, req); |
|---|
| 592 | 735 | |
|---|
| 593 | 736 | init_fail: |
|---|
| 594 | 737 | rsa_io_unmap(jrdev, edesc, req); |
|---|
| .. | .. |
|---|
| 617 | 760 | /* Initialize Job Descriptor */ |
|---|
| 618 | 761 | init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); |
|---|
| 619 | 762 | |
|---|
| 620 | | - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req); |
|---|
| 621 | | - if (!ret) |
|---|
| 622 | | - return -EINPROGRESS; |
|---|
| 623 | | - |
|---|
| 624 | | - rsa_priv_f1_unmap(jrdev, edesc, req); |
|---|
| 763 | + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); |
|---|
| 625 | 764 | |
|---|
| 626 | 765 | init_fail: |
|---|
| 627 | 766 | rsa_io_unmap(jrdev, edesc, req); |
|---|
| .. | .. |
|---|
| 650 | 789 | /* Initialize Job Descriptor */ |
|---|
| 651 | 790 | init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); |
|---|
| 652 | 791 | |
|---|
| 653 | | - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req); |
|---|
| 654 | | - if (!ret) |
|---|
| 655 | | - return -EINPROGRESS; |
|---|
| 656 | | - |
|---|
| 657 | | - rsa_priv_f2_unmap(jrdev, edesc, req); |
|---|
| 792 | + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); |
|---|
| 658 | 793 | |
|---|
| 659 | 794 | init_fail: |
|---|
| 660 | 795 | rsa_io_unmap(jrdev, edesc, req); |
|---|
| .. | .. |
|---|
| 683 | 818 | /* Initialize Job Descriptor */ |
|---|
| 684 | 819 | init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); |
|---|
| 685 | 820 | |
|---|
| 686 | | - ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req); |
|---|
| 687 | | - if (!ret) |
|---|
| 688 | | - return -EINPROGRESS; |
|---|
| 689 | | - |
|---|
| 690 | | - rsa_priv_f3_unmap(jrdev, edesc, req); |
|---|
| 821 | + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); |
|---|
| 691 | 822 | |
|---|
| 692 | 823 | init_fail: |
|---|
| 693 | 824 | rsa_io_unmap(jrdev, edesc, req); |
|---|
| .. | .. |
|---|
| 723 | 854 | |
|---|
| 724 | 855 | static void caam_rsa_free_key(struct caam_rsa_key *key) |
|---|
| 725 | 856 | { |
|---|
| 726 | | - kzfree(key->d); |
|---|
| 727 | | - kzfree(key->p); |
|---|
| 728 | | - kzfree(key->q); |
|---|
| 729 | | - kzfree(key->dp); |
|---|
| 730 | | - kzfree(key->dq); |
|---|
| 731 | | - kzfree(key->qinv); |
|---|
| 732 | | - kzfree(key->tmp1); |
|---|
| 733 | | - kzfree(key->tmp2); |
|---|
| 857 | + kfree_sensitive(key->d); |
|---|
| 858 | + kfree_sensitive(key->p); |
|---|
| 859 | + kfree_sensitive(key->q); |
|---|
| 860 | + kfree_sensitive(key->dp); |
|---|
| 861 | + kfree_sensitive(key->dq); |
|---|
| 862 | + kfree_sensitive(key->qinv); |
|---|
| 863 | + kfree_sensitive(key->tmp1); |
|---|
| 864 | + kfree_sensitive(key->tmp2); |
|---|
| 734 | 865 | kfree(key->e); |
|---|
| 735 | 866 | kfree(key->n); |
|---|
| 736 | 867 | memset(key, 0, sizeof(*key)); |
|---|
| .. | .. |
|---|
| 814 | 945 | return ret; |
|---|
| 815 | 946 | |
|---|
| 816 | 947 | /* Copy key in DMA zone */ |
|---|
| 817 | | - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 948 | + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 818 | 949 | if (!rsa_key->e) |
|---|
| 819 | 950 | goto err; |
|---|
| 820 | 951 | |
|---|
| .. | .. |
|---|
| 835 | 966 | |
|---|
| 836 | 967 | rsa_key->e_sz = raw_key.e_sz; |
|---|
| 837 | 968 | rsa_key->n_sz = raw_key.n_sz; |
|---|
| 838 | | - |
|---|
| 839 | | - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); |
|---|
| 840 | 969 | |
|---|
| 841 | 970 | return 0; |
|---|
| 842 | 971 | err: |
|---|
| .. | .. |
|---|
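
The key-copy conversion above is the standard collapse of a zeroing allocation plus copy into one call; side by side (illustrative, using the public-exponent copy as the example):

```c
/* before: allocate then copy */
rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
if (rsa_key->e)
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

/* after: one allocation that copies */
rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
```
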
| 889 | 1018 | return; |
|---|
| 890 | 1019 | |
|---|
| 891 | 1020 | free_dq: |
|---|
| 892 | | - kzfree(rsa_key->dq); |
|---|
| 1021 | + kfree_sensitive(rsa_key->dq); |
|---|
| 893 | 1022 | free_dp: |
|---|
| 894 | | - kzfree(rsa_key->dp); |
|---|
| 1023 | + kfree_sensitive(rsa_key->dp); |
|---|
| 895 | 1024 | free_tmp2: |
|---|
| 896 | | - kzfree(rsa_key->tmp2); |
|---|
| 1025 | + kfree_sensitive(rsa_key->tmp2); |
|---|
| 897 | 1026 | free_tmp1: |
|---|
| 898 | | - kzfree(rsa_key->tmp1); |
|---|
| 1027 | + kfree_sensitive(rsa_key->tmp1); |
|---|
| 899 | 1028 | free_q: |
|---|
| 900 | | - kzfree(rsa_key->q); |
|---|
| 1029 | + kfree_sensitive(rsa_key->q); |
|---|
| 901 | 1030 | free_p: |
|---|
| 902 | | - kzfree(rsa_key->p); |
|---|
| 1031 | + kfree_sensitive(rsa_key->p); |
|---|
| 903 | 1032 | } |
|---|
| 904 | 1033 | |
|---|
| 905 | 1034 | static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, |
|---|
| .. | .. |
|---|
| 918 | 1047 | return ret; |
|---|
| 919 | 1048 | |
|---|
| 920 | 1049 | /* Copy key in DMA zone */ |
|---|
| 921 | | - rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 1050 | + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 922 | 1051 | if (!rsa_key->d) |
|---|
| 923 | 1052 | goto err; |
|---|
| 924 | 1053 | |
|---|
| 925 | | - rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 1054 | + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); |
|---|
| 926 | 1055 | if (!rsa_key->e) |
|---|
| 927 | 1056 | goto err; |
|---|
| 928 | 1057 | |
|---|
| .. | .. |
|---|
| 944 | 1073 | rsa_key->d_sz = raw_key.d_sz; |
|---|
| 945 | 1074 | rsa_key->e_sz = raw_key.e_sz; |
|---|
| 946 | 1075 | rsa_key->n_sz = raw_key.n_sz; |
|---|
| 947 | | - |
|---|
| 948 | | - memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); |
|---|
| 949 | | - memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); |
|---|
| 950 | 1076 | |
|---|
| 951 | 1077 | caam_rsa_set_priv_key_form(ctx, &raw_key); |
|---|
| 952 | 1078 | |
|---|
| .. | .. |
|---|
| 976 | 1102 | return PTR_ERR(ctx->dev); |
|---|
| 977 | 1103 | } |
|---|
| 978 | 1104 | |
|---|
| 1105 | + ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, |
|---|
| 1106 | + CAAM_RSA_MAX_INPUT_SIZE - 1, |
|---|
| 1107 | + DMA_TO_DEVICE); |
|---|
| 1108 | + if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { |
|---|
| 1109 | + dev_err(ctx->dev, "unable to map padding\n"); |
|---|
| 1110 | + caam_jr_free(ctx->dev); |
|---|
| 1111 | + return -ENOMEM; |
|---|
| 1112 | + } |
|---|
| 1113 | + |
|---|
| 1114 | + ctx->enginectx.op.do_one_request = akcipher_do_one_req; |
|---|
| 1115 | + |
|---|
| 979 | 1116 | return 0; |
|---|
| 980 | 1117 | } |
|---|
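
The mapping size of CAAM_RSA_MAX_INPUT_SIZE - 1 is not arbitrary: a valid input is at least one byte, so at most 511 zero bytes of left padding can ever be prepended for a 4096-bit (512-byte) modulus. As arithmetic:

```c
/*
 * max padding = key->n_sz - min(src_len)
 *             = CAAM_RSA_MAX_INPUT_SIZE - 1
 *             = 512 - 1 = 511 bytes
 */
```
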
| 981 | 1118 | |
|---|
| .. | .. |
|---|
| 985 | 1122 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
|---|
| 986 | 1123 | struct caam_rsa_key *key = &ctx->key; |
|---|
| 987 | 1124 | |
|---|
| 1125 | + dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - |
|---|
| 1126 | + 1, DMA_TO_DEVICE); |
|---|
| 988 | 1127 | caam_rsa_free_key(key); |
|---|
| 989 | 1128 | caam_jr_free(ctx->dev); |
|---|
| 990 | 1129 | } |
|---|
| 991 | 1130 | |
|---|
| 992 | | -static struct akcipher_alg caam_rsa = { |
|---|
| 993 | | - .encrypt = caam_rsa_enc, |
|---|
| 994 | | - .decrypt = caam_rsa_dec, |
|---|
| 995 | | - .sign = caam_rsa_dec, |
|---|
| 996 | | - .verify = caam_rsa_enc, |
|---|
| 997 | | - .set_pub_key = caam_rsa_set_pub_key, |
|---|
| 998 | | - .set_priv_key = caam_rsa_set_priv_key, |
|---|
| 999 | | - .max_size = caam_rsa_max_size, |
|---|
| 1000 | | - .init = caam_rsa_init_tfm, |
|---|
| 1001 | | - .exit = caam_rsa_exit_tfm, |
|---|
| 1002 | | - .reqsize = sizeof(struct caam_rsa_req_ctx), |
|---|
| 1003 | | - .base = { |
|---|
| 1004 | | - .cra_name = "rsa", |
|---|
| 1005 | | - .cra_driver_name = "rsa-caam", |
|---|
| 1006 | | - .cra_priority = 3000, |
|---|
| 1007 | | - .cra_module = THIS_MODULE, |
|---|
| 1008 | | - .cra_ctxsize = sizeof(struct caam_rsa_ctx), |
|---|
| 1009 | | - }, |
|---|
| 1131 | +static struct caam_akcipher_alg caam_rsa = { |
|---|
| 1132 | + .akcipher = { |
|---|
| 1133 | + .encrypt = caam_rsa_enc, |
|---|
| 1134 | + .decrypt = caam_rsa_dec, |
|---|
| 1135 | + .set_pub_key = caam_rsa_set_pub_key, |
|---|
| 1136 | + .set_priv_key = caam_rsa_set_priv_key, |
|---|
| 1137 | + .max_size = caam_rsa_max_size, |
|---|
| 1138 | + .init = caam_rsa_init_tfm, |
|---|
| 1139 | + .exit = caam_rsa_exit_tfm, |
|---|
| 1140 | + .reqsize = sizeof(struct caam_rsa_req_ctx), |
|---|
| 1141 | + .base = { |
|---|
| 1142 | + .cra_name = "rsa", |
|---|
| 1143 | + .cra_driver_name = "rsa-caam", |
|---|
| 1144 | + .cra_priority = 3000, |
|---|
| 1145 | + .cra_module = THIS_MODULE, |
|---|
| 1146 | + .cra_ctxsize = sizeof(struct caam_rsa_ctx), |
|---|
| 1147 | + }, |
|---|
| 1148 | + } |
|---|
| 1010 | 1149 | }; |
|---|
| 1011 | 1150 | |
|---|
| 1012 | 1151 | /* Public Key Cryptography module initialization handler */ |
|---|
| 1013 | | -static int __init caam_pkc_init(void) |
|---|
| 1152 | +int caam_pkc_init(struct device *ctrldev) |
|---|
| 1014 | 1153 | { |
|---|
| 1015 | | - struct device_node *dev_node; |
|---|
| 1016 | | - struct platform_device *pdev; |
|---|
| 1017 | | - struct device *ctrldev; |
|---|
| 1018 | | - struct caam_drv_private *priv; |
|---|
| 1019 | | - u32 cha_inst, pk_inst; |
|---|
| 1154 | + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
|---|
| 1155 | + u32 pk_inst, pkha; |
|---|
| 1020 | 1156 | int err; |
|---|
| 1021 | | - |
|---|
| 1022 | | - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
|---|
| 1023 | | - if (!dev_node) { |
|---|
| 1024 | | - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); |
|---|
| 1025 | | - if (!dev_node) |
|---|
| 1026 | | - return -ENODEV; |
|---|
| 1027 | | - } |
|---|
| 1028 | | - |
|---|
| 1029 | | - pdev = of_find_device_by_node(dev_node); |
|---|
| 1030 | | - if (!pdev) { |
|---|
| 1031 | | - of_node_put(dev_node); |
|---|
| 1032 | | - return -ENODEV; |
|---|
| 1033 | | - } |
|---|
| 1034 | | - |
|---|
| 1035 | | - ctrldev = &pdev->dev; |
|---|
| 1036 | | - priv = dev_get_drvdata(ctrldev); |
|---|
| 1037 | | - of_node_put(dev_node); |
|---|
| 1038 | | - |
|---|
| 1039 | | - /* |
|---|
| 1040 | | - * If priv is NULL, it's probably because the caam driver wasn't |
|---|
| 1041 | | - * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
|---|
| 1042 | | - */ |
|---|
| 1043 | | - if (!priv) |
|---|
| 1044 | | - return -ENODEV; |
|---|
| 1157 | + init_done = false; |
|---|
| 1045 | 1158 | |
|---|
| 1046 | 1159 | /* Determine public key hardware accelerator presence. */ |
|---|
| 1047 | | - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); |
|---|
| 1048 | | - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; |
|---|
| 1160 | + if (priv->era < 10) { |
|---|
| 1161 | + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & |
|---|
| 1162 | + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; |
|---|
| 1163 | + } else { |
|---|
| 1164 | + pkha = rd_reg32(&priv->ctrl->vreg.pkha); |
|---|
| 1165 | + pk_inst = pkha & CHA_VER_NUM_MASK; |
|---|
| 1166 | + |
|---|
| 1167 | + /* |
|---|
| 1168 | + * Newer CAAMs support partially disabled functionality. If this is the |
|---|
| 1169 | + * case, the number is non-zero, but this bit is set to indicate that |
|---|
| 1170 | + * no encryption or decryption is supported; only signing and verifying
|---|
| 1171 | + * are supported.
|---|
| 1172 | + */ |
|---|
| 1173 | + if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) |
|---|
| 1174 | + pk_inst = 0; |
|---|
| 1175 | + } |
|---|
| 1049 | 1176 | |
|---|
| 1050 | 1177 | /* Do not register algorithms if PKHA is not present. */ |
|---|
| 1051 | 1178 | if (!pk_inst) |
|---|
| 1052 | | - return -ENODEV; |
|---|
| 1179 | + return 0; |
|---|
| 1053 | 1180 | |
|---|
| 1054 | | - err = crypto_register_akcipher(&caam_rsa); |
|---|
| 1055 | | - if (err) |
|---|
| 1181 | + /* allocate zero buffer, used for padding input */ |
|---|
| 1182 | + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | |
|---|
| 1183 | + GFP_KERNEL); |
|---|
| 1184 | + if (!zero_buffer) |
|---|
| 1185 | + return -ENOMEM; |
|---|
| 1186 | + |
|---|
| 1187 | + err = crypto_register_akcipher(&caam_rsa.akcipher); |
|---|
| 1188 | + |
|---|
| 1189 | + if (err) { |
|---|
| 1190 | + kfree(zero_buffer); |
|---|
| 1056 | 1191 | dev_warn(ctrldev, "%s alg registration failed\n", |
|---|
| 1057 | | - caam_rsa.base.cra_driver_name); |
|---|
| 1058 | | - else |
|---|
| 1192 | + caam_rsa.akcipher.base.cra_driver_name); |
|---|
| 1193 | + } else { |
|---|
| 1194 | + init_done = true; |
|---|
| 1195 | + caam_rsa.registered = true; |
|---|
| 1059 | 1196 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); |
|---|
| 1197 | + } |
|---|
| 1060 | 1198 | |
|---|
| 1061 | 1199 | return err; |
|---|
| 1062 | 1200 | } |
|---|
| 1063 | 1201 | |
|---|
| 1064 | | -static void __exit caam_pkc_exit(void) |
|---|
| 1202 | +void caam_pkc_exit(void) |
|---|
| 1065 | 1203 | { |
|---|
| 1066 | | - crypto_unregister_akcipher(&caam_rsa); |
|---|
| 1204 | + if (!init_done) |
|---|
| 1205 | + return; |
|---|
| 1206 | + |
|---|
| 1207 | + if (caam_rsa.registered) |
|---|
| 1208 | + crypto_unregister_akcipher(&caam_rsa.akcipher); |
|---|
| 1209 | + |
|---|
| 1210 | + kfree(zero_buffer); |
|---|
| 1067 | 1211 | } |
|---|
| 1068 | | - |
|---|
| 1069 | | -module_init(caam_pkc_init); |
|---|
| 1070 | | -module_exit(caam_pkc_exit); |
|---|
| 1071 | | - |
|---|
| 1072 | | -MODULE_LICENSE("Dual BSD/GPL"); |
|---|
| 1073 | | -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API"); |
|---|
| 1074 | | -MODULE_AUTHOR("Freescale Semiconductor"); |
|---|
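
With the module hooks gone, registration is driven externally; a hedged sketch of how a core CAAM driver might call these entry points (the surrounding helper and its placement are assumptions, not shown in this patch):

```c
/* assumed caller, e.g. from the job-ring/controller probe path */
static void register_pkc(struct device *ctrldev)
{
	int err = caam_pkc_init(ctrldev);

	if (err)
		dev_warn(ctrldev, "caam pkc registration failed: %d\n", err);
}

/* and caam_pkc_exit() would be called on the matching remove path */
```
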