@@ -272,27 +272,46 @@
 static void rk_crypto_irq_timer_handle(struct timer_list *t)
 {
 	struct rk_crypto_dev *rk_dev = from_timer(rk_dev, t, timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
 
 	rk_dev->err = -ETIMEDOUT;
 	rk_dev->stat.timeout_cnt++;
+
+	rk_unload_data(rk_dev);
+
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
+
 	tasklet_schedule(&rk_dev->done_task);
 }
 
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
 	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
 
-	spin_lock(&rk_dev->lock);
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	/* reset timeout timer */
+	start_irq_timer(rk_dev);
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.irq_cnt++;
 
 	if (alg_ctx->ops.irq_handle)
 		alg_ctx->ops.irq_handle(irq, dev_id);
 
-	tasklet_schedule(&rk_dev->done_task);
+	/* timeout already triggered; do not complete the request again */
+	if (rk_dev->err != -ETIMEDOUT) {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		tasklet_schedule(&rk_dev->done_task);
+	} else {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+	}
 
-	spin_unlock(&rk_dev->lock);
 	return IRQ_HANDLED;
 }
 
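Reviewer's note: this hunk closes a race between the completion IRQ and the timeout timer. Both paths now take rk_dev->lock with spin_lock_irqsave(), and the IRQ handler declines to schedule done_task once the timer has already marked the request with -ETIMEDOUT, so each request is completed exactly once. Below is a minimal sketch of the same pattern; the demo_* names and the simplified struct are illustrative assumptions, not the driver's real types.

/* Sketch only: one lock arbitrates between a timeout timer and an IRQ. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_dev {
	spinlock_t lock;
	int err;			/* set to -ETIMEDOUT when the timer fires */
	struct tasklet_struct done_task;
};

static void demo_timer_expired(struct demo_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->err = -ETIMEDOUT;		/* claim the request for the timeout path */
	spin_unlock_irqrestore(&d->lock, flags);

	tasklet_schedule(&d->done_task);
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&d->lock, flags);
	timed_out = (d->err == -ETIMEDOUT);
	spin_unlock_irqrestore(&d->lock, flags);

	/* schedule completion only if the timeout path has not claimed it */
	if (!timed_out)
		tasklet_schedule(&d->done_task);

	return IRQ_HANDLED;
}

Dropping the lock before calling tasklet_schedule() mirrors the patch: the done tasklet takes the same lock itself, so keeping the schedule call outside the critical section keeps that section short.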
@@ -390,23 +409,22 @@
 	struct crypto_async_request *async_req, *backlog;
 	unsigned long flags;
 
+	spin_lock_irqsave(&rk_dev->lock, flags);
 	if (rk_dev->async_req) {
 		dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
-		return;
+		goto exit;
 	}
 
 	rk_dev->err = 0;
-	spin_lock_irqsave(&rk_dev->lock, flags);
+
 	backlog = crypto_get_backlog(&rk_dev->queue);
 	async_req = crypto_dequeue_request(&rk_dev->queue);
 
 	if (!async_req) {
 		rk_dev->busy = false;
-		spin_unlock_irqrestore(&rk_dev->lock, flags);
-		return;
+		goto exit;
 	}
 	rk_dev->stat.dequeue_cnt++;
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	if (backlog) {
 		backlog->complete(backlog, -EINPROGRESS);
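Reviewer's note: previously the callback took the lock only around the queue manipulation and dropped it on every early return, leaving the rk_dev->async_req check unprotected. This hunk widens the critical section to cover the whole callback and, together with the exit: label added in the next hunk, funnels every return through a single unlock. A sketch of that single-exit pattern follows; the demo_queue_dev type and its pending field are simplifying assumptions, not the driver's real queue.

/* Sketch only: widen the critical section and unlock at one exit point. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_queue_dev {
	spinlock_t lock;
	bool busy;
	void *async_req;	/* request currently on the hardware */
	void *pending;		/* next queued request, if any */
};

static void demo_queue_task(struct demo_queue_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);

	if (d->async_req)	/* a request is already in flight */
		goto exit;

	/* "dequeue": hand the pending request to the hardware slot */
	d->async_req = d->pending;
	d->pending = NULL;
	if (!d->async_req) {
		d->busy = false;	/* queue empty: go idle */
		goto exit;
	}

	/* ... start the hardware operation here, still under the lock ... */
exit:
	spin_unlock_irqrestore(&d->lock, flags);
}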
@@ -417,12 +435,26 @@
 	rk_dev->err = rk_start_op(rk_dev);
 	if (rk_dev->err)
 		rk_complete_op(rk_dev, rk_dev->err);
+
+exit:
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static void rk_crypto_done_task_cb(unsigned long data)
 {
 	struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	if (!rk_dev->async_req) {
+		dev_err(rk_dev->dev, "done task received an invalid async_req\n");
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		return;
+	}
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.done_cnt++;
 
@@ -440,9 +472,12 @@
 	if (rk_dev->err)
 		goto exit;
 
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
+
 	return;
 exit:
 	rk_complete_op(rk_dev, rk_dev->err);
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
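Reviewer's note: done_task can now be scheduled by both the IRQ and the timeout timer, so rk_crypto_done_task_cb must tolerate firing after the request has already been completed and cleared; the NULL check on async_req guards that window, and both the success path and the exit: error path release the lock before returning. A sketch of the guard, again with assumed demo_* stand-in types:

/* Sketch only: a done tasklet that tolerates a stale wakeup. */
#include <linux/spinlock.h>

struct demo_done_dev {
	spinlock_t lock;
	void *async_req;
};

static void demo_done_task(unsigned long data)
{
	struct demo_done_dev *d = (struct demo_done_dev *)data;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);

	/* a racing timeout may already have completed and cleared the request */
	if (!d->async_req) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* ... unload DMA data and complete the request under the lock ... */

	spin_unlock_irqrestore(&d->lock, flags);
}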
@@ -663,6 +698,12 @@
 static const struct of_device_id crypto_of_id_table[] = {
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ROCKCHIP_V3)
+	/* crypto v4 below, same soc data as crypto v3 */
+	{
+		.compatible = "rockchip,crypto-v4",
+		.data = (void *)&cryto_v3_soc_data,
+	},
+
 	/* crypto v3 below */
 	{
 		.compatible = "rockchip,crypto-v3",
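Reviewer's note: the new "rockchip,crypto-v4" entry points at the existing cryto_v3_soc_data, i.e. the v4 block is driven as register-compatible with v3. Sharing one soc-data blob between several compatibles is a standard of_device_id idiom; a minimal sketch with illustrative names, not the driver's real table:

/* Sketch only: two compatibles sharing one match-data blob. */
#include <linux/module.h>
#include <linux/mod_devicetable.h>

struct demo_soc_data {
	int version;
};

static const struct demo_soc_data demo_v3_soc_data = { .version = 3 };

static const struct of_device_id demo_of_id_table[] = {
	/* v4 hardware is register-compatible, so it reuses the v3 data */
	{ .compatible = "demo,crypto-v4", .data = &demo_v3_soc_data },
	{ .compatible = "demo,crypto-v3", .data = &demo_v3_soc_data },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_of_id_table);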
---|