@@ -281,9 +281,11 @@
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
        struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
-       struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+       struct rk_alg_ctx *alg_ctx;
 
        spin_lock(&rk_dev->lock);
+
+       alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
        rk_dev->stat.irq_cnt++;
 
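With rk_alg_ctx_cast() moved below spin_lock(), rk_dev->async_req is only read while rk_dev->lock is held, so the IRQ path can no longer race with the code that installs or clears the in-flight request. For orientation, a minimal sketch of the handler after this hunk; the interrupt-status handling, the matching unlock and the return value live in the elided middle of the function and are assumptions here:

static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
        struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
        struct rk_alg_ctx *alg_ctx;

        spin_lock(&rk_dev->lock);

        /* async_req is only stable while rk_dev->lock is held */
        alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);

        rk_dev->stat.irq_cnt++;

        /* ... device-specific IRQ status handling and use of alg_ctx elided (assumed) ... */

        spin_unlock(&rk_dev->lock);

        return IRQ_HANDLED;
}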
@@ -390,23 +392,22 @@
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
 
+       spin_lock_irqsave(&rk_dev->lock, flags);
        if (rk_dev->async_req) {
                dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
-               return;
+               goto exit;
        }
 
        rk_dev->err = 0;
-       spin_lock_irqsave(&rk_dev->lock, flags);
+
        backlog = crypto_get_backlog(&rk_dev->queue);
        async_req = crypto_dequeue_request(&rk_dev->queue);
 
        if (!async_req) {
                rk_dev->busy = false;
-               spin_unlock_irqrestore(&rk_dev->lock, flags);
-               return;
+               goto exit;
        }
        rk_dev->stat.dequeue_cnt++;
-       spin_unlock_irqrestore(&rk_dev->lock, flags);
 
        if (backlog) {
                backlog->complete(backlog, -EINPROGRESS);
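The dequeue logic above pairs with an enqueue path that is outside this excerpt. Assuming the driver uses the standard struct crypto_queue helpers, which the crypto_get_backlog() and crypto_dequeue_request() calls imply, the producer side would look roughly like the sketch below; rk_crypto_enqueue() and the queue_task field are hypothetical names used only for illustration:

/* Hypothetical enqueue counterpart: only crypto_enqueue_request() is the
 * standard kernel API, the surrounding names are assumed. */
static int rk_crypto_enqueue(struct rk_crypto_dev *rk_dev,
                             struct crypto_async_request *async_req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rk_dev->lock, flags);
        /* -EINPROGRESS when queued, -EBUSY when the request went to the backlog */
        ret = crypto_enqueue_request(&rk_dev->queue, async_req);
        spin_unlock_irqrestore(&rk_dev->lock, flags);

        /* kick the tasklet that runs the dequeue callback shown above */
        tasklet_schedule(&rk_dev->queue_task);

        return ret;
}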
@@ -417,12 +418,26 @@
        rk_dev->err = rk_start_op(rk_dev);
        if (rk_dev->err)
                rk_complete_op(rk_dev, rk_dev->err);
+
+exit:
+       spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static void rk_crypto_done_task_cb(unsigned long data)
 {
        struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
-       struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+       struct rk_alg_ctx *alg_ctx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rk_dev->lock, flags);
+
+       if (!rk_dev->async_req) {
+               dev_err(rk_dev->dev, "done task receive invalid async_req\n");
+               spin_unlock_irqrestore(&rk_dev->lock, flags);
+               return;
+       }
+
+       alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
        rk_dev->stat.done_cnt++;
 
@@ -440,9 +455,12 @@
        if (rk_dev->err)
                goto exit;
 
+       spin_unlock_irqrestore(&rk_dev->lock, flags);
+
        return;
 exit:
        rk_complete_op(rk_dev, rk_dev->err);
+       spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
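Taken together, the tasklet hunks converge on a single-exit locking shape: the lock is taken once at the top of the callback, every early bail-out becomes a goto exit, and the unlock sits at the end (the done callback keeps an extra unlock-and-return for the !async_req case). Below is a sketch of the queue callback stitched together from the hunks above; the function name is inferred by analogy with rk_crypto_done_task_cb(), and the elided lines between the backlog completion and rk_start_op(), where the dequeued request is presumably installed as rk_dev->async_req, are marked as assumptions:

static void rk_crypto_queue_task_cb(unsigned long data)        /* name assumed */
{
        struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;

        spin_lock_irqsave(&rk_dev->lock, flags);
        if (rk_dev->async_req) {
                dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
                goto exit;
        }

        rk_dev->err = 0;

        backlog = crypto_get_backlog(&rk_dev->queue);
        async_req = crypto_dequeue_request(&rk_dev->queue);

        if (!async_req) {
                rk_dev->busy = false;
                goto exit;
        }
        rk_dev->stat.dequeue_cnt++;

        if (backlog) {
                backlog->complete(backlog, -EINPROGRESS);
                backlog = NULL;         /* assumed, elided in the excerpt */
        }

        /* elided: the dequeued request is presumably recorded in
         * rk_dev->async_req before the hardware is started (assumed) */

        rk_dev->err = rk_start_op(rk_dev);
        if (rk_dev->err)
                rk_complete_op(rk_dev, rk_dev->err);

exit:
        spin_unlock_irqrestore(&rk_dev->lock, flags);
}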
@@ -663,6 +681,12 @@
 static const struct of_device_id crypto_of_id_table[] = {
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ROCKCHIP_V3)
+       /* crypto v4 below is the same as crypto-v3 */
+       {
+               .compatible = "rockchip,crypto-v4",
+               .data = (void *)&cryto_v3_soc_data,
+       },
+
        /* crypto v3 in belows */
        {
                .compatible = "rockchip,crypto-v3",