| .. | .. |
|---|
| 4 | 4 | * Queue Interface backend functionality |
|---|
| 5 | 5 | * |
|---|
| 6 | 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. |
|---|
| 7 | | - * Copyright 2016-2017 NXP |
|---|
| 7 | + * Copyright 2016-2017, 2019-2020 NXP |
|---|
| 8 | 8 | */ |
|---|
| 9 | 9 | |
|---|
| 10 | 10 | #include <linux/cpumask.h> |
|---|
| 11 | 11 | #include <linux/kthread.h> |
|---|
| 12 | 12 | #include <soc/fsl/qman.h> |
|---|
| 13 | 13 | |
|---|
| 14 | +#include "debugfs.h" |
|---|
| 14 | 15 | #include "regs.h" |
|---|
| 15 | 16 | #include "qi.h" |
|---|
| 16 | 17 | #include "desc.h" |
|---|
| .. | .. |
|---|
| 18 | 19 | #include "desc_constr.h" |
|---|
| 19 | 20 | |
|---|
| 20 | 21 | #define PREHDR_RSLS_SHIFT 31 |
|---|
| 22 | +#define PREHDR_ABS BIT(25) |
|---|
| 21 | 23 | |
|---|
| 22 | 24 | /* |
|---|
| 23 | 25 | * Use a reasonable backlog of frames (per CPU) as congestion threshold, |
|---|
| .. | .. |
|---|
| 58 | 60 | /* |
|---|
| 59 | 61 | * caam_qi_priv - CAAM QI backend private params |
|---|
| 60 | 62 | * @cgr: QMan congestion group |
|---|
| 61 | | - * @qi_pdev: platform device for QI backend |
|---|
| 62 | 63 | */ |
|---|
| 63 | 64 | struct caam_qi_priv { |
|---|
| 64 | 65 | struct qman_cgr cgr; |
|---|
| 65 | | - struct platform_device *qi_pdev; |
|---|
| 66 | 66 | }; |
|---|
| 67 | 67 | |
|---|
| 68 | 68 | static struct caam_qi_priv qipriv ____cacheline_aligned; |
|---|
| .. | .. |
|---|
| 73 | 73 | */ |
|---|
| 74 | 74 | bool caam_congested __read_mostly; |
|---|
| 75 | 75 | EXPORT_SYMBOL(caam_congested); |
|---|
| 76 | | - |
|---|
| 77 | | -#ifdef CONFIG_DEBUG_FS |
|---|
| 78 | | -/* |
|---|
| 79 | | - * This is a counter for the number of times the congestion group (where all |
|---|
| 80 | | - * the request and response queues are) reached congestion. Incremented |
|---|
| 81 | | - * each time the congestion callback is called with congested == true. |
|---|
| 82 | | - */ |
|---|
| 83 | | -static u64 times_congested; |
|---|
| 84 | | -#endif |
|---|
| 85 | | - |
|---|
| 86 | | -/* |
|---|
| 87 | | - * CPU from where the module initialised. This is required because QMan driver |
|---|
| 88 | | - * requires CGRs to be removed from same CPU from where they were originally |
|---|
| 89 | | - * allocated. |
|---|
| 90 | | - */ |
|---|
| 91 | | -static int mod_init_cpu; |
|---|
| 92 | 76 | |
|---|
| 93 | 77 | /* |
|---|
| 94 | 78 | * This is a cache of buffers, from which the users of CAAM QI driver |
|---|
| .. | .. |
|---|
| 101 | 85 | * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here |
|---|
| 102 | 86 | */ |
|---|
| 103 | 87 | static struct kmem_cache *qi_cache; |
|---|
| 88 | + |
|---|
| 89 | +static void *caam_iova_to_virt(struct iommu_domain *domain, |
|---|
| 90 | + dma_addr_t iova_addr) |
|---|
| 91 | +{ |
|---|
| 92 | + phys_addr_t phys_addr; |
|---|
| 93 | + |
|---|
| 94 | + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; |
|---|
| 95 | + |
|---|
| 96 | + return phys_to_virt(phys_addr); |
|---|
| 97 | +} |
|---|
| 104 | 98 | |
|---|
| 105 | 99 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) |
|---|
| 106 | 100 | { |
|---|
| .. | .. |
|---|
| 122 | 116 | |
|---|
| 123 | 117 | do { |
|---|
| 124 | 118 | ret = qman_enqueue(req->drv_ctx->req_fq, &fd); |
|---|
| 125 | | - if (likely(!ret)) |
|---|
| 119 | + if (likely(!ret)) { |
|---|
| 120 | + refcount_inc(&req->drv_ctx->refcnt); |
|---|
| 126 | 121 | return 0; |
|---|
| 122 | + } |
|---|
| 127 | 123 | |
|---|
| 128 | 124 | if (ret != -EBUSY) |
|---|
| 129 | 125 | break; |
|---|
| .. | .. |
|---|
| 142 | 138 | const struct qm_fd *fd; |
|---|
| 143 | 139 | struct caam_drv_req *drv_req; |
|---|
| 144 | 140 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
|---|
| 141 | + struct caam_drv_private *priv = dev_get_drvdata(qidev); |
|---|
| 145 | 142 | |
|---|
| 146 | 143 | fd = &msg->ern.fd; |
|---|
| 147 | 144 | |
|---|
| 148 | | - if (qm_fd_get_format(fd) != qm_fd_compound) { |
|---|
| 149 | | - dev_err(qidev, "Non-compound FD from CAAM\n"); |
|---|
| 150 | | - return; |
|---|
| 151 | | - } |
|---|
| 152 | | - |
|---|
| 153 | | - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); |
|---|
| 145 | + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
|---|
| 154 | 146 | if (!drv_req) { |
|---|
| 155 | 147 | dev_err(qidev, |
|---|
| 156 | 148 | "Can't find original request for CAAM response\n"); |
|---|
| 157 | 149 | return; |
|---|
| 158 | 150 | } |
|---|
| 159 | 151 | |
|---|
| 152 | + refcount_dec(&drv_req->drv_ctx->refcnt); |
|---|
| 153 | + |
|---|
| 154 | + if (qm_fd_get_format(fd) != qm_fd_compound) { |
|---|
| 155 | + dev_err(qidev, "Non-compound FD from CAAM\n"); |
|---|
| 156 | + return; |
|---|
| 157 | + } |
|---|
| 158 | + |
|---|
| 160 | 159 | dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), |
|---|
| 161 | 160 | sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); |
|---|
| 162 | 161 | |
|---|
| 163 | | - drv_req->cbk(drv_req, -EIO); |
|---|
| 162 | + if (fd->status) |
|---|
| 163 | + drv_req->cbk(drv_req, be32_to_cpu(fd->status)); |
|---|
| 164 | + else |
|---|
| 165 | + drv_req->cbk(drv_req, JRSTA_SSRC_QI); |
|---|
| 164 | 166 | } |
|---|
| 165 | 167 | |
|---|
| 166 | 168 | static struct qman_fq *create_caam_req_fq(struct device *qidev, |
|---|
| .. | .. |
|---|
| 281 | 283 | return ret; |
|---|
| 282 | 284 | } |
|---|
| 283 | 285 | |
|---|
| 284 | | -static int empty_caam_fq(struct qman_fq *fq) |
|---|
| 286 | +static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx) |
|---|
| 285 | 287 | { |
|---|
| 286 | 288 | int ret; |
|---|
| 289 | + int retries = 10; |
|---|
| 287 | 290 | struct qm_mcr_queryfq_np np; |
|---|
| 288 | 291 | |
|---|
| 289 | 292 | /* Wait till the older CAAM FQ get empty */ |
|---|
| .. | .. |
|---|
| 298 | 301 | msleep(20); |
|---|
| 299 | 302 | } while (1); |
|---|
| 300 | 303 | |
|---|
| 301 | | - /* |
|---|
| 302 | | - * Give extra time for pending jobs from this FQ in holding tanks |
|---|
| 303 | | - * to get processed |
|---|
| 304 | | - */ |
|---|
| 305 | | - msleep(20); |
|---|
| 304 | + /* Wait until pending jobs from this FQ are processed by CAAM */ |
|---|
| 305 | + do { |
|---|
| 306 | + if (refcount_read(&drv_ctx->refcnt) == 1) |
|---|
| 307 | + break; |
|---|
| 308 | + |
|---|
| 309 | + msleep(20); |
|---|
| 310 | + } while (--retries); |
|---|
| 311 | + |
|---|
| 312 | + if (!retries) |
|---|
| 313 | + dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n", |
|---|
| 314 | + refcount_read(&drv_ctx->refcnt), fq->fqid); |
|---|
| 315 | + |
|---|
| 306 | 316 | return 0; |
|---|
| 307 | 317 | } |
|---|
| 308 | 318 | |
|---|
| .. | .. |
|---|
| 325 | 335 | /* Create a new req FQ in parked state */ |
|---|
| 326 | 336 | new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, |
|---|
| 327 | 337 | drv_ctx->context_a, 0); |
|---|
| 328 | | - if (unlikely(IS_ERR_OR_NULL(new_fq))) { |
|---|
| 338 | + if (IS_ERR(new_fq)) { |
|---|
| 329 | 339 | dev_err(qidev, "FQ allocation for shdesc update failed\n"); |
|---|
| 330 | 340 | return PTR_ERR(new_fq); |
|---|
| 331 | 341 | } |
|---|
| .. | .. |
|---|
| 334 | 344 | drv_ctx->req_fq = new_fq; |
|---|
| 335 | 345 | |
|---|
| 336 | 346 | /* Empty and remove the older FQ */ |
|---|
| 337 | | - ret = empty_caam_fq(old_fq); |
|---|
| 347 | + ret = empty_caam_fq(old_fq, drv_ctx); |
|---|
| 338 | 348 | if (ret) { |
|---|
| 339 | 349 | dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret); |
|---|
| 340 | 350 | |
|---|
| .. | .. |
|---|
| 353 | 363 | */ |
|---|
| 354 | 364 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
|---|
| 355 | 365 | num_words); |
|---|
| 366 | + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); |
|---|
| 356 | 367 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
|---|
| 357 | 368 | dma_sync_single_for_device(qidev, drv_ctx->context_a, |
|---|
| 358 | 369 | sizeof(drv_ctx->sh_desc) + |
|---|
| .. | .. |
|---|
| 408 | 419 | */ |
|---|
| 409 | 420 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
|---|
| 410 | 421 | num_words); |
|---|
| 422 | + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); |
|---|
| 411 | 423 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
|---|
| 412 | 424 | size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); |
|---|
| 413 | 425 | hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, |
|---|
| .. | .. |
|---|
| 438 | 450 | /* Attach request FQ */ |
|---|
| 439 | 451 | drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, |
|---|
| 440 | 452 | QMAN_INITFQ_FLAG_SCHED); |
|---|
| 441 | | - if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) { |
|---|
| 453 | + if (IS_ERR(drv_ctx->req_fq)) { |
|---|
| 442 | 454 | dev_err(qidev, "create_caam_req_fq failed\n"); |
|---|
| 443 | 455 | dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); |
|---|
| 444 | 456 | kfree(drv_ctx); |
|---|
| 445 | 457 | return ERR_PTR(-ENOMEM); |
|---|
| 446 | 458 | } |
|---|
| 459 | + |
|---|
| 460 | + /* init reference counter used to track references to request FQ */ |
|---|
| 461 | + refcount_set(&drv_ctx->refcnt, 1); |
|---|
| 447 | 462 | |
|---|
| 448 | 463 | drv_ctx->qidev = qidev; |
|---|
| 449 | 464 | return drv_ctx; |
|---|
| .. | .. |
|---|
| 492 | 507 | } |
|---|
| 493 | 508 | EXPORT_SYMBOL(caam_drv_ctx_rel); |
|---|
| 494 | 509 | |
|---|
| 495 | | -int caam_qi_shutdown(struct device *qidev) |
|---|
| 510 | +static void caam_qi_shutdown(void *data) |
|---|
| 496 | 511 | { |
|---|
| 497 | | - int i, ret; |
|---|
| 498 | | - struct caam_qi_priv *priv = dev_get_drvdata(qidev); |
|---|
| 512 | + int i; |
|---|
| 513 | + struct device *qidev = data; |
|---|
| 514 | + struct caam_qi_priv *priv = &qipriv; |
|---|
| 499 | 515 | const cpumask_t *cpus = qman_affine_cpus(); |
|---|
| 500 | | - struct cpumask old_cpumask = current->cpus_allowed; |
|---|
| 501 | 516 | |
|---|
| 502 | 517 | for_each_cpu(i, cpus) { |
|---|
| 503 | 518 | struct napi_struct *irqtask; |
|---|
| .. | .. |
|---|
| 510 | 525 | dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); |
|---|
| 511 | 526 | } |
|---|
| 512 | 527 | |
|---|
| 513 | | - /* |
|---|
| 514 | | - * QMan driver requires CGRs to be deleted from same CPU from where they |
|---|
| 515 | | - * were instantiated. Hence we get the module removal execute from the |
|---|
| 516 | | - * same CPU from where it was originally inserted. |
|---|
| 517 | | - */ |
|---|
| 518 | | - set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); |
|---|
| 519 | | - |
|---|
| 520 | | - ret = qman_delete_cgr(&priv->cgr); |
|---|
| 521 | | - if (ret) |
|---|
| 522 | | - dev_err(qidev, "Deletion of CGR failed: %d\n", ret); |
|---|
| 523 | | - else |
|---|
| 524 | | - qman_release_cgrid(priv->cgr.cgrid); |
|---|
| 528 | + qman_delete_cgr_safe(&priv->cgr); |
|---|
| 529 | + qman_release_cgrid(priv->cgr.cgrid); |
|---|
| 525 | 530 | |
|---|
| 526 | 531 | kmem_cache_destroy(qi_cache); |
|---|
| 527 | | - |
|---|
| 528 | | - /* Now that we're done with the CGRs, restore the cpus allowed mask */ |
|---|
| 529 | | - set_cpus_allowed_ptr(current, &old_cpumask); |
|---|
| 530 | | - |
|---|
| 531 | | - platform_device_unregister(priv->qi_pdev); |
|---|
| 532 | | - return ret; |
|---|
| 533 | 532 | } |
|---|
| 534 | 533 | |
|---|
| 535 | 534 | static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) |
|---|
| .. | .. |
|---|
| 537 | 536 | caam_congested = congested; |
|---|
| 538 | 537 | |
|---|
| 539 | 538 | if (congested) { |
|---|
| 540 | | -#ifdef CONFIG_DEBUG_FS |
|---|
| 541 | | - times_congested++; |
|---|
| 542 | | -#endif |
|---|
| 539 | + caam_debugfs_qi_congested(); |
|---|
| 540 | + |
|---|
| 543 | 541 | pr_debug_ratelimited("CAAM entered congestion\n"); |
|---|
| 544 | 542 | |
|---|
| 545 | 543 | } else { |
|---|
| .. | .. |
|---|
| 572 | 570 | struct caam_drv_req *drv_req; |
|---|
| 573 | 571 | const struct qm_fd *fd; |
|---|
| 574 | 572 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
|---|
| 573 | + struct caam_drv_private *priv = dev_get_drvdata(qidev); |
|---|
| 575 | 574 | u32 status; |
|---|
| 576 | 575 | |
|---|
| 577 | 576 | if (caam_qi_napi_schedule(p, caam_napi)) |
|---|
| 578 | 577 | return qman_cb_dqrr_stop; |
|---|
| 579 | 578 | |
|---|
| 580 | 579 | fd = &dqrr->fd; |
|---|
| 580 | + |
|---|
| 581 | + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
|---|
| 582 | + if (unlikely(!drv_req)) { |
|---|
| 583 | + dev_err(qidev, |
|---|
| 584 | + "Can't find original request for caam response\n"); |
|---|
| 585 | + return qman_cb_dqrr_consume; |
|---|
| 586 | + } |
|---|
| 587 | + |
|---|
| 588 | + refcount_dec(&drv_req->drv_ctx->refcnt); |
|---|
| 589 | + |
|---|
| 581 | 590 | status = be32_to_cpu(fd->status); |
|---|
| 582 | 591 | if (unlikely(status)) { |
|---|
| 583 | 592 | u32 ssrc = status & JRSTA_SSRC_MASK; |
|---|
| .. | .. |
|---|
| 585 | 594 | |
|---|
| 586 | 595 | if (ssrc != JRSTA_SSRC_CCB_ERROR || |
|---|
| 587 | 596 | err_id != JRSTA_CCBERR_ERRID_ICVCHK) |
|---|
| 588 | | - dev_err(qidev, "Error: %#x in CAAM response FD\n", |
|---|
| 589 | | - status); |
|---|
| 597 | + dev_err_ratelimited(qidev, |
|---|
| 598 | + "Error: %#x in CAAM response FD\n", |
|---|
| 599 | + status); |
|---|
| 590 | 600 | } |
|---|
| 591 | 601 | |
|---|
| 592 | 602 | if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { |
|---|
| 593 | 603 | dev_err(qidev, "Non-compound FD from CAAM\n"); |
|---|
| 594 | | - return qman_cb_dqrr_consume; |
|---|
| 595 | | - } |
|---|
| 596 | | - |
|---|
| 597 | | - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); |
|---|
| 598 | | - if (unlikely(!drv_req)) { |
|---|
| 599 | | - dev_err(qidev, |
|---|
| 600 | | - "Can't find original request for caam response\n"); |
|---|
| 601 | 604 | return qman_cb_dqrr_consume; |
|---|
| 602 | 605 | } |
|---|
| 603 | 606 | |
|---|
| .. | .. |
|---|
| 714 | 717 | int caam_qi_init(struct platform_device *caam_pdev) |
|---|
| 715 | 718 | { |
|---|
| 716 | 719 | int err, i; |
|---|
| 717 | | - struct platform_device *qi_pdev; |
|---|
| 718 | 720 | struct device *ctrldev = &caam_pdev->dev, *qidev; |
|---|
| 719 | 721 | struct caam_drv_private *ctrlpriv; |
|---|
| 720 | 722 | const cpumask_t *cpus = qman_affine_cpus(); |
|---|
| 721 | | - struct cpumask old_cpumask = current->cpus_allowed; |
|---|
| 722 | | - static struct platform_device_info qi_pdev_info = { |
|---|
| 723 | | - .name = "caam_qi", |
|---|
| 724 | | - .id = PLATFORM_DEVID_NONE |
|---|
| 725 | | - }; |
|---|
| 726 | | - |
|---|
| 727 | | - /* |
|---|
| 728 | | - * QMAN requires CGRs to be removed from same CPU+portal from where it |
|---|
| 729 | | - * was originally allocated. Hence we need to note down the |
|---|
| 730 | | - * initialisation CPU and use the same CPU for module exit. |
|---|
| 731 | | - * We select the first CPU to from the list of portal owning CPUs. |
|---|
| 732 | | - * Then we pin module init to this CPU. |
|---|
| 733 | | - */ |
|---|
| 734 | | - mod_init_cpu = cpumask_first(cpus); |
|---|
| 735 | | - set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); |
|---|
| 736 | | - |
|---|
| 737 | | - qi_pdev_info.parent = ctrldev; |
|---|
| 738 | | - qi_pdev_info.dma_mask = dma_get_mask(ctrldev); |
|---|
| 739 | | - qi_pdev = platform_device_register_full(&qi_pdev_info); |
|---|
| 740 | | - if (IS_ERR(qi_pdev)) |
|---|
| 741 | | - return PTR_ERR(qi_pdev); |
|---|
| 742 | | - set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); |
|---|
| 743 | 723 | |
|---|
| 744 | 724 | ctrlpriv = dev_get_drvdata(ctrldev); |
|---|
| 745 | | - qidev = &qi_pdev->dev; |
|---|
| 746 | | - |
|---|
| 747 | | - qipriv.qi_pdev = qi_pdev; |
|---|
| 748 | | - dev_set_drvdata(qidev, &qipriv); |
|---|
| 725 | + qidev = ctrldev; |
|---|
| 749 | 726 | |
|---|
| 750 | 727 | /* Initialize the congestion detection */ |
|---|
| 751 | 728 | err = init_cgr(qidev); |
|---|
| 752 | 729 | if (err) { |
|---|
| 753 | 730 | dev_err(qidev, "CGR initialization failed: %d\n", err); |
|---|
| 754 | | - platform_device_unregister(qi_pdev); |
|---|
| 755 | 731 | return err; |
|---|
| 756 | 732 | } |
|---|
| 757 | 733 | |
|---|
| .. | .. |
|---|
| 760 | 736 | if (err) { |
|---|
| 761 | 737 | dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); |
|---|
| 762 | 738 | free_rsp_fqs(); |
|---|
| 763 | | - platform_device_unregister(qi_pdev); |
|---|
| 764 | 739 | return err; |
|---|
| 765 | 740 | } |
|---|
| 766 | 741 | |
|---|
| .. | .. |
|---|
| 783 | 758 | napi_enable(irqtask); |
|---|
| 784 | 759 | } |
|---|
| 785 | 760 | |
|---|
| 786 | | - /* Hook up QI device to parent controlling caam device */ |
|---|
| 787 | | - ctrlpriv->qidev = qidev; |
|---|
| 788 | | - |
|---|
| 789 | 761 | qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, |
|---|
| 790 | 762 | SLAB_CACHE_DMA, NULL); |
|---|
| 791 | 763 | if (!qi_cache) { |
|---|
| 792 | 764 | dev_err(qidev, "Can't allocate CAAM cache\n"); |
|---|
| 793 | 765 | free_rsp_fqs(); |
|---|
| 794 | | - platform_device_unregister(qi_pdev); |
|---|
| 795 | 766 | return -ENOMEM; |
|---|
| 796 | 767 | } |
|---|
| 797 | 768 | |
|---|
| 798 | | - /* Done with the CGRs; restore the cpus allowed mask */ |
|---|
| 799 | | - set_cpus_allowed_ptr(current, &old_cpumask); |
|---|
| 800 | | -#ifdef CONFIG_DEBUG_FS |
|---|
| 801 | | - debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, |
|---|
| 802 | | - ×_congested, &caam_fops_u64_ro); |
|---|
| 803 | | -#endif |
|---|
| 769 | + caam_debugfs_qi_init(ctrlpriv); |
|---|
| 770 | + |
|---|
| 771 | + err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); |
|---|
| 772 | + if (err) |
|---|
| 773 | + return err; |
|---|
| 774 | + |
|---|
| 804 | 775 | dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); |
|---|
| 805 | 776 | return 0; |
|---|
| 806 | 777 | } |
|---|