@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
  * was acquired by Western Digital in 2012.
  *
  * Copyright 2012 sTec, Inc.
  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
  */
 
 #include <linux/kernel.h>
@@ -27,7 +25,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/completion.h>
 #include <linux/scatterlist.h>
-#include <linux/version.h>
 #include <linux/err.h>
 #include <linux/aer.h>
 #include <linux/wait.h>
@@ -181,6 +178,7 @@
 	struct fit_completion_entry_v1 completion;
 
 	struct fit_comp_error_info err_info;
+	int retries;
 
 	blk_status_t status;
 };
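The new `retries` field lives in the driver's per-request context (`struct skd_request_context`), which blk-mq allocates as a trailer behind every request. A minimal sketch of that pattern, with hypothetical names, assuming a `cmd_size` set when the tag set is created:

```c
#include <linux/blk-mq.h>

/* Hypothetical per-request context, standing in for skd_request_context. */
struct my_req_ctx {
	int retries;
};

/* blk-mq reserves tag_set.cmd_size bytes behind each request;
 * blk_mq_rq_to_pdu() returns a pointer to that area. */
static inline struct my_req_ctx *my_rq_ctx(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
```

The driver sizes that trailer via `tag_set.cmd_size`, visible in the tag-set hunk further down.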
@@ -382,11 +380,12 @@
  * READ/WRITE REQUESTS
  *****************************************************************************
  */
-static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
+static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
 {
 	int *count = data;
 
 	count++;
+	return true;
 }
 
 static int skd_in_flight(struct skd_device *skdev)
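The `blk_mq_tagset_busy_iter()` callbacks changed from `void` to `bool`, where returning `true` continues the walk; the same conversion is applied to `skd_recover_request()` further down. Note that the body above increments the local pointer (`count++`) rather than the counter it points at, a quirk the conversion preserves. A counting callback that actually tallies in-flight requests would look like this sketch (hypothetical names):

```c
#include <linux/blk-mq.h>

/* busy_tag_iter_fn: return true to keep iterating over busy requests. */
static bool count_busy(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;	/* dereference, unlike the driver's count++ above */
	return true;
}

static int busy_request_count(struct blk_mq_tag_set *set)
{
	int count = 0;

	blk_mq_tagset_busy_iter(set, count_busy, &count);
	return count;
}
```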
@@ -493,6 +492,11 @@
 
 	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
 		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
+
+	if (!(req->rq_flags & RQF_DONTPREP)) {
+		skreq->retries = 0;
+		req->rq_flags |= RQF_DONTPREP;
+	}
 
 	blk_mq_start_request(req);
 
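`RQF_DONTPREP` survives `blk_mq_requeue_request()`, so state guarded by it is initialized exactly once per request even when the request is requeued and dispatched again; that property is what lets the new `retries` counter replace `req->special`. A sketch of the idiom, reusing the hypothetical `my_req_ctx` from above:

```c
/* Runs in ->queue_rq(); the flag persists across requeues, so retries
 * is zeroed only on the request's first pass through the driver. */
static void init_ctx_once(struct request *req)
{
	struct my_req_ctx *ctx = blk_mq_rq_to_pdu(req);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ctx->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}
```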
@@ -632,7 +636,7 @@
 	 * Map scatterlist to PCI bus addresses.
 	 * Note PCI might change the number of entries.
 	 */
-	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
+	n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
 	if (n_sg <= 0)
 		return false;
 
@@ -682,7 +686,8 @@
 	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
 		skreq->sksg_dma_address +
 		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
-	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
+	dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
+		     skreq->data_dir);
 }
 
 /*
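`pci_map_sg()`/`pci_unmap_sg()` were thin wrappers around the generic DMA API, so the two scatterlist hunks above call `dma_map_sg()`/`dma_unmap_sg()` on `&pdev->dev` directly. A minimal sketch of the pairing (hypothetical helper; the error value is chosen for illustration):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int map_sg_for_io(struct pci_dev *pdev, struct scatterlist *sgl,
			 int n_sg, enum dma_data_direction dir)
{
	/* dma_map_sg() may coalesce entries; 0 means the mapping failed. */
	int mapped = dma_map_sg(&pdev->dev, sgl, n_sg, dir);

	if (mapped <= 0)
		return -EIO;
	/* ... program the hardware with 'mapped' descriptors ... */

	/* Unmap with the original n_sg, not the returned count. */
	dma_unmap_sg(&pdev->dev, sgl, n_sg, dir);
	return 0;
}
```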
@@ -1411,7 +1416,8 @@
 	case SKD_CHECK_STATUS_REPORT_GOOD:
 	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
 		skreq->status = BLK_STS_OK;
-		blk_mq_complete_request(req);
+		if (likely(!blk_should_fake_timeout(req->q)))
+			blk_mq_complete_request(req);
 		break;
 
 	case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -1424,17 +1430,18 @@
 		break;
 
 	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
-		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
+		if (++skreq->retries < SKD_MAX_RETRIES) {
 			skd_log_skreq(skdev, skreq, "retry");
 			blk_mq_requeue_request(req, true);
 			break;
 		}
-		/* fall through */
+		fallthrough;
 
 	case SKD_CHECK_STATUS_REPORT_ERROR:
 	default:
 		skreq->status = BLK_STS_IOERR;
-		blk_mq_complete_request(req);
+		if (likely(!blk_should_fake_timeout(req->q)))
+			blk_mq_complete_request(req);
 		break;
 	}
 }
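`req->special` was removed from `struct request`, so the retry count moves into the driver's own per-request context; at the same time the `/* fall through */` comment becomes the `fallthrough;` pseudo-keyword, which compilers can verify under `-Wimplicit-fallthrough`. A compact sketch of the resulting shape (disposition codes and retry limit are hypothetical, `my_req_ctx` as above):

```c
#include <linux/blk-mq.h>

enum my_disp { MY_DISP_REQUEUE, MY_DISP_ERROR };

static void my_resolve(enum my_disp disp, struct request *req)
{
	struct my_req_ctx *ctx = blk_mq_rq_to_pdu(req);

	switch (disp) {
	case MY_DISP_REQUEUE:
		if (++ctx->retries < 5) {
			blk_mq_requeue_request(req, true);
			break;
		}
		fallthrough;	/* retries exhausted: report the error */
	case MY_DISP_ERROR:
	default:
		blk_mq_end_request(req, BLK_STS_IOERR);
		break;
	}
}
```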
@@ -1554,7 +1561,8 @@
 	 */
 	if (likely(cmp_status == SAM_STAT_GOOD)) {
 		skreq->status = BLK_STS_OK;
-		blk_mq_complete_request(rq);
+		if (likely(!blk_should_fake_timeout(rq->q)))
+			blk_mq_complete_request(rq);
 	} else {
 		skd_resolve_req_exception(skdev, skreq, rq);
 	}
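Every completion site gains the same guard: `blk_should_fake_timeout()` belongs to the block layer's timeout fault injection (`CONFIG_FAIL_IO_TIMEOUT`). When the injection decides a completion should be "lost", the driver skips `blk_mq_complete_request()` so the timeout machinery gets exercised instead; without that config the check compiles away. The guard in isolation:

```c
#include <linux/blk-mq.h>

/* Completion helper: drop the completion when fault injection asks us
 * to fake a timeout for this queue, otherwise complete normally. */
static void my_complete(struct request *rq)
{
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
```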
@@ -1886,13 +1894,13 @@
 		skd_skdev_state_to_str(skdev->state), skdev->state);
 }
 
-static void skd_recover_request(struct request *req, void *data, bool reserved)
+static bool skd_recover_request(struct request *req, void *data, bool reserved)
 {
 	struct skd_device *const skdev = data;
 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
 
 	if (skreq->state != SKD_REQ_STATE_BUSY)
-		return;
+		return true;
 
 	skd_log_skreq(skdev, skreq, "recover");
 
@@ -1903,6 +1911,7 @@
 	skreq->state = SKD_REQ_STATE_IDLE;
 	skreq->status = BLK_STS_IOERR;
 	blk_mq_complete_request(req);
+	return true;
 }
 
 static void skd_recover_requests(struct skd_device *skdev)
@@ -2632,8 +2641,8 @@
 		"comp pci_alloc, total bytes %zd entries %d\n",
 		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
 
-	skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
-				       &skdev->cq_dma_address);
+	skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+				    &skdev->cq_dma_address, GFP_KERNEL);
 
 	if (skcomp == NULL) {
 		rc = -ENOMEM;
@@ -2674,10 +2683,10 @@
 
 		skmsg->id = i + SKD_ID_FIT_MSG;
 
-		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
-						      SKD_N_FITMSG_BYTES,
-						      &skmsg->mb_dma_address);
-
+		skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
+						    SKD_N_FITMSG_BYTES,
+						    &skmsg->mb_dma_address,
+						    GFP_KERNEL);
 		if (skmsg->msg_buf == NULL) {
 			rc = -ENOMEM;
 			goto err_out;
@@ -2687,7 +2696,6 @@
 			 (FIT_QCMD_ALIGN - 1),
 			 "not aligned: msg_buf %p mb_dma_address %pad\n",
 			 skmsg->msg_buf, &skmsg->mb_dma_address);
-		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
 	}
 
 err_out:
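The allocation hunks swap `pci_alloc_consistent()`/`pci_zalloc_consistent()` for `dma_alloc_coherent()`, which takes an explicit gfp argument (the old wrappers hard-coded `GFP_ATOMIC`; these call sites run in process context, hence `GFP_KERNEL`) and returns memory the DMA core has already zeroed, which is why the trailing `memset()` is dropped. The matching `dma_free_coherent()` conversions follow below. A sketch of the lifecycle, with hypothetical helper names:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Allocate a coherent DMA buffer; it comes back zeroed, so no memset. */
static void *my_alloc_coherent(struct pci_dev *pdev, size_t size,
			       dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

static void my_free_coherent(struct pci_dev *pdev, size_t size, void *buf,
			     dma_addr_t dma_handle)
{
	dma_free_coherent(&pdev->dev, size, buf, dma_handle);
}
```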
@@ -2834,7 +2842,6 @@
 		skdev->sgs_per_request * sizeof(struct scatterlist);
 	skdev->tag_set.numa_node = NUMA_NO_NODE;
 	skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
-		BLK_MQ_F_SG_MERGE |
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 	skdev->tag_set.driver_data = skdev;
 	rc = blk_mq_alloc_tag_set(&skdev->tag_set);
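`BLK_MQ_F_SG_MERGE` was removed from the kernel once scatter-gather merging stopped being optional, so the driver simply drops it from the flag mask; nothing replaces it. The surviving flags, for reference (sketch):

```c
#include <linux/blk-mq.h>

static void my_init_tag_set_flags(struct blk_mq_tag_set *set)
{
	/* SG merging no longer has an opt-in flag; only merge policy and
	 * FIFO tag allocation remain configurable here. */
	set->flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
}
```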
@@ -2971,8 +2978,8 @@
 static void skd_free_skcomp(struct skd_device *skdev)
 {
 	if (skdev->skcomp_table)
-		pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
-				    skdev->skcomp_table, skdev->cq_dma_address);
+		dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+				  skdev->skcomp_table, skdev->cq_dma_address);
 
 	skdev->skcomp_table = NULL;
 	skdev->cq_dma_address = 0;
@@ -2991,8 +2998,8 @@
 		skmsg = &skdev->skmsg_table[i];
 
 		if (skmsg->msg_buf != NULL) {
-			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
-					    skmsg->msg_buf,
+			dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
+					  skmsg->msg_buf,
 					    skmsg->mb_dma_address);
 		}
 		skmsg->msg_buf = NULL;
@@ -3104,7 +3111,7 @@
 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
 {
 	dev_dbg(&skdev->pdev->dev, "add_disk\n");
-	device_add_disk(parent, skdev->disk);
+	device_add_disk(parent, skdev->disk, NULL);
 	return 0;
 }
 
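`device_add_disk()` gained a third parameter for default sysfs attribute groups; passing `NULL` keeps the old behavior. A driver exposing custom disk attributes would pass a NULL-terminated group array instead, e.g. (hypothetical names):

```c
#include <linux/genhd.h>

static const struct attribute_group *my_disk_groups[] = {
	/* &my_attr_group,  -- the driver's sysfs attributes */
	NULL,
};

static void my_attach_disk(struct device *parent, struct gendisk *disk)
{
	/* NULL here would reproduce the pre-change behavior, as in the hunk. */
	device_add_disk(parent, disk, my_disk_groups);
}
```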
@@ -3172,18 +3179,12 @@
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc)
 		goto err_out;
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (!rc) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
-				rc);
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
-			goto err_out_regions;
-		}
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+		goto err_out_regions;
 	}
 
 	if (!skd_major) {
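Both probe paths collapse the `pci_set_dma_mask()` plus `pci_set_consistent_dma_mask()` dance into `dma_set_mask_and_coherent()`, which sets the streaming and coherent masks in one call, with a 32-bit fallback; the identical change to the resume path follows below. The pattern in isolation:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* 64-bit DMA if the platform allows it, else fall back to 32-bit. */
static int my_setup_dma(struct pci_dev *pdev)
{
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
	return rc;
}
```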
@@ -3367,20 +3368,12 @@
 	rc = pci_request_regions(pdev, DRV_NAME);
 	if (rc)
 		goto err_out;
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (!rc) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
-			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
-				rc);
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-
-			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
-			goto err_out_regions;
-		}
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+		goto err_out_regions;
 	}
 
 	pci_set_master(pdev);