| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Driver for the Micron P320 SSD |
|---|
| 3 | 4 | * Copyright (C) 2011 Micron Technology, Inc. |
|---|
| .. | .. |
|---|
| 5 | 6 | * Portions of this code were derived from works subjected to the |
|---|
| 6 | 7 | * following copyright: |
|---|
| 7 | 8 | * Copyright (C) 2009 Integrated Device Technology, Inc. |
|---|
| 8 | | - * |
|---|
| 9 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 10 | | - * it under the terms of the GNU General Public License as published by |
|---|
| 11 | | - * the Free Software Foundation; either version 2 of the License, or |
|---|
| 12 | | - * (at your option) any later version. |
|---|
| 13 | | - * |
|---|
| 14 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 15 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 16 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 17 | | - * GNU General Public License for more details. |
|---|
| 18 | | - * |
|---|
| 19 | 9 | */ |
|---|
| 20 | 10 | |
|---|
| 21 | 11 | #include <linux/pci.h> |
|---|
| .. | .. |
|---|
| 40 | 30 | #include <linux/export.h> |
|---|
| 41 | 31 | #include <linux/debugfs.h> |
|---|
| 42 | 32 | #include <linux/prefetch.h> |
|---|
| 33 | +#include <linux/numa.h> |
|---|
| 43 | 34 | #include "mtip32xx.h" |
|---|
| 44 | 35 | |
|---|
| 45 | 36 | #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) |
|---|
| .. | .. |
|---|
| 138 | 129 | /* |
|---|
| 139 | 130 | * This function check_for_surprise_removal is called |
|---|
| 140 | 131 | * while card is removed from the system and it will |
|---|
| 141 | | - * read the vendor id from the configration space |
|---|
| 132 | + * read the vendor id from the configuration space |
|---|
| 142 | 133 | * |
|---|
| 143 | 134 | * @pdev Pointer to the pci_dev structure. |
|---|
| 144 | 135 | * |
|---|
| .. | .. |
|---|
| 166 | 157 | } |
|---|
| 167 | 158 | |
|---|
| 168 | 159 | return false; /* device present */ |
|---|
| 169 | | -} |
|---|
| 170 | | - |
|---|
| 171 | | -/* we have to use runtime tag to setup command header */ |
|---|
| 172 | | -static void mtip_init_cmd_header(struct request *rq) |
|---|
| 173 | | -{ |
|---|
| 174 | | - struct driver_data *dd = rq->q->queuedata; |
|---|
| 175 | | - struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 176 | | - |
|---|
| 177 | | - /* Point the command headers at the command tables. */ |
|---|
| 178 | | - cmd->command_header = dd->port->command_list + |
|---|
| 179 | | - (sizeof(struct mtip_cmd_hdr) * rq->tag); |
|---|
| 180 | | - cmd->command_header_dma = dd->port->command_list_dma + |
|---|
| 181 | | - (sizeof(struct mtip_cmd_hdr) * rq->tag); |
|---|
| 182 | | - |
|---|
| 183 | | - if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags)) |
|---|
| 184 | | - cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); |
|---|
| 185 | | - |
|---|
| 186 | | - cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); |
|---|
| 187 | | -} |
|---|
| 188 | | - |
|---|
| 189 | | -static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) |
|---|
| 190 | | -{ |
|---|
| 191 | | - struct request *rq; |
|---|
| 192 | | - |
|---|
| 193 | | - if (mtip_check_surprise_removal(dd->pdev)) |
|---|
| 194 | | - return NULL; |
|---|
| 195 | | - |
|---|
| 196 | | - rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED); |
|---|
| 197 | | - if (IS_ERR(rq)) |
|---|
| 198 | | - return NULL; |
|---|
| 199 | | - |
|---|
| 200 | | - /* Internal cmd isn't submitted via .queue_rq */ |
|---|
| 201 | | - mtip_init_cmd_header(rq); |
|---|
| 202 | | - |
|---|
| 203 | | - return blk_mq_rq_to_pdu(rq); |
|---|
| 204 | 160 | } |
|---|
| 205 | 161 | |
|---|
| 206 | 162 | static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, |
|---|
| .. | .. |
|---|
| 536 | 492 | struct request *req = blk_mq_rq_from_pdu(cmd); |
|---|
| 537 | 493 | |
|---|
| 538 | 494 | cmd->status = status; |
|---|
| 539 | | - blk_mq_complete_request(req); |
|---|
| 495 | + if (likely(!blk_should_fake_timeout(req->q))) |
|---|
| 496 | + blk_mq_complete_request(req); |
|---|
| 540 | 497 | } |
|---|
| 541 | 498 | |
|---|
| 542 | 499 | /* |
|---|
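The completion helper in the hunk above now skips blk_mq_complete_request() when the block layer's timeout fault injection is active on the queue; in that case the timeout handler is expected to finish the request. A minimal sketch of the same guard, with a hypothetical pdu (struct my_cmd and my_complete_rq() are made-up names):

```c
#include <linux/blk-mq.h>

/* Hypothetical per-request pdu; mirrors the cmd->status field used above. */
struct my_cmd {
	blk_status_t status;
};

static void my_complete_rq(struct request *rq, blk_status_t status)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = status;
	/*
	 * When timeout fault injection is active for this queue, leave the
	 * request to the timeout handler instead of completing it here.
	 */
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
```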
| .. | .. |
|---|
| 1023 | 980 | return -EFAULT; |
|---|
| 1024 | 981 | } |
|---|
| 1025 | 982 | |
|---|
| 1026 | | - int_cmd = mtip_get_int_command(dd); |
|---|
| 1027 | | - if (!int_cmd) { |
|---|
| 983 | + if (mtip_check_surprise_removal(dd->pdev)) |
|---|
| 984 | + return -EFAULT; |
|---|
| 985 | + |
|---|
| 986 | + rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED); |
|---|
| 987 | + if (IS_ERR(rq)) { |
|---|
| 1028 | 988 | dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n"); |
|---|
| 1029 | 989 | return -EFAULT; |
|---|
| 1030 | 990 | } |
|---|
| 1031 | | - rq = blk_mq_rq_from_pdu(int_cmd); |
|---|
| 1032 | | - rq->special = &icmd; |
|---|
| 1033 | 991 | |
|---|
| 1034 | 992 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
|---|
| 1035 | 993 | |
|---|
| .. | .. |
|---|
| 1050 | 1008 | } |
|---|
| 1051 | 1009 | |
|---|
| 1052 | 1010 | /* Copy the command to the command table */ |
|---|
| 1011 | + int_cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 1012 | + int_cmd->icmd = &icmd; |
|---|
| 1053 | 1013 | memcpy(int_cmd->command, fis, fis_len*4); |
|---|
| 1054 | 1014 | |
|---|
| 1055 | 1015 | rq->timeout = timeout; |
|---|
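The internal-command path in the two hunks above drops the mtip_get_int_command()/mtip_init_cmd_header() helpers and the removed rq->special field: it allocates a reserved passthrough request directly and keeps the private payload in the request pdu. A sketch of that allocation pattern with hypothetical my_* types (building and issuing the command is elided):

```c
#include <linux/blk-mq.h>
#include <linux/err.h>

struct my_int_cmd {		/* hypothetical driver-internal payload */
	unsigned int timeout;
};

struct my_cmd {			/* hypothetical per-request pdu */
	struct my_int_cmd *icmd;
};

static int my_prep_internal(struct request_queue *q, struct my_int_cmd *icmd)
{
	struct request *rq;
	struct my_cmd *cmd;

	/* Reserved tags must have been set aside in the tag set for this. */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cmd = blk_mq_rq_to_pdu(rq);
	cmd->icmd = icmd;		/* replaces the old rq->special hook */
	rq->timeout = icmd->timeout;

	/* ... fill the command table and execute rq, as the driver does ... */

	blk_mq_free_request(rq);
	return 0;
}
```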
| .. | .. |
|---|
| 1222 | 1182 | set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); |
|---|
| 1223 | 1183 | else |
|---|
| 1224 | 1184 | clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); |
|---|
| 1225 | | - |
|---|
| 1226 | | -#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */ |
|---|
| 1227 | | - /* Demux ID.DRAT & ID.RZAT to determine trim support */ |
|---|
| 1228 | | - if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5)) |
|---|
| 1229 | | - port->dd->trim_supp = true; |
|---|
| 1230 | | - else |
|---|
| 1231 | | -#endif |
|---|
| 1232 | | - port->dd->trim_supp = false; |
|---|
| 1233 | 1185 | |
|---|
| 1234 | 1186 | /* Set the identify buffer as valid. */ |
|---|
| 1235 | 1187 | port->identify_valid = 1; |
|---|
| .. | .. |
|---|
| 1418 | 1370 | } |
|---|
| 1419 | 1371 | |
|---|
| 1420 | 1372 | /* |
|---|
| 1421 | | - * Trim unused sectors |
|---|
| 1422 | | - * |
|---|
| 1423 | | - * @dd pointer to driver_data structure |
|---|
| 1424 | | - * @lba starting lba |
|---|
| 1425 | | - * @len # of 512b sectors to trim |
|---|
| 1426 | | - * |
|---|
| 1427 | | - * return value |
|---|
| 1428 | | - * -ENOMEM Out of dma memory |
|---|
| 1429 | | - * -EINVAL Invalid parameters passed in, trim not supported |
|---|
| 1430 | | - * -EIO Error submitting trim request to hw |
|---|
| 1431 | | - */ |
|---|
| 1432 | | -static int mtip_send_trim(struct driver_data *dd, unsigned int lba, |
|---|
| 1433 | | - unsigned int len) |
|---|
| 1434 | | -{ |
|---|
| 1435 | | - int i, rv = 0; |
|---|
| 1436 | | - u64 tlba, tlen, sect_left; |
|---|
| 1437 | | - struct mtip_trim_entry *buf; |
|---|
| 1438 | | - dma_addr_t dma_addr; |
|---|
| 1439 | | - struct host_to_dev_fis fis; |
|---|
| 1440 | | - |
|---|
| 1441 | | - if (!len || dd->trim_supp == false) |
|---|
| 1442 | | - return -EINVAL; |
|---|
| 1443 | | - |
|---|
| 1444 | | - /* Trim request too big */ |
|---|
| 1445 | | - WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES)); |
|---|
| 1446 | | - |
|---|
| 1447 | | - /* Trim request not aligned on 4k boundary */ |
|---|
| 1448 | | - WARN_ON(len % 8 != 0); |
|---|
| 1449 | | - |
|---|
| 1450 | | - /* Warn if vu_trim structure is too big */ |
|---|
| 1451 | | - WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE); |
|---|
| 1452 | | - |
|---|
| 1453 | | - /* Allocate a DMA buffer for the trim structure */ |
|---|
| 1454 | | - buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr, |
|---|
| 1455 | | - GFP_KERNEL); |
|---|
| 1456 | | - if (!buf) |
|---|
| 1457 | | - return -ENOMEM; |
|---|
| 1458 | | - memset(buf, 0, ATA_SECT_SIZE); |
|---|
| 1459 | | - |
|---|
| 1460 | | - for (i = 0, sect_left = len, tlba = lba; |
|---|
| 1461 | | - i < MTIP_MAX_TRIM_ENTRIES && sect_left; |
|---|
| 1462 | | - i++) { |
|---|
| 1463 | | - tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ? |
|---|
| 1464 | | - MTIP_MAX_TRIM_ENTRY_LEN : |
|---|
| 1465 | | - sect_left); |
|---|
| 1466 | | - buf[i].lba = __force_bit2int cpu_to_le32(tlba); |
|---|
| 1467 | | - buf[i].range = __force_bit2int cpu_to_le16(tlen); |
|---|
| 1468 | | - tlba += tlen; |
|---|
| 1469 | | - sect_left -= tlen; |
|---|
| 1470 | | - } |
|---|
| 1471 | | - WARN_ON(sect_left != 0); |
|---|
| 1472 | | - |
|---|
| 1473 | | - /* Build the fis */ |
|---|
| 1474 | | - memset(&fis, 0, sizeof(struct host_to_dev_fis)); |
|---|
| 1475 | | - fis.type = 0x27; |
|---|
| 1476 | | - fis.opts = 1 << 7; |
|---|
| 1477 | | - fis.command = 0xfb; |
|---|
| 1478 | | - fis.features = 0x60; |
|---|
| 1479 | | - fis.sect_count = 1; |
|---|
| 1480 | | - fis.device = ATA_DEVICE_OBS; |
|---|
| 1481 | | - |
|---|
| 1482 | | - if (mtip_exec_internal_command(dd->port, |
|---|
| 1483 | | - &fis, |
|---|
| 1484 | | - 5, |
|---|
| 1485 | | - dma_addr, |
|---|
| 1486 | | - ATA_SECT_SIZE, |
|---|
| 1487 | | - 0, |
|---|
| 1488 | | - MTIP_TRIM_TIMEOUT_MS) < 0) |
|---|
| 1489 | | - rv = -EIO; |
|---|
| 1490 | | - |
|---|
| 1491 | | - dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr); |
|---|
| 1492 | | - return rv; |
|---|
| 1493 | | -} |
|---|
| 1494 | | - |
|---|
| 1495 | | -/* |
|---|
| 1496 | 1373 | * Get the drive capacity. |
|---|
| 1497 | 1374 | * |
|---|
| 1498 | 1375 | * @dd Pointer to the device data structure. |
|---|
| .. | .. |
|---|
| 1585 | 1462 | int n; |
|---|
| 1586 | 1463 | unsigned int dma_len; |
|---|
| 1587 | 1464 | struct mtip_cmd_sg *command_sg; |
|---|
| 1588 | | - struct scatterlist *sg = command->sg; |
|---|
| 1465 | + struct scatterlist *sg; |
|---|
| 1589 | 1466 | |
|---|
| 1590 | 1467 | command_sg = command->command + AHCI_CMD_TBL_HDR_SZ; |
|---|
| 1591 | 1468 | |
|---|
| 1592 | | - for (n = 0; n < nents; n++) { |
|---|
| 1469 | + for_each_sg(command->sg, sg, nents, n) { |
|---|
| 1593 | 1470 | dma_len = sg_dma_len(sg); |
|---|
| 1594 | 1471 | if (dma_len > 0x400000) |
|---|
| 1595 | 1472 | dev_err(&dd->pdev->dev, |
|---|
| 1596 | 1473 | "DMA segment length truncated\n"); |
|---|
| 1597 | | - command_sg->info = __force_bit2int |
|---|
| 1598 | | - cpu_to_le32((dma_len-1) & 0x3FFFFF); |
|---|
| 1599 | | - command_sg->dba = __force_bit2int |
|---|
| 1600 | | - cpu_to_le32(sg_dma_address(sg)); |
|---|
| 1601 | | - command_sg->dba_upper = __force_bit2int |
|---|
| 1474 | + command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF); |
|---|
| 1475 | + command_sg->dba = cpu_to_le32(sg_dma_address(sg)); |
|---|
| 1476 | + command_sg->dba_upper = |
|---|
| 1602 | 1477 | cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); |
|---|
| 1603 | 1478 | command_sg++; |
|---|
| 1604 | | - sg++; |
|---|
| 1605 | 1479 | } |
|---|
| 1606 | 1480 | } |
|---|
| 1607 | 1481 | |
|---|
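The scatter-list fill above switches from manually advancing the scatterlist pointer to for_each_sg(), which also stays correct for chained lists. A minimal sketch of the iterator (total_dma_len() is a made-up helper that just sums the mapped lengths):

```c
#include <linux/scatterlist.h>

/* Sum the DMA lengths of @nents mapped entries starting at @sgl. */
static unsigned int total_dma_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg_dma_len(sg);

	return total;
}
```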
| .. | .. |
|---|
| 1694 | 1568 | if (!user_buffer) |
|---|
| 1695 | 1569 | return -EFAULT; |
|---|
| 1696 | 1570 | |
|---|
| 1697 | | - buf = dmam_alloc_coherent(&port->dd->pdev->dev, |
|---|
| 1571 | + buf = dma_alloc_coherent(&port->dd->pdev->dev, |
|---|
| 1698 | 1572 | ATA_SECT_SIZE * xfer_sz, |
|---|
| 1699 | 1573 | &dma_addr, |
|---|
| 1700 | 1574 | GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 1704 | 1578 | ATA_SECT_SIZE * xfer_sz); |
|---|
| 1705 | 1579 | return -ENOMEM; |
|---|
| 1706 | 1580 | } |
|---|
| 1707 | | - memset(buf, 0, ATA_SECT_SIZE * xfer_sz); |
|---|
| 1708 | 1581 | } |
|---|
| 1709 | 1582 | |
|---|
| 1710 | 1583 | /* Build the FIS. */ |
|---|
| .. | .. |
|---|
| 1772 | 1645 | } |
|---|
| 1773 | 1646 | exit_drive_command: |
|---|
| 1774 | 1647 | if (buf) |
|---|
| 1775 | | - dmam_free_coherent(&port->dd->pdev->dev, |
|---|
| 1648 | + dma_free_coherent(&port->dd->pdev->dev, |
|---|
| 1776 | 1649 | ATA_SECT_SIZE * xfer_sz, buf, dma_addr); |
|---|
| 1777 | 1650 | return rv; |
|---|
| 1778 | 1651 | } |
|---|
| .. | .. |
|---|
| 1862 | 1735 | if (IS_ERR(outbuf)) |
|---|
| 1863 | 1736 | return PTR_ERR(outbuf); |
|---|
| 1864 | 1737 | |
|---|
| 1865 | | - outbuf_dma = pci_map_single(dd->pdev, |
|---|
| 1866 | | - outbuf, |
|---|
| 1867 | | - taskout, |
|---|
| 1868 | | - DMA_TO_DEVICE); |
|---|
| 1869 | | - if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) { |
|---|
| 1738 | + outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf, |
|---|
| 1739 | + taskout, DMA_TO_DEVICE); |
|---|
| 1740 | + if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) { |
|---|
| 1870 | 1741 | err = -ENOMEM; |
|---|
| 1871 | 1742 | goto abort; |
|---|
| 1872 | 1743 | } |
|---|
| .. | .. |
|---|
| 1880 | 1751 | inbuf = NULL; |
|---|
| 1881 | 1752 | goto abort; |
|---|
| 1882 | 1753 | } |
|---|
| 1883 | | - inbuf_dma = pci_map_single(dd->pdev, |
|---|
| 1884 | | - inbuf, |
|---|
| 1885 | | - taskin, DMA_FROM_DEVICE); |
|---|
| 1886 | | - if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) { |
|---|
| 1754 | + inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf, |
|---|
| 1755 | + taskin, DMA_FROM_DEVICE); |
|---|
| 1756 | + if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) { |
|---|
| 1887 | 1757 | err = -ENOMEM; |
|---|
| 1888 | 1758 | goto abort; |
|---|
| 1889 | 1759 | } |
|---|
| .. | .. |
|---|
| 1945 | 1815 | dev_warn(&dd->pdev->dev, |
|---|
| 1946 | 1816 | "data movement but " |
|---|
| 1947 | 1817 | "sect_count is 0\n"); |
|---|
| 1948 | | - err = -EINVAL; |
|---|
| 1949 | | - goto abort; |
|---|
| 1818 | + err = -EINVAL; |
|---|
| 1819 | + goto abort; |
|---|
| 1950 | 1820 | } |
|---|
| 1951 | 1821 | } |
|---|
| 1952 | 1822 | } |
|---|
| .. | .. |
|---|
| 2002 | 1872 | |
|---|
| 2003 | 1873 | /* reclaim the DMA buffers.*/ |
|---|
| 2004 | 1874 | if (inbuf_dma) |
|---|
| 2005 | | - pci_unmap_single(dd->pdev, inbuf_dma, |
|---|
| 2006 | | - taskin, DMA_FROM_DEVICE); |
|---|
| 1875 | + dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin, |
|---|
| 1876 | + DMA_FROM_DEVICE); |
|---|
| 2007 | 1877 | if (outbuf_dma) |
|---|
| 2008 | | - pci_unmap_single(dd->pdev, outbuf_dma, |
|---|
| 2009 | | - taskout, DMA_TO_DEVICE); |
|---|
| 1878 | + dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout, |
|---|
| 1879 | + DMA_TO_DEVICE); |
|---|
| 2010 | 1880 | inbuf_dma = 0; |
|---|
| 2011 | 1881 | outbuf_dma = 0; |
|---|
| 2012 | 1882 | |
|---|
| .. | .. |
|---|
| 2053 | 1923 | } |
|---|
| 2054 | 1924 | abort: |
|---|
| 2055 | 1925 | if (inbuf_dma) |
|---|
| 2056 | | - pci_unmap_single(dd->pdev, inbuf_dma, |
|---|
| 2057 | | - taskin, DMA_FROM_DEVICE); |
|---|
| 1926 | + dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin, |
|---|
| 1927 | + DMA_FROM_DEVICE); |
|---|
| 2058 | 1928 | if (outbuf_dma) |
|---|
| 2059 | | - pci_unmap_single(dd->pdev, outbuf_dma, |
|---|
| 2060 | | - taskout, DMA_TO_DEVICE); |
|---|
| 1929 | + dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout, |
|---|
| 1930 | + DMA_TO_DEVICE); |
|---|
| 2061 | 1931 | kfree(outbuf); |
|---|
| 2062 | 1932 | kfree(inbuf); |
|---|
| 2063 | 1933 | |
|---|
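The ioctl buffers above move off the deprecated pci_map_single()/pci_unmap_single() wrappers onto the generic DMA API keyed on &pdev->dev. A sketch of the map / error-check / unmap sequence for a hypothetical single buffer:

```c
#include <linux/dma-mapping.h>

static int my_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand @addr to the hardware and wait for the transfer ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
```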
| .. | .. |
|---|
| 2174 | 2044 | * @dd Pointer to the driver data structure. |
|---|
| 2175 | 2045 | * @start First sector to read. |
|---|
| 2176 | 2046 | * @nsect Number of sectors to read. |
|---|
| 2177 | | - * @nents Number of entries in scatter list for the read command. |
|---|
| 2178 | 2047 | * @tag The tag of this read command. |
|---|
| 2179 | 2048 | * @callback Pointer to the function that should be called |
|---|
| 2180 | 2049 | * when the read completes. |
|---|
| .. | .. |
|---|
| 2186 | 2055 | * None |
|---|
| 2187 | 2056 | */ |
|---|
| 2188 | 2057 | static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, |
|---|
| 2189 | | - struct mtip_cmd *command, int nents, |
|---|
| 2058 | + struct mtip_cmd *command, |
|---|
| 2190 | 2059 | struct blk_mq_hw_ctx *hctx) |
|---|
| 2191 | 2060 | { |
|---|
| 2061 | + struct mtip_cmd_hdr *hdr = |
|---|
| 2062 | + dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag; |
|---|
| 2192 | 2063 | struct host_to_dev_fis *fis; |
|---|
| 2193 | 2064 | struct mtip_port *port = dd->port; |
|---|
| 2194 | 2065 | int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
|---|
| 2195 | 2066 | u64 start = blk_rq_pos(rq); |
|---|
| 2196 | 2067 | unsigned int nsect = blk_rq_sectors(rq); |
|---|
| 2068 | + unsigned int nents; |
|---|
| 2197 | 2069 | |
|---|
| 2198 | 2070 | /* Map the scatter list for DMA access */ |
|---|
| 2071 | + nents = blk_rq_map_sg(hctx->queue, rq, command->sg); |
|---|
| 2199 | 2072 | nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); |
|---|
| 2200 | 2073 | |
|---|
| 2201 | 2074 | prefetch(&port->flags); |
|---|
| .. | .. |
|---|
| 2236 | 2109 | fis->device |= 1 << 7; |
|---|
| 2237 | 2110 | |
|---|
| 2238 | 2111 | /* Populate the command header */ |
|---|
| 2239 | | - command->command_header->opts = |
|---|
| 2240 | | - __force_bit2int cpu_to_le32( |
|---|
| 2241 | | - (nents << 16) | 5 | AHCI_CMD_PREFETCH); |
|---|
| 2242 | | - command->command_header->byte_count = 0; |
|---|
| 2112 | + hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF); |
|---|
| 2113 | + if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags)) |
|---|
| 2114 | + hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16); |
|---|
| 2115 | + hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH); |
|---|
| 2116 | + hdr->byte_count = 0; |
|---|
| 2243 | 2117 | |
|---|
| 2244 | 2118 | command->direction = dma_dir; |
|---|
| 2245 | 2119 | |
|---|
| .. | .. |
|---|
| 2718 | 2592 | cmd->direction); |
|---|
| 2719 | 2593 | |
|---|
| 2720 | 2594 | if (unlikely(cmd->unaligned)) |
|---|
| 2721 | | - up(&dd->port->cmd_slot_unal); |
|---|
| 2595 | + atomic_inc(&dd->port->cmd_slot_unal); |
|---|
| 2722 | 2596 | |
|---|
| 2723 | 2597 | blk_mq_end_request(rq, cmd->status); |
|---|
| 2724 | 2598 | } |
|---|
| 2725 | 2599 | |
|---|
| 2726 | | -static void mtip_abort_cmd(struct request *req, void *data, bool reserved) |
|---|
| 2600 | +static bool mtip_abort_cmd(struct request *req, void *data, bool reserved) |
|---|
| 2727 | 2601 | { |
|---|
| 2728 | 2602 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); |
|---|
| 2729 | 2603 | struct driver_data *dd = data; |
|---|
| .. | .. |
|---|
| 2733 | 2607 | clear_bit(req->tag, dd->port->cmds_to_issue); |
|---|
| 2734 | 2608 | cmd->status = BLK_STS_IOERR; |
|---|
| 2735 | 2609 | mtip_softirq_done_fn(req); |
|---|
| 2610 | + return true; |
|---|
| 2736 | 2611 | } |
|---|
| 2737 | 2612 | |
|---|
| 2738 | | -static void mtip_queue_cmd(struct request *req, void *data, bool reserved) |
|---|
| 2613 | +static bool mtip_queue_cmd(struct request *req, void *data, bool reserved) |
|---|
| 2739 | 2614 | { |
|---|
| 2740 | 2615 | struct driver_data *dd = data; |
|---|
| 2741 | 2616 | |
|---|
| 2742 | 2617 | set_bit(req->tag, dd->port->cmds_to_issue); |
|---|
| 2743 | 2618 | blk_abort_request(req); |
|---|
| 2619 | + return true; |
|---|
| 2744 | 2620 | } |
|---|
| 2745 | 2621 | |
|---|
| 2746 | 2622 | /* |
|---|
| .. | .. |
|---|
| 2806 | 2682 | |
|---|
| 2807 | 2683 | blk_mq_quiesce_queue(dd->queue); |
|---|
| 2808 | 2684 | |
|---|
| 2809 | | - spin_lock(dd->queue->queue_lock); |
|---|
| 2810 | | - blk_mq_tagset_busy_iter(&dd->tags, |
|---|
| 2811 | | - mtip_queue_cmd, dd); |
|---|
| 2812 | | - spin_unlock(dd->queue->queue_lock); |
|---|
| 2685 | + blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd); |
|---|
| 2813 | 2686 | |
|---|
| 2814 | 2687 | set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags); |
|---|
| 2815 | 2688 | |
|---|
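In the hunk above the blk_mq_tagset_busy_iter() callbacks (mtip_abort_cmd()/mtip_queue_cmd()) gain a bool return value, which tells the iterator whether to keep walking the in-flight requests, and the queue_lock previously held around the iteration is dropped. A sketch of the callback shape matching the signature used in this diff (my_fail_all() and struct my_cmd are made-up):

```c
#include <linux/blk-mq.h>

struct my_cmd {				/* hypothetical per-request pdu */
	blk_status_t status;
};

/* busy_tag_iter_fn: return true to keep iterating, false to stop early. */
static bool my_fail_all(struct request *rq, void *data, bool reserved)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
	return true;
}

/* Usage: blk_mq_tagset_busy_iter(&tags, my_fail_all, NULL); */
```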
| .. | .. |
|---|
| 2876 | 2749 | struct mtip_port *port = dd->port; |
|---|
| 2877 | 2750 | |
|---|
| 2878 | 2751 | if (port->block1) |
|---|
| 2879 | | - dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2752 | + dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2880 | 2753 | port->block1, port->block1_dma); |
|---|
| 2881 | 2754 | |
|---|
| 2882 | 2755 | if (port->command_list) { |
|---|
| 2883 | | - dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, |
|---|
| 2756 | + dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, |
|---|
| 2884 | 2757 | port->command_list, port->command_list_dma); |
|---|
| 2885 | 2758 | } |
|---|
| 2886 | 2759 | } |
|---|
| .. | .. |
|---|
| 2899 | 2772 | |
|---|
| 2900 | 2773 | /* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */ |
|---|
| 2901 | 2774 | port->block1 = |
|---|
| 2902 | | - dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2775 | + dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2903 | 2776 | &port->block1_dma, GFP_KERNEL); |
|---|
| 2904 | 2777 | if (!port->block1) |
|---|
| 2905 | 2778 | return -ENOMEM; |
|---|
| 2906 | | - memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ); |
|---|
| 2907 | 2779 | |
|---|
| 2908 | 2780 | /* Allocate dma memory for command list */ |
|---|
| 2909 | 2781 | port->command_list = |
|---|
| 2910 | | - dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, |
|---|
| 2782 | + dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, |
|---|
| 2911 | 2783 | &port->command_list_dma, GFP_KERNEL); |
|---|
| 2912 | 2784 | if (!port->command_list) { |
|---|
| 2913 | | - dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2785 | + dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, |
|---|
| 2914 | 2786 | port->block1, port->block1_dma); |
|---|
| 2915 | 2787 | port->block1 = NULL; |
|---|
| 2916 | 2788 | port->block1_dma = 0; |
|---|
| 2917 | 2789 | return -ENOMEM; |
|---|
| 2918 | 2790 | } |
|---|
| 2919 | | - memset(port->command_list, 0, AHCI_CMD_TBL_SZ); |
|---|
| 2920 | 2791 | |
|---|
| 2921 | 2792 | /* Setup all pointers into first DMA region */ |
|---|
| 2922 | 2793 | port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET; |
|---|
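The port-memory setup above replaces the managed dmam_* allocators with plain dma_alloc_coherent()/dma_free_coherent(), and the explicit memset() calls are dropped as well, since the kernels this diff targets return zeroed memory from dma_alloc_coherent(). A sketch of the allocate/free pairing for a hypothetical block (MY_BLOCK_SZ is illustrative):

```c
#include <linux/dma-mapping.h>

#define MY_BLOCK_SZ	4096	/* hypothetical allocation size */

static void *my_alloc_block(struct device *dev, dma_addr_t *dma)
{
	/* Returned memory is already zeroed, so no memset() is needed. */
	return dma_alloc_coherent(dev, MY_BLOCK_SZ, dma, GFP_KERNEL);
}

static void my_free_block(struct device *dev, void *cpu, dma_addr_t dma)
{
	dma_free_coherent(dev, MY_BLOCK_SZ, cpu, dma);
}
```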
| .. | .. |
|---|
| 3029 | 2900 | else |
|---|
| 3030 | 2901 | dd->unal_qdepth = 0; |
|---|
| 3031 | 2902 | |
|---|
| 3032 | | - sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth); |
|---|
| 2903 | + atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth); |
|---|
| 3033 | 2904 | |
|---|
| 3034 | 2905 | /* Spinlock to prevent concurrent issue */ |
|---|
| 3035 | 2906 | for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++) |
|---|
| .. | .. |
|---|
| 3095 | 2966 | mtip_start_port(dd->port); |
|---|
| 3096 | 2967 | |
|---|
| 3097 | 2968 | /* Setup the ISR and enable interrupts. */ |
|---|
| 3098 | | - rv = devm_request_irq(&dd->pdev->dev, |
|---|
| 3099 | | - dd->pdev->irq, |
|---|
| 3100 | | - mtip_irq_handler, |
|---|
| 3101 | | - IRQF_SHARED, |
|---|
| 3102 | | - dev_driver_string(&dd->pdev->dev), |
|---|
| 3103 | | - dd); |
|---|
| 3104 | | - |
|---|
| 2969 | + rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED, |
|---|
| 2970 | + dev_driver_string(&dd->pdev->dev), dd); |
|---|
| 3105 | 2971 | if (rv) { |
|---|
| 3106 | 2972 | dev_err(&dd->pdev->dev, |
|---|
| 3107 | 2973 | "Unable to allocate IRQ %d\n", dd->pdev->irq); |
|---|
| .. | .. |
|---|
| 3129 | 2995 | |
|---|
| 3130 | 2996 | /* Release the IRQ. */ |
|---|
| 3131 | 2997 | irq_set_affinity_hint(dd->pdev->irq, NULL); |
|---|
| 3132 | | - devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); |
|---|
| 2998 | + free_irq(dd->pdev->irq, dd); |
|---|
| 3133 | 2999 | |
|---|
| 3134 | 3000 | out2: |
|---|
| 3135 | 3001 | mtip_deinit_port(dd->port); |
|---|
| .. | .. |
|---|
| 3184 | 3050 | |
|---|
| 3185 | 3051 | /* Release the IRQ. */ |
|---|
| 3186 | 3052 | irq_set_affinity_hint(dd->pdev->irq, NULL); |
|---|
| 3187 | | - devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); |
|---|
| 3053 | + free_irq(dd->pdev->irq, dd); |
|---|
| 3188 | 3054 | msleep(1000); |
|---|
| 3189 | 3055 | |
|---|
| 3190 | 3056 | /* Free dma regions */ |
|---|
| .. | .. |
|---|
| 3534 | 3400 | return false; |
|---|
| 3535 | 3401 | } |
|---|
| 3536 | 3402 | |
|---|
| 3537 | | -/* |
|---|
| 3538 | | - * Block layer make request function. |
|---|
| 3539 | | - * |
|---|
| 3540 | | - * This function is called by the kernel to process a BIO for |
|---|
| 3541 | | - * the P320 device. |
|---|
| 3542 | | - * |
|---|
| 3543 | | - * @queue Pointer to the request queue. Unused other than to obtain |
|---|
| 3544 | | - * the driver data structure. |
|---|
| 3545 | | - * @rq Pointer to the request. |
|---|
| 3546 | | - * |
|---|
| 3547 | | - */ |
|---|
| 3548 | | -static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) |
|---|
| 3403 | +static inline bool is_stopped(struct driver_data *dd, struct request *rq) |
|---|
| 3549 | 3404 | { |
|---|
| 3550 | | - struct driver_data *dd = hctx->queue->queuedata; |
|---|
| 3551 | | - struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 3552 | | - unsigned int nents; |
|---|
| 3405 | + if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO))) |
|---|
| 3406 | + return false; |
|---|
| 3553 | 3407 | |
|---|
| 3554 | | - if (is_se_active(dd)) |
|---|
| 3555 | | - return -ENODATA; |
|---|
| 3408 | + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) |
|---|
| 3409 | + return true; |
|---|
| 3410 | + if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) |
|---|
| 3411 | + return true; |
|---|
| 3412 | + if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) && |
|---|
| 3413 | + rq_data_dir(rq)) |
|---|
| 3414 | + return true; |
|---|
| 3415 | + if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) |
|---|
| 3416 | + return true; |
|---|
| 3417 | + if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) |
|---|
| 3418 | + return true; |
|---|
| 3556 | 3419 | |
|---|
| 3557 | | - if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { |
|---|
| 3558 | | - if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
|---|
| 3559 | | - &dd->dd_flag))) { |
|---|
| 3560 | | - return -ENXIO; |
|---|
| 3561 | | - } |
|---|
| 3562 | | - if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { |
|---|
| 3563 | | - return -ENODATA; |
|---|
| 3564 | | - } |
|---|
| 3565 | | - if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT, |
|---|
| 3566 | | - &dd->dd_flag) && |
|---|
| 3567 | | - rq_data_dir(rq))) { |
|---|
| 3568 | | - return -ENODATA; |
|---|
| 3569 | | - } |
|---|
| 3570 | | - if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) || |
|---|
| 3571 | | - test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))) |
|---|
| 3572 | | - return -ENODATA; |
|---|
| 3573 | | - } |
|---|
| 3574 | | - |
|---|
| 3575 | | - if (req_op(rq) == REQ_OP_DISCARD) { |
|---|
| 3576 | | - int err; |
|---|
| 3577 | | - |
|---|
| 3578 | | - err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); |
|---|
| 3579 | | - blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK); |
|---|
| 3580 | | - return 0; |
|---|
| 3581 | | - } |
|---|
| 3582 | | - |
|---|
| 3583 | | - /* Create the scatter list for this request. */ |
|---|
| 3584 | | - nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); |
|---|
| 3585 | | - |
|---|
| 3586 | | - /* Issue the read/write. */ |
|---|
| 3587 | | - mtip_hw_submit_io(dd, rq, cmd, nents, hctx); |
|---|
| 3588 | | - return 0; |
|---|
| 3420 | + return false; |
|---|
| 3589 | 3421 | } |
|---|
| 3590 | 3422 | |
|---|
| 3591 | 3423 | static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, |
|---|
| .. | .. |
|---|
| 3606 | 3438 | cmd->unaligned = 1; |
|---|
| 3607 | 3439 | } |
|---|
| 3608 | 3440 | |
|---|
| 3609 | | - if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal)) |
|---|
| 3441 | + if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0) |
|---|
| 3610 | 3442 | return true; |
|---|
| 3611 | 3443 | |
|---|
| 3612 | 3444 | return false; |
|---|
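The unaligned-command budget is now a plain atomic_t rather than a semaphore: the hunk above probes it with atomic_dec_if_positive(), and the completion path returns a slot with atomic_inc() (see the earlier mtip_softirq_done_fn() hunk). A generic sketch of that try-acquire/release pairing, not tied to the driver's exact predicate:

```c
#include <linux/atomic.h>

static atomic_t slot_budget = ATOMIC_INIT(8);	/* hypothetical budget */

/*
 * atomic_dec_if_positive() only decrements when the result stays >= 0;
 * it returns the decremented value, or a negative number when the
 * budget was already exhausted.
 */
static bool slot_try_get(void)
{
	return atomic_dec_if_positive(&slot_budget) >= 0;
}

static void slot_put(void)
{
	atomic_inc(&slot_budget);
}
```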
| .. | .. |
|---|
| 3616 | 3448 | struct request *rq) |
|---|
| 3617 | 3449 | { |
|---|
| 3618 | 3450 | struct driver_data *dd = hctx->queue->queuedata; |
|---|
| 3619 | | - struct mtip_int_cmd *icmd = rq->special; |
|---|
| 3620 | 3451 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 3452 | + struct mtip_int_cmd *icmd = cmd->icmd; |
|---|
| 3453 | + struct mtip_cmd_hdr *hdr = |
|---|
| 3454 | + dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag; |
|---|
| 3621 | 3455 | struct mtip_cmd_sg *command_sg; |
|---|
| 3622 | 3456 | |
|---|
| 3623 | 3457 | if (mtip_commands_active(dd->port)) |
|---|
| 3624 | | - return BLK_STS_RESOURCE; |
|---|
| 3458 | + return BLK_STS_DEV_RESOURCE; |
|---|
| 3625 | 3459 | |
|---|
| 3460 | + hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); |
|---|
| 3461 | + if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags)) |
|---|
| 3462 | + hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16); |
|---|
| 3626 | 3463 | /* Populate the SG list */ |
|---|
| 3627 | | - cmd->command_header->opts = |
|---|
| 3628 | | - __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len); |
|---|
| 3464 | + hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len); |
|---|
| 3629 | 3465 | if (icmd->buf_len) { |
|---|
| 3630 | 3466 | command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ; |
|---|
| 3631 | 3467 | |
|---|
| 3632 | | - command_sg->info = |
|---|
| 3633 | | - __force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF); |
|---|
| 3634 | | - command_sg->dba = |
|---|
| 3635 | | - __force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF); |
|---|
| 3468 | + command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF); |
|---|
| 3469 | + command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF); |
|---|
| 3636 | 3470 | command_sg->dba_upper = |
|---|
| 3637 | | - __force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16); |
|---|
| 3471 | + cpu_to_le32((icmd->buffer >> 16) >> 16); |
|---|
| 3638 | 3472 | |
|---|
| 3639 | | - cmd->command_header->opts |= |
|---|
| 3640 | | - __force_bit2int cpu_to_le32((1 << 16)); |
|---|
| 3473 | + hdr->opts |= cpu_to_le32((1 << 16)); |
|---|
| 3641 | 3474 | } |
|---|
| 3642 | 3475 | |
|---|
| 3643 | 3476 | /* Populate the command header */ |
|---|
| 3644 | | - cmd->command_header->byte_count = 0; |
|---|
| 3477 | + hdr->byte_count = 0; |
|---|
| 3645 | 3478 | |
|---|
| 3646 | 3479 | blk_mq_start_request(rq); |
|---|
| 3647 | 3480 | mtip_issue_non_ncq_command(dd->port, rq->tag); |
|---|
| .. | .. |
|---|
| 3651 | 3484 | static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx, |
|---|
| 3652 | 3485 | const struct blk_mq_queue_data *bd) |
|---|
| 3653 | 3486 | { |
|---|
| 3487 | + struct driver_data *dd = hctx->queue->queuedata; |
|---|
| 3654 | 3488 | struct request *rq = bd->rq; |
|---|
| 3655 | | - int ret; |
|---|
| 3656 | | - |
|---|
| 3657 | | - mtip_init_cmd_header(rq); |
|---|
| 3489 | + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 3658 | 3490 | |
|---|
| 3659 | 3491 | if (blk_rq_is_passthrough(rq)) |
|---|
| 3660 | 3492 | return mtip_issue_reserved_cmd(hctx, rq); |
|---|
| 3661 | 3493 | |
|---|
| 3662 | 3494 | if (unlikely(mtip_check_unal_depth(hctx, rq))) |
|---|
| 3663 | | - return BLK_STS_RESOURCE; |
|---|
| 3495 | + return BLK_STS_DEV_RESOURCE; |
|---|
| 3496 | + |
|---|
| 3497 | + if (is_se_active(dd) || is_stopped(dd, rq)) |
|---|
| 3498 | + return BLK_STS_IOERR; |
|---|
| 3664 | 3499 | |
|---|
| 3665 | 3500 | blk_mq_start_request(rq); |
|---|
| 3666 | 3501 | |
|---|
| 3667 | | - ret = mtip_submit_request(hctx, rq); |
|---|
| 3668 | | - if (likely(!ret)) |
|---|
| 3669 | | - return BLK_STS_OK; |
|---|
| 3670 | | - return BLK_STS_IOERR; |
|---|
| 3502 | + mtip_hw_submit_io(dd, rq, cmd, hctx); |
|---|
| 3503 | + return BLK_STS_OK; |
|---|
| 3671 | 3504 | } |
|---|
| 3672 | 3505 | |
|---|
| 3673 | 3506 | static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq, |
|---|
| .. | .. |
|---|
| 3679 | 3512 | if (!cmd->command) |
|---|
| 3680 | 3513 | return; |
|---|
| 3681 | 3514 | |
|---|
| 3682 | | - dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, |
|---|
| 3683 | | - cmd->command, cmd->command_dma); |
|---|
| 3515 | + dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command, |
|---|
| 3516 | + cmd->command_dma); |
|---|
| 3684 | 3517 | } |
|---|
| 3685 | 3518 | |
|---|
| 3686 | 3519 | static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, |
|---|
| .. | .. |
|---|
| 3689 | 3522 | struct driver_data *dd = set->driver_data; |
|---|
| 3690 | 3523 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 3691 | 3524 | |
|---|
| 3692 | | - cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, |
|---|
| 3525 | + cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, |
|---|
| 3693 | 3526 | &cmd->command_dma, GFP_KERNEL); |
|---|
| 3694 | 3527 | if (!cmd->command) |
|---|
| 3695 | 3528 | return -ENOMEM; |
|---|
| 3696 | | - |
|---|
| 3697 | | - memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); |
|---|
| 3698 | 3529 | |
|---|
| 3699 | 3530 | sg_init_table(cmd->sg, MTIP_MAX_SG); |
|---|
| 3700 | 3531 | return 0; |
|---|
| .. | .. |
|---|
| 3841 | 3672 | blk_queue_physical_block_size(dd->queue, 4096); |
|---|
| 3842 | 3673 | blk_queue_max_hw_sectors(dd->queue, 0xffff); |
|---|
| 3843 | 3674 | blk_queue_max_segment_size(dd->queue, 0x400000); |
|---|
| 3675 | + dma_set_max_seg_size(&dd->pdev->dev, 0x400000); |
|---|
| 3844 | 3676 | blk_queue_io_min(dd->queue, 4096); |
|---|
| 3845 | | - |
|---|
| 3846 | | - /* Signal trim support */ |
|---|
| 3847 | | - if (dd->trim_supp == true) { |
|---|
| 3848 | | - blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue); |
|---|
| 3849 | | - dd->queue->limits.discard_granularity = 4096; |
|---|
| 3850 | | - blk_queue_max_discard_sectors(dd->queue, |
|---|
| 3851 | | - MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES); |
|---|
| 3852 | | - } |
|---|
| 3853 | 3677 | |
|---|
| 3854 | 3678 | /* Set the capacity of the device in 512 byte sectors. */ |
|---|
| 3855 | 3679 | if (!(mtip_hw_get_capacity(dd, &capacity))) { |
|---|
| .. | .. |
|---|
| 3861 | 3685 | set_capacity(dd->disk, capacity); |
|---|
| 3862 | 3686 | |
|---|
| 3863 | 3687 | /* Enable the block device and add it to /dev */ |
|---|
| 3864 | | - device_add_disk(&dd->pdev->dev, dd->disk); |
|---|
| 3688 | + device_add_disk(&dd->pdev->dev, dd->disk, NULL); |
|---|
| 3865 | 3689 | |
|---|
| 3866 | 3690 | dd->bdev = bdget_disk(dd->disk, 0); |
|---|
| 3867 | 3691 | /* |
|---|
| .. | .. |
|---|
| 3923 | 3747 | return rv; |
|---|
| 3924 | 3748 | } |
|---|
| 3925 | 3749 | |
|---|
| 3926 | | -static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) |
|---|
| 3750 | +static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) |
|---|
| 3927 | 3751 | { |
|---|
| 3928 | 3752 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
|---|
| 3929 | 3753 | |
|---|
| 3930 | 3754 | cmd->status = BLK_STS_IOERR; |
|---|
| 3931 | 3755 | blk_mq_complete_request(rq); |
|---|
| 3756 | + return true; |
|---|
| 3932 | 3757 | } |
|---|
| 3933 | 3758 | |
|---|
| 3934 | 3759 | /* |
|---|
| .. | .. |
|---|
| 4087 | 3912 | /* Helper for selecting a node in round robin mode */ |
|---|
| 4088 | 3913 | static inline int mtip_get_next_rr_node(void) |
|---|
| 4089 | 3914 | { |
|---|
| 4090 | | - static int next_node = -1; |
|---|
| 3915 | + static int next_node = NUMA_NO_NODE; |
|---|
| 4091 | 3916 | |
|---|
| 4092 | | - if (next_node == -1) { |
|---|
| 3917 | + if (next_node == NUMA_NO_NODE) { |
|---|
| 4093 | 3918 | next_node = first_online_node; |
|---|
| 4094 | 3919 | return next_node; |
|---|
| 4095 | 3920 | } |
|---|
| .. | .. |
|---|
| 4216 | 4041 | goto iomap_err; |
|---|
| 4217 | 4042 | } |
|---|
| 4218 | 4043 | |
|---|
| 4219 | | - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
|---|
| 4220 | | - rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
|---|
| 4221 | | - |
|---|
| 4222 | | - if (rv) { |
|---|
| 4223 | | - rv = pci_set_consistent_dma_mask(pdev, |
|---|
| 4224 | | - DMA_BIT_MASK(32)); |
|---|
| 4225 | | - if (rv) { |
|---|
| 4226 | | - dev_warn(&pdev->dev, |
|---|
| 4227 | | - "64-bit DMA enable failed\n"); |
|---|
| 4228 | | - goto setmask_err; |
|---|
| 4229 | | - } |
|---|
| 4230 | | - } |
|---|
| 4044 | + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
|---|
| 4045 | + if (rv) { |
|---|
| 4046 | + dev_warn(&pdev->dev, "64-bit DMA enable failed\n"); |
|---|
| 4047 | + goto setmask_err; |
|---|
| 4231 | 4048 | } |
|---|
| 4232 | 4049 | |
|---|
| 4233 | 4050 | /* Copy the info we may need later into the private data structure. */ |
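Finally, the probe path collapses the old pci_set_dma_mask()/pci_set_consistent_dma_mask() sequence into a single dma_set_mask_and_coherent() call. A sketch of the usual probe-time pattern; the 32-bit fallback here is illustrative only (the driver above simply warns and bails out on failure):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_set_dma_masks(struct pci_dev *pdev)
{
	int rv;

	/* Ask for 64-bit streaming + coherent DMA, fall back to 32-bit. */
	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv)
		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rv)
		dev_warn(&pdev->dev, "no usable DMA configuration\n");

	return rv;
}
```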
|---|