2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/block/mtip32xx/mtip32xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Driver for the Micron P320 SSD
  * Copyright (C) 2011 Micron Technology, Inc.
@@ -5,17 +6,6 @@
  * Portions of this code were derived from works subjected to the
  * following copyright:
  * Copyright (C) 2009 Integrated Device Technology, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/pci.h>
@@ -40,6 +30,7 @@
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/prefetch.h>
+#include <linux/numa.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -138,7 +129,7 @@
 /*
  * This function check_for_surprise_removal is called
  * while card is removed from the system and it will
- * read the vendor id from the configration space
+ * read the vendor id from the configuration space
  *
  * @pdev Pointer to the pci_dev structure.
  *
@@ -166,41 +157,6 @@
 	}
 
 	return false; /* device present */
-}
-
-/* we have to use runtime tag to setup command header */
-static void mtip_init_cmd_header(struct request *rq)
-{
-	struct driver_data *dd = rq->q->queuedata;
-	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
-	/* Point the command headers at the command tables. */
-	cmd->command_header = dd->port->command_list +
-		(sizeof(struct mtip_cmd_hdr) * rq->tag);
-	cmd->command_header_dma = dd->port->command_list_dma +
-		(sizeof(struct mtip_cmd_hdr) * rq->tag);
-
-	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
-		cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
-
-	cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
-}
-
-static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
-{
-	struct request *rq;
-
-	if (mtip_check_surprise_removal(dd->pdev))
-		return NULL;
-
-	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
-	if (IS_ERR(rq))
-		return NULL;
-
-	/* Internal cmd isn't submitted via .queue_rq */
-	mtip_init_cmd_header(rq);
-
-	return blk_mq_rq_to_pdu(rq);
 }
 
 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
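Note: nothing is lost with the two helpers removed above. The command-header setup they centralized moves into the issue paths, which look the header up in the port's command list by tag on every submission (see the mtip_hw_submit_io() and mtip_issue_reserved_cmd() hunks below). The __force_bit2int marker also drops out here and in later hunks, presumably because the header and SG-entry fields are now declared __le32/__le16 in mtip32xx.h (not part of this diff). The relocated pattern, as it appears in those hunks:

struct mtip_cmd_hdr *hdr =
	dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;

hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
	hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);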
@@ -536,7 +492,8 @@
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 
 	cmd->status = status;
-	blk_mq_complete_request(req);
+	if (likely(!blk_should_fake_timeout(req->q)))
+		blk_mq_complete_request(req);
 }
 
 /*
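Note: blk_should_fake_timeout() is the block layer's timeout fault-injection hook: with CONFIG_FAIL_IO_TIMEOUT enabled it occasionally returns true so that a completion is deliberately dropped and the driver's timeout handling gets exercised; without that config it is a stub returning false, so the check adds no overhead. A minimal sketch of the completion-side pattern, with hypothetical mydrv_* names:

static void mydrv_complete_cmd(struct mydrv_cmd *cmd, blk_status_t status)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	cmd->status = status;
	/* drop the completion when fault injection asks us to */
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}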
@@ -1023,13 +980,14 @@
 		return -EFAULT;
 	}
 
-	int_cmd = mtip_get_int_command(dd);
-	if (!int_cmd) {
+	if (mtip_check_surprise_removal(dd->pdev))
+		return -EFAULT;
+
+	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
+	if (IS_ERR(rq)) {
 		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
 		return -EFAULT;
 	}
-	rq = blk_mq_rq_from_pdu(int_cmd);
-	rq->special = &icmd;
 
 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 
@@ -1050,6 +1008,8 @@
 	}
 
 	/* Copy the command to the command table */
+	int_cmd = blk_mq_rq_to_pdu(rq);
+	int_cmd->icmd = &icmd;
 	memcpy(int_cmd->command, fis, fis_len*4);
 
 	rq->timeout = timeout;
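Note: rq->special was removed from struct request, so the internal command's parameter block now travels in the driver's own per-request payload (the pdu blk-mq allocates behind every request). The two added lines, annotated:

int_cmd = blk_mq_rq_to_pdu(rq);	/* driver pdu sits behind the request */
int_cmd->icmd = &icmd;		/* replaces: rq->special = &icmd */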
@@ -1222,14 +1182,6 @@
 		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
 	else
 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
-
-#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
-	/* Demux ID.DRAT & ID.RZAT to determine trim support */
-	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
-		port->dd->trim_supp = true;
-	else
-#endif
-		port->dd->trim_supp = false;
 
 	/* Set the identify buffer as valid. */
 	port->identify_valid = 1;
@@ -1418,81 +1370,6 @@
 }
 
 /*
- * Trim unused sectors
- *
- * @dd pointer to driver_data structure
- * @lba starting lba
- * @len # of 512b sectors to trim
- *
- * return value
- *      -ENOMEM Out of dma memory
- *      -EINVAL Invalid parameters passed in, trim not supported
- *      -EIO Error submitting trim request to hw
- */
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
-		unsigned int len)
-{
-	int i, rv = 0;
-	u64 tlba, tlen, sect_left;
-	struct mtip_trim_entry *buf;
-	dma_addr_t dma_addr;
-	struct host_to_dev_fis fis;
-
-	if (!len || dd->trim_supp == false)
-		return -EINVAL;
-
-	/* Trim request too big */
-	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
-
-	/* Trim request not aligned on 4k boundary */
-	WARN_ON(len % 8 != 0);
-
-	/* Warn if vu_trim structure is too big */
-	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
-
-	/* Allocate a DMA buffer for the trim structure */
-	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
-								GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	memset(buf, 0, ATA_SECT_SIZE);
-
-	for (i = 0, sect_left = len, tlba = lba;
-			i < MTIP_MAX_TRIM_ENTRIES && sect_left;
-			i++) {
-		tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
-					MTIP_MAX_TRIM_ENTRY_LEN :
-					sect_left);
-		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
-		buf[i].range = __force_bit2int cpu_to_le16(tlen);
-		tlba += tlen;
-		sect_left -= tlen;
-	}
-	WARN_ON(sect_left != 0);
-
-	/* Build the fis */
-	memset(&fis, 0, sizeof(struct host_to_dev_fis));
-	fis.type = 0x27;
-	fis.opts = 1 << 7;
-	fis.command = 0xfb;
-	fis.features = 0x60;
-	fis.sect_count = 1;
-	fis.device = ATA_DEVICE_OBS;
-
-	if (mtip_exec_internal_command(dd->port,
-					&fis,
-					5,
-					dma_addr,
-					ATA_SECT_SIZE,
-					0,
-					MTIP_TRIM_TIMEOUT_MS) < 0)
-		rv = -EIO;
-
-	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
-	return rv;
-}
-
-/*
  * Get the drive capacity.
  *
  * @dd Pointer to the device data structure.
@@ -1585,23 +1462,20 @@
 	int n;
 	unsigned int dma_len;
 	struct mtip_cmd_sg *command_sg;
-	struct scatterlist *sg = command->sg;
+	struct scatterlist *sg;
 
 	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
 
-	for (n = 0; n < nents; n++) {
+	for_each_sg(command->sg, sg, nents, n) {
 		dma_len = sg_dma_len(sg);
 		if (dma_len > 0x400000)
 			dev_err(&dd->pdev->dev,
 				"DMA segment length truncated\n");
-		command_sg->info = __force_bit2int
-			cpu_to_le32((dma_len-1) & 0x3FFFFF);
-		command_sg->dba = __force_bit2int
-			cpu_to_le32(sg_dma_address(sg));
-		command_sg->dba_upper = __force_bit2int
+		command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
+		command_sg->dba = cpu_to_le32(sg_dma_address(sg));
+		command_sg->dba_upper =
 			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
 		command_sg++;
-		sg++;
 	}
 }
 
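Note: for_each_sg() from <linux/scatterlist.h> replaces the manual sg++ walk; plain pointer arithmetic only works on a flat scatterlist array, while the iterator advances via sg_next() and therefore also handles chained scatterlists. A minimal sketch of the iteration, assuming a mapped list of nents entries:

struct scatterlist *sg;
int n;

for_each_sg(command->sg, sg, nents, n) {
	dma_addr_t dba = sg_dma_address(sg);
	unsigned int dma_len = sg_dma_len(sg);

	/* program one SG/PRD entry from (dba, dma_len) here */
}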
@@ -1694,7 +1568,7 @@
 		if (!user_buffer)
 			return -EFAULT;
 
-		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+		buf = dma_alloc_coherent(&port->dd->pdev->dev,
 				ATA_SECT_SIZE * xfer_sz,
 				&dma_addr,
 				GFP_KERNEL);
@@ -1704,7 +1578,6 @@
 				ATA_SECT_SIZE * xfer_sz);
 			return -ENOMEM;
 		}
-		memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
 	}
 
 	/* Build the FIS. */
@@ -1772,7 +1645,7 @@
 	}
 exit_drive_command:
 	if (buf)
-		dmam_free_coherent(&port->dd->pdev->dev,
+		dma_free_coherent(&port->dd->pdev->dev,
 				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
 	return rv;
 }
@@ -1862,11 +1735,9 @@
 	if (IS_ERR(outbuf))
 		return PTR_ERR(outbuf);
 
-	outbuf_dma = pci_map_single(dd->pdev,
-				outbuf,
-				taskout,
-				DMA_TO_DEVICE);
-	if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
+	outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
+				    taskout, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
 		err = -ENOMEM;
 		goto abort;
 	}
@@ -1880,10 +1751,9 @@
 		inbuf = NULL;
 		goto abort;
 	}
-	inbuf_dma = pci_map_single(dd->pdev,
-				inbuf,
-				taskin, DMA_FROM_DEVICE);
-	if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
+	inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
+				   taskin, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
 		err = -ENOMEM;
 		goto abort;
 	}
@@ -1945,8 +1815,8 @@
 			dev_warn(&dd->pdev->dev,
 				"data movement but "
 				"sect_count is 0\n");
-				err = -EINVAL;
-				goto abort;
+			err = -EINVAL;
+			goto abort;
 		}
 	}
 }
@@ -2002,11 +1872,11 @@
 
 	/* reclaim the DMA buffers.*/
 	if (inbuf_dma)
-		pci_unmap_single(dd->pdev, inbuf_dma,
-			taskin, DMA_FROM_DEVICE);
+		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+				 DMA_FROM_DEVICE);
 	if (outbuf_dma)
-		pci_unmap_single(dd->pdev, outbuf_dma,
-			taskout, DMA_TO_DEVICE);
+		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+				 DMA_TO_DEVICE);
 	inbuf_dma = 0;
 	outbuf_dma = 0;
 
@@ -2053,11 +1923,11 @@
 	}
 abort:
 	if (inbuf_dma)
-		pci_unmap_single(dd->pdev, inbuf_dma,
-			taskin, DMA_FROM_DEVICE);
+		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+				 DMA_FROM_DEVICE);
 	if (outbuf_dma)
-		pci_unmap_single(dd->pdev, outbuf_dma,
-			taskout, DMA_TO_DEVICE);
+		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+				 DMA_TO_DEVICE);
 	kfree(outbuf);
 	kfree(inbuf);
 
@@ -2174,7 +2044,6 @@
  * @dd Pointer to the driver data structure.
  * @start First sector to read.
  * @nsect Number of sectors to read.
- * @nents Number of entries in scatter list for the read command.
  * @tag The tag of this read command.
  * @callback Pointer to the function that should be called
  *	     when the read completes.
@@ -2186,16 +2055,20 @@
  * None
  */
 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
-			      struct mtip_cmd *command, int nents,
+			      struct mtip_cmd *command,
 			      struct blk_mq_hw_ctx *hctx)
 {
+	struct mtip_cmd_hdr *hdr =
+		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
 	struct host_to_dev_fis *fis;
 	struct mtip_port *port = dd->port;
 	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	u64 start = blk_rq_pos(rq);
 	unsigned int nsect = blk_rq_sectors(rq);
+	unsigned int nents;
 
 	/* Map the scatter list for DMA access */
+	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
 	prefetch(&port->flags);
@@ -2236,10 +2109,11 @@
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
-	command->command_header->opts =
-	    __force_bit2int cpu_to_le32(
-		(nents << 16) | 5 | AHCI_CMD_PREFETCH);
-	command->command_header->byte_count = 0;
+	hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
+	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+		hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
+	hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
+	hdr->byte_count = 0;
 
 	command->direction = dma_dir;
 
@@ -2718,12 +2592,12 @@
 			cmd->direction);
 
 	if (unlikely(cmd->unaligned))
-		up(&dd->port->cmd_slot_unal);
+		atomic_inc(&dd->port->cmd_slot_unal);
 
 	blk_mq_end_request(rq, cmd->status);
 }
 
-static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 	struct driver_data *dd = data;
@@ -2733,14 +2607,16 @@
 	clear_bit(req->tag, dd->port->cmds_to_issue);
 	cmd->status = BLK_STS_IOERR;
 	mtip_softirq_done_fn(req);
+	return true;
 }
 
-static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
 {
 	struct driver_data *dd = data;
 
 	set_bit(req->tag, dd->port->cmds_to_issue);
 	blk_abort_request(req);
+	return true;
 }
 
 /*
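Note: the blk_mq_tagset_busy_iter() callback type changed from void to bool; returning true continues the walk over in-flight requests and false stops it early, which is why both callbacks above now end with return true. A sketch of the shape, with a hypothetical cancel-everything callback:

static bool mydrv_cancel_rq(struct request *rq, void *data, bool reserved)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
	return true;	/* keep iterating over every busy tag */
}

invoked as blk_mq_tagset_busy_iter(&dd->tags, mydrv_cancel_rq, dd).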
@@ -2806,10 +2682,7 @@
 
 		blk_mq_quiesce_queue(dd->queue);
 
-		spin_lock(dd->queue->queue_lock);
-		blk_mq_tagset_busy_iter(&dd->tags,
-						mtip_queue_cmd, dd);
-		spin_unlock(dd->queue->queue_lock);
+		blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
 
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
 
@@ -2876,11 +2749,11 @@
 	struct mtip_port *port = dd->port;
 
 	if (port->block1)
-		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					port->block1, port->block1_dma);
 
 	if (port->command_list) {
-		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+		dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
 				port->command_list, port->command_list_dma);
 	}
 }
@@ -2899,24 +2772,22 @@
 
 	/* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */
 	port->block1 =
-		dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					&port->block1_dma, GFP_KERNEL);
 	if (!port->block1)
 		return -ENOMEM;
-	memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
 
 	/* Allocate dma memory for command list */
 	port->command_list =
-		dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+		dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
 					&port->command_list_dma, GFP_KERNEL);
 	if (!port->command_list) {
-		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
 					port->block1, port->block1_dma);
 		port->block1 = NULL;
 		port->block1_dma = 0;
 		return -ENOMEM;
 	}
-	memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
 
 	/* Setup all pointers into first DMA region */
 	port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
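Note: two independent cleanups meet in this hunk. The device-managed dmam_* allocators give way to plain dma_alloc_coherent()/dma_free_coherent() with explicit teardown, and the memset() after each allocation disappears because dma_alloc_coherent() returns zeroed memory (guaranteed since the v5.0-era DMA-mapping rework). The resulting allocate/free shape:

static void *mydrv_alloc_dma(struct device *dev, size_t sz, dma_addr_t *handle)
{
	/* arrives zeroed; no memset() required */
	return dma_alloc_coherent(dev, sz, handle, GFP_KERNEL);
}

with teardown now explicit: dma_free_coherent(dev, sz, buf, handle).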
@@ -3029,7 +2900,7 @@
 	else
 		dd->unal_qdepth = 0;
 
-	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
+	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
 
 	/* Spinlock to prevent concurrent issue */
 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3095,13 +2966,8 @@
 	mtip_start_port(dd->port);
 
 	/* Setup the ISR and enable interrupts. */
-	rv = devm_request_irq(&dd->pdev->dev,
-				dd->pdev->irq,
-				mtip_irq_handler,
-				IRQF_SHARED,
-				dev_driver_string(&dd->pdev->dev),
-				dd);
-
+	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
+			 dev_driver_string(&dd->pdev->dev), dd);
 	if (rv) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate IRQ %d\n", dd->pdev->irq);
@@ -3129,7 +2995,7 @@
 
 	/* Release the IRQ. */
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
-	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+	free_irq(dd->pdev->irq, dd);
 
 out2:
 	mtip_deinit_port(dd->port);
@@ -3184,7 +3050,7 @@
 
 	/* Release the IRQ. */
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
-	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+	free_irq(dd->pdev->irq, dd);
 	msleep(1000);
 
 	/* Free dma regions */
@@ -3534,58 +3400,24 @@
 	return false;
 }
 
-/*
- * Block layer make request function.
- *
- * This function is called by the kernel to process a BIO for
- * the P320 device.
- *
- * @queue Pointer to the request queue. Unused other than to obtain
- * the driver data structure.
- * @rq Pointer to the request.
- *
- */
-static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static inline bool is_stopped(struct driver_data *dd, struct request *rq)
 {
-	struct driver_data *dd = hctx->queue->queuedata;
-	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-	unsigned int nents;
+	if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
+		return false;
 
-	if (is_se_active(dd))
-		return -ENODATA;
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
+	    rq_data_dir(rq))
+		return true;
+	if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+		return true;
+	if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+		return true;
 
-	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
-		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
-				      &dd->dd_flag))) {
-			return -ENXIO;
-		}
-		if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
-			return -ENODATA;
-		}
-		if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
-				      &dd->dd_flag) &&
-				rq_data_dir(rq))) {
-			return -ENODATA;
-		}
-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
-			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
-			return -ENODATA;
-	}
-
-	if (req_op(rq) == REQ_OP_DISCARD) {
-		int err;
-
-		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
-		blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
-		return 0;
-	}
-
-	/* Create the scatter list for this request. */
-	nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
-
-	/* Issue the read/write. */
-	mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
-	return 0;
+	return false;
 }
 
 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
@@ -3606,7 +3438,7 @@
 			cmd->unaligned = 1;
 	}
 
-	if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
+	if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
 		return true;
 
 	return false;
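Note: the counting semaphore that throttled unaligned writes becomes a bare atomic_t (atomic_set() in an earlier hunk, atomic_inc() replacing up() in the completion hunk above). The acquire-side conventions differ between the two APIs: down_trylock() returns nonzero on failure, whereas atomic_dec_if_positive() returns the would-be new value and only performs the decrement when that value is >= 0, so a negative return means no slot was taken. A sketch of an acquire/release pair under those semantics:

/* consume a slot; true only if one was actually free */
static bool mydrv_get_unal_slot(struct mtip_port *port)
{
	return atomic_dec_if_positive(&port->cmd_slot_unal) >= 0;
}

static void mydrv_put_unal_slot(struct mtip_port *port)
{
	atomic_inc(&port->cmd_slot_unal);	/* the old up() side */
}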
@@ -3616,32 +3448,33 @@
 				  struct request *rq)
 {
 	struct driver_data *dd = hctx->queue->queuedata;
-	struct mtip_int_cmd *icmd = rq->special;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	struct mtip_int_cmd *icmd = cmd->icmd;
+	struct mtip_cmd_hdr *hdr =
+		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
 	struct mtip_cmd_sg *command_sg;
 
 	if (mtip_commands_active(dd->port))
-		return BLK_STS_RESOURCE;
+		return BLK_STS_DEV_RESOURCE;
 
+	hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+		hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
 	/* Populate the SG list */
-	cmd->command_header->opts =
-		__force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+	hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
 	if (icmd->buf_len) {
 		command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
 
-		command_sg->info =
-			__force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
-		command_sg->dba =
-			__force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+		command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+		command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
 		command_sg->dba_upper =
-			__force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+			cpu_to_le32((icmd->buffer >> 16) >> 16);
 
-		cmd->command_header->opts |=
-			__force_bit2int cpu_to_le32((1 << 16));
+		hdr->opts |= cpu_to_le32((1 << 16));
 	}
 
 	/* Populate the command header */
-	cmd->command_header->byte_count = 0;
+	hdr->byte_count = 0;
 
 	blk_mq_start_request(rq);
 	mtip_issue_non_ncq_command(dd->port, rq->tag);
@@ -3651,23 +3484,23 @@
 static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
+	struct driver_data *dd = hctx->queue->queuedata;
 	struct request *rq = bd->rq;
-	int ret;
-
-	mtip_init_cmd_header(rq);
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	if (blk_rq_is_passthrough(rq))
 		return mtip_issue_reserved_cmd(hctx, rq);
 
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
-		return BLK_STS_RESOURCE;
+		return BLK_STS_DEV_RESOURCE;
+
+	if (is_se_active(dd) || is_stopped(dd, rq))
+		return BLK_STS_IOERR;
 
 	blk_mq_start_request(rq);
 
-	ret = mtip_submit_request(hctx, rq);
-	if (likely(!ret))
-		return BLK_STS_OK;
-	return BLK_STS_IOERR;
+	mtip_hw_submit_io(dd, rq, cmd, hctx);
+	return BLK_STS_OK;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
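Note: mtip_submit_request() is folded into mtip_queue_rq(), replacing the errno-to-BLK_STS translation with direct block-status returns. BLK_STS_DEV_RESOURCE (added in v4.16) tells blk-mq the shortage is private to this device, so the core will rerun the queue itself, after a short delay if no completion arrives to trigger it; stopped or security-locked states now fail immediately with BLK_STS_IOERR via the new is_se_active()/is_stopped() checks. Condensed shape of such a dispatch path, with hypothetical helpers:

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct mydrv_data *dd = hctx->queue->queuedata;

	if (mydrv_device_busy(dd))		/* hypothetical helper */
		return BLK_STS_DEV_RESOURCE;	/* blk-mq retries on its own */
	if (mydrv_device_dead(dd))		/* hypothetical helper */
		return BLK_STS_IOERR;		/* fail the request now */

	blk_mq_start_request(bd->rq);
	/* issue bd->rq to the hardware here */
	return BLK_STS_OK;
}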
@@ -3679,8 +3512,8 @@
 	if (!cmd->command)
 		return;
 
-	dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
-				cmd->command, cmd->command_dma);
+	dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
+			  cmd->command_dma);
 }
 
 static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3689,12 +3522,10 @@
 	struct driver_data *dd = set->driver_data;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+	cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 			&cmd->command_dma, GFP_KERNEL);
 	if (!cmd->command)
 		return -ENOMEM;
-
-	memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
 
 	sg_init_table(cmd->sg, MTIP_MAX_SG);
 	return 0;
@@ -3841,15 +3672,8 @@
 	blk_queue_physical_block_size(dd->queue, 4096);
 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
 	blk_queue_max_segment_size(dd->queue, 0x400000);
+	dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
 	blk_queue_io_min(dd->queue, 4096);
-
-	/* Signal trim support */
-	if (dd->trim_supp == true) {
-		blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue);
-		dd->queue->limits.discard_granularity = 4096;
-		blk_queue_max_discard_sectors(dd->queue,
-			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
-	}
 
 	/* Set the capacity of the device in 512 byte sectors. */
 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
@@ -3861,7 +3685,7 @@
 	set_capacity(dd->disk, capacity);
 
 	/* Enable the block device and add it to /dev */
-	device_add_disk(&dd->pdev->dev, dd->disk);
+	device_add_disk(&dd->pdev->dev, dd->disk, NULL);
 
 	dd->bdev = bdget_disk(dd->disk, 0);
 	/*
@@ -3923,12 +3747,13 @@
 	return rv;
 }
 
-static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->status = BLK_STS_IOERR;
 	blk_mq_complete_request(rq);
+	return true;
 }
 
 /*
@@ -4087,9 +3912,9 @@
 /* Helper for selecting a node in round robin mode */
 static inline int mtip_get_next_rr_node(void)
 {
-	static int next_node = -1;
+	static int next_node = NUMA_NO_NODE;
 
-	if (next_node == -1) {
+	if (next_node == NUMA_NO_NODE) {
 		next_node = first_online_node;
 		return next_node;
 	}
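Note: NUMA_NO_NODE from <linux/numa.h> (numerically -1, matching the include added near the top of the diff) is the kernel's canonical "no node" sentinel, so this is purely a readability change. The round-robin idiom with the named constant, sketched:

static int mydrv_next_rr_node(void)
{
	static int next_node = NUMA_NO_NODE;

	if (next_node == NUMA_NO_NODE) {
		next_node = first_online_node;
		return next_node;
	}
	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)	/* wrap around */
		next_node = first_online_node;
	return next_node;
}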
@@ -4216,18 +4041,10 @@
 		goto iomap_err;
 	}
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
-		if (rv) {
-			rv = pci_set_consistent_dma_mask(pdev,
-						DMA_BIT_MASK(32));
-			if (rv) {
-				dev_warn(&pdev->dev,
-					"64-bit DMA enable failed\n");
-				goto setmask_err;
-			}
-		}
+	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rv) {
+		dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
+		goto setmask_err;
 	}
 
 	/* Copy the info we may need later into the private data structure. */
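Note: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, collapsing the pci_set_dma_mask() + pci_set_consistent_dma_mask() pair and its 32-bit fallback ladder. A sketch of the probe-time pattern, including the optional 32-bit fallback this driver chooses not to keep:

static int mydrv_setup_dma(struct pci_dev *pdev)
{
	int rv;

	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv)	/* optional fallback for 64-bit-incapable platforms */
		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rv)
		dev_warn(&pdev->dev, "no usable DMA mask\n");
	return rv;
}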