2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/scsi/mvumi.c
@@ -1,24 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell UMI driver
  *
  * Copyright 2011 Marvell. <jyli@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #include <linux/kernel.h>
@@ -143,8 +127,9 @@
 
         case RESOURCE_UNCACHED_MEMORY:
                 size = round_up(size, 8);
-                res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-                                &res->bus_addr);
+                res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+                                &res->bus_addr,
+                                GFP_KERNEL);
                 if (!res->virt_addr) {
                         dev_err(&mhba->pdev->dev,
                                 "unable to allocate consistent mem,"
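
Note: pci_zalloc_consistent() was only a thin compat wrapper around the generic API (include/linux/pci-dma-compat.h), so the conversion above is mechanical. Roughly, hedging on the exact wrapper in this kernel vintage:

    /* legacy wrapper, approximately: */
    static inline void *pci_zalloc_consistent(struct pci_dev *hwdev,
                    size_t size, dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(hwdev ? &hwdev->dev : NULL, size,
                                      dma_handle, GFP_ATOMIC);
    }

The two real deltas are that the caller now names the GFP flag, and since this path can sleep, GFP_KERNEL replaces the wrapper's implied GFP_ATOMIC. No zeroing is lost: on modern kernels dma_alloc_coherent() already returns zeroed memory.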
@@ -175,7 +160,7 @@
         list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                 switch (res->type) {
                 case RESOURCE_UNCACHED_MEMORY:
-                        pci_free_consistent(mhba->pdev, res->size,
+                        dma_free_coherent(&mhba->pdev->dev, res->size,
                                 res->virt_addr, res->bus_addr);
                         break;
                 case RESOURCE_CACHED_MEMORY:
@@ -210,23 +195,22 @@
         unsigned int sgnum = scsi_sg_count(scmd);
         dma_addr_t busaddr;
 
-        sg = scsi_sglist(scmd);
-        *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-                        (int) scmd->sc_data_direction);
+        *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+                        scmd->sc_data_direction);
         if (*sg_count > mhba->max_sge) {
                 dev_err(&mhba->pdev->dev,
                         "sg count[0x%x] is bigger than max sg[0x%x].\n",
                         *sg_count, mhba->max_sge);
-                pci_unmap_sg(mhba->pdev, sg, sgnum,
-                        (int) scmd->sc_data_direction);
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+                        scmd->sc_data_direction);
                 return -1;
         }
-        for (i = 0; i < *sg_count; i++) {
-                busaddr = sg_dma_address(&sg[i]);
+        scsi_for_each_sg(scmd, sg, *sg_count, i) {
+                busaddr = sg_dma_address(sg);
                 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
                 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
                 m_sg->flags = 0;
-                sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+                sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
                 if ((i + 1) == *sg_count)
                         m_sg->flags |= 1U << mhba->eot_flag;
 
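
Note: two fixes ride along with the API switch here. dma_map_sg() takes the enum dma_data_direction directly, which removes the (int) casts, and the walk moves from &sg[i] indexing to scsi_for_each_sg(), which follows sg_next() and therefore stays correct when the scatterlist is chained rather than a flat array. A minimal sketch of the pattern, with hba_prog_sgd() standing in as a hypothetical hardware-specific descriptor write:

    struct scatterlist *sg;
    int i, nents;

    nents = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                       scsi_sg_count(scmd), scmd->sc_data_direction);
    if (!nents)
            return -1;                      /* mapping failed */

    scsi_for_each_sg(scmd, sg, nents, i)    /* sg_next()-safe walk */
            hba_prog_sgd(mhba, sg_dma_address(sg), sg_dma_len(sg));

Unmapping must pass the original entry count (scsi_sg_count()), not the possibly smaller value dma_map_sg() returned after IOMMU merging; that is exactly what the error path above does with sgnum.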
@@ -246,7 +230,8 @@
         if (size == 0)
                 return 0;
 
-        virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+        virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+                        GFP_KERNEL);
         if (!virt_addr)
                 return -1;
 
@@ -274,8 +259,8 @@
         }
         INIT_LIST_HEAD(&cmd->queue_pointer);
 
-        cmd->frame = pci_alloc_consistent(mhba->pdev,
-                        mhba->ib_max_size, &cmd->frame_phys);
+        cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+                        &cmd->frame_phys, GFP_KERNEL);
         if (!cmd->frame) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
                         " frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +272,7 @@
         if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory"
                         " for internal frame\n");
-                pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+                dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                         cmd->frame, cmd->frame_phys);
                 kfree(cmd);
                 return NULL;
@@ -313,10 +298,10 @@
                 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
                         (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 
-                pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+                dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
                         phy_addr);
         }
-        pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+        dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                 cmd->frame, cmd->frame_phys);
         kfree(cmd);
 }
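
Note: this free path is the inverse of the lower_32_bits()/upper_32_bits() split done in mvumi_make_sgl(). One observation on the unchanged context lines: the (dma_addr_t) cast binds after both shifts, so (m_sg->baseaddr_h << 16) << 16 is evaluated in 32-bit arithmetic and always yields zero, meaning the reconstruction only round-trips addresses below 4 GB. A cast-first variant would keep the high word while still avoiding an undefined 32-bit shift when dma_addr_t is only 32 bits wide:

    phy_addr = (dma_addr_t) m_sg->baseaddr_l |
               (((dma_addr_t) m_sg->baseaddr_h << 16) << 16);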
@@ -663,16 +648,17 @@
         }
 }
 
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
 {
-        unsigned int ret = 0;
+        int ret = 0;
+
         pci_set_master(pdev);
 
         if (IS_DMA64) {
-                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+                        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         } else
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
         return ret;
 }
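
Note: returning int rather than unsigned int lets the negative errno from dma_set_mask() propagate to callers, and the try-64-bit-then-fall-back-to-32 cascade is the stock mask negotiation (IS_DMA64 is a driver-local macro). For comparison, a sketch of the same negotiation using the combined helper, which is not what this driver does since it only sets the streaming mask:

    static int example_set_dma_mask(struct pci_dev *pdev)
    {
            /* prefer 64-bit DMA addressing, fall back to 32-bit */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return dma_set_mask_and_coherent(&pdev->dev,
                                                     DMA_BIT_MASK(32));
            return 0;
    }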
@@ -715,8 +701,8 @@
 
         mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 
-        scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
-                scmd->serial_number, scmd->cmnd[0], scmd->retries);
+        scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
+                scmd->request->tag, scmd->cmnd[0], scmd->retries);
 
         return mhba->instancet->reset_host(mhba);
 }
@@ -749,7 +735,7 @@
         spin_lock_irqsave(mhba->shost->host_lock, flags);
         atomic_dec(&cmd->sync_cmd);
         if (mhba->tag_cmd[cmd->frame->tag]) {
-                mhba->tag_cmd[cmd->frame->tag] = 0;
+                mhba->tag_cmd[cmd->frame->tag] = NULL;
                 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
                         cmd->frame->tag);
                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
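
Note: mhba->tag_cmd[] holds struct mvumi_cmd pointers, so NULL is the accurate sentinel; plain 0 compiles but draws sparse's "Using plain integer as NULL pointer" warning. The same substitution recurs in the completion and abort paths below.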
@@ -771,7 +757,7 @@
         mvumi_free_cmds(mhba);
         mvumi_release_mem_resource(mhba);
         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
         kfree(mhba->regs);
         pci_release_regions(mhba->pdev);
@@ -1339,9 +1325,9 @@
         }
 
         if (scsi_bufflen(scmd))
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                         scsi_sg_count(scmd),
-                        (int) scmd->sc_data_direction);
+                        scmd->sc_data_direction);
         cmd->scmd->scsi_done(scmd);
         mvumi_return_cmd(mhba, cmd);
 }
@@ -1791,7 +1777,7 @@
                 cmd = mhba->tag_cmd[ob_frame->tag];
 
                 atomic_dec(&mhba->fw_outstanding);
-                mhba->tag_cmd[ob_frame->tag] = 0;
+                mhba->tag_cmd[ob_frame->tag] = NULL;
                 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
                 if (cmd->scmd)
                         mvumi_complete_cmd(mhba, cmd, ob_frame);
@@ -2101,7 +2087,6 @@
         unsigned long irq_flags;
 
         spin_lock_irqsave(shost->host_lock, irq_flags);
-        scsi_cmd_get_serial(shost, scmd);
 
         mhba = (struct mvumi_hba *) shost->hostdata;
         scmd->result = 0;
@@ -2137,7 +2122,7 @@
         spin_lock_irqsave(mhba->shost->host_lock, flags);
 
         if (mhba->tag_cmd[cmd->frame->tag]) {
-                mhba->tag_cmd[cmd->frame->tag] = 0;
+                mhba->tag_cmd[cmd->frame->tag] = NULL;
                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
         }
         if (!list_empty(&cmd->queue_pointer))
@@ -2148,9 +2133,9 @@
         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
         scmd->SCp.ptr = NULL;
         if (scsi_bufflen(scmd)) {
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                         scsi_sg_count(scmd),
-                        (int)scmd->sc_data_direction);
+                        scmd->sc_data_direction);
         }
         mvumi_return_cmd(mhba, cmd);
         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2195,6 +2180,7 @@
         .eh_timed_out = mvumi_timed_out,
         .eh_host_reset_handler = mvumi_host_reset,
         .bios_param = mvumi_bios_param,
+        .dma_boundary = PAGE_SIZE - 1,
         .this_id = -1,
 };
 
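
Note: .dma_boundary tells the block layer the address boundary that no single mapped segment may cross; PAGE_SIZE - 1 confines every segment to one page, so neither the midlayer nor an IOMMU merge can hand mvumi_make_sgl() a segment spanning pages. Presumably this preserves the segment sizes the hardware SG descriptors saw under the old PCI DMA path, which never merged entries.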
@@ -2362,8 +2348,8 @@
                 ret = -ENOMEM;
                 goto fail_alloc_mem;
         }
-        mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
-                        &mhba->handshake_page_phys);
+        mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+                        HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
         if (!mhba->handshake_page) {
                 dev_err(&mhba->pdev->dev,
                         "failed to allocate memory for handshake\n");
@@ -2383,7 +2369,7 @@
 
 fail_ready_state:
         mvumi_release_mem_resource(mhba);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
 fail_alloc_page:
         kfree(mhba->regs);
@@ -2481,20 +2467,9 @@
         if (ret)
                 return ret;
 
-        pci_set_master(pdev);
-
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail_set_dma_mask;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail_set_dma_mask;
-        }
+        ret = mvumi_pci_set_master(pdev);
+        if (ret)
+                goto fail_set_dma_mask;
 
         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
         if (!host) {
@@ -2628,19 +2603,10 @@
                 dev_err(&pdev->dev, "enable device failed\n");
                 return ret;
         }
-        pci_set_master(pdev);
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail;
-        }
+
+        ret = mvumi_pci_set_master(pdev);
+        if (ret)
+                goto fail;
         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
         if (ret)
                 goto fail;
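
Note: probe and resume previously open-coded the identical pci_set_master() plus mask-setting cascade; funnelling both through mvumi_pci_set_master() leaves a single place that negotiates the DMA mask, with one error check at each call site:

    ret = mvumi_pci_set_master(pdev);
    if (ret)
            goto fail;      /* fail_set_dma_mask in the probe path */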