```diff
@@ -1,24 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell UMI driver
  *
  * Copyright 2011 Marvell. <jyli@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
  */
 
 #include <linux/kernel.h>
```
```diff
@@ -143,8 +127,9 @@
 
         case RESOURCE_UNCACHED_MEMORY:
                 size = round_up(size, 8);
-                res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-                                                       &res->bus_addr);
+                res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+                                                    &res->bus_addr,
+                                                    GFP_KERNEL);
                 if (!res->virt_addr) {
                         dev_err(&mhba->pdev->dev,
                                 "unable to allocate consistent mem,"
```
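This hunk is the core of the conversion. The legacy `pci_alloc_consistent()`/`pci_zalloc_consistent()` helpers were thin wrappers around `dma_alloc_coherent()` that hard-coded `GFP_ATOMIC`; calling the generic API directly means dereferencing the `struct device` embedded in the `pci_dev` (`&pdev->dev`) and, since this path can sleep, choosing `GFP_KERNEL` explicitly. A minimal sketch of the alloc/free pairing, with hypothetical helper names:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helpers, not from the patch: allocate and free a
 * coherent DMA buffer for a PCI device. dma_alloc_coherent() takes a
 * struct device, so the pci_dev is dereferenced via &pdev->dev, and
 * the GFP flags become an explicit choice (pci_alloc_consistent()
 * hard-coded GFP_ATOMIC).
 */
static void *example_alloc(struct pci_dev *pdev, size_t size,
                           dma_addr_t *handle)
{
        return dma_alloc_coherent(&pdev->dev, size, handle, GFP_KERNEL);
}

static void example_free(struct pci_dev *pdev, size_t size,
                         void *vaddr, dma_addr_t handle)
{
        /* Must use the same size and handle the allocation returned. */
        dma_free_coherent(&pdev->dev, size, vaddr, handle);
}
```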
```diff
@@ -175,7 +160,7 @@
         list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                 switch (res->type) {
                 case RESOURCE_UNCACHED_MEMORY:
-                        pci_free_consistent(mhba->pdev, res->size,
+                        dma_free_coherent(&mhba->pdev->dev, res->size,
                                         res->virt_addr, res->bus_addr);
                         break;
                 case RESOURCE_CACHED_MEMORY:
```
```diff
@@ -210,23 +195,22 @@
         unsigned int sgnum = scsi_sg_count(scmd);
         dma_addr_t busaddr;
 
-        sg = scsi_sglist(scmd);
-        *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-                               (int) scmd->sc_data_direction);
+        *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+                               scmd->sc_data_direction);
         if (*sg_count > mhba->max_sge) {
                 dev_err(&mhba->pdev->dev,
                         "sg count[0x%x] is bigger than max sg[0x%x].\n",
                         *sg_count, mhba->max_sge);
-                pci_unmap_sg(mhba->pdev, sg, sgnum,
-                             (int) scmd->sc_data_direction);
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+                             scmd->sc_data_direction);
                 return -1;
         }
-        for (i = 0; i < *sg_count; i++) {
-                busaddr = sg_dma_address(&sg[i]);
+        scsi_for_each_sg(scmd, sg, *sg_count, i) {
+                busaddr = sg_dma_address(sg);
                 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
                 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
                 m_sg->flags = 0;
-                sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+                sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
                 if ((i + 1) == *sg_count)
                         m_sg->flags |= 1U << mhba->eot_flag;
 
```
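Two things change here beyond the API name. First, `dma_map_sg()` may return fewer entries than it was handed (an IOMMU can merge adjacent segments), so the hardware SG list must be built from the returned count. Second, chained scatterlists are not plain arrays, so indexing with `&sg[i]` is wrong in general; `scsi_for_each_sg()` walks the list with `sg_next()` instead. A sketch under those assumptions, with a hypothetical descriptor layout:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical hardware SG descriptor, for illustration only. */
struct hw_sgd {
        __le32 addr_lo;
        __le32 addr_hi;
        __le32 len;
};

/* Sketch: map a SCSI command's scatterlist and fill descriptors from
 * the *returned* entry count. scsi_for_each_sg() advances with
 * sg_next(), which is why array indexing had to go.
 */
static int example_build_sgl(struct pci_dev *pdev, struct scsi_cmnd *scmd,
                             struct hw_sgd *sgd)
{
        struct scatterlist *sg;
        int i, nents;

        nents = dma_map_sg(&pdev->dev, scsi_sglist(scmd),
                           scsi_sg_count(scmd), scmd->sc_data_direction);
        if (!nents)
                return -ENOMEM;    /* dma_map_sg() returns 0 on failure */

        scsi_for_each_sg(scmd, sg, nents, i) {
                sgd[i].addr_lo = cpu_to_le32(lower_32_bits(sg_dma_address(sg)));
                sgd[i].addr_hi = cpu_to_le32(upper_32_bits(sg_dma_address(sg)));
                sgd[i].len = cpu_to_le32(sg_dma_len(sg));
        }
        return nents;
}
```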
```diff
@@ -246,7 +230,8 @@
         if (size == 0)
                 return 0;
 
-        virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+        virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+                                       GFP_KERNEL);
         if (!virt_addr)
                 return -1;
 
```
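One subtlety in this hunk: `pci_zalloc_consistent()` returned zeroed memory, and the replacement leans on `dma_alloc_coherent()` providing the same guarantee, which was being formalized in the DMA API around the same development cycle. On a kernel where that guarantee is in doubt, the conservative equivalent zeroes explicitly (hypothetical variant, not what the patch does):

```c
/* Belt-and-braces variant for kernels where the zeroing behaviour of
 * dma_alloc_coherent() is uncertain:
 */
virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
                               GFP_KERNEL);
if (virt_addr)
        memset(virt_addr, 0, size);
```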
```diff
@@ -274,8 +259,8 @@
         }
         INIT_LIST_HEAD(&cmd->queue_pointer);
 
-        cmd->frame = pci_alloc_consistent(mhba->pdev,
-                                mhba->ib_max_size, &cmd->frame_phys);
+        cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+                                        &cmd->frame_phys, GFP_KERNEL);
         if (!cmd->frame) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
                         " frame,size = %d.\n", mhba->ib_max_size);
```
```diff
@@ -287,7 +272,7 @@
         if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
                 dev_err(&mhba->pdev->dev, "failed to allocate memory"
                         " for internal frame\n");
-                pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+                dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                                 cmd->frame, cmd->frame_phys);
                 kfree(cmd);
                 return NULL;
```
```diff
@@ -313,10 +298,10 @@
                 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
                         (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 
-                pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+                dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
                         phy_addr);
         }
-        pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+        dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                                 cmd->frame, cmd->frame_phys);
         kfree(cmd);
 }
```
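The teardown path reassembles a `dma_addr_t` from the two little-endian 32-bit halves stored in the frame. The `(x << 16) << 16` idiom exists to avoid a shift by 32, which is undefined behaviour when `dma_addr_t` is only 32 bits wide; note that for the high half to actually survive, the operand has to be widened to `dma_addr_t` before shifting, as in this sketch (hypothetical helpers, not from the patch):

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: round-trip a dma_addr_t through two 32-bit halves. The
 * two 16-bit shifts avoid an undefined shift-by-32 on configurations
 * where dma_addr_t is 32 bits; there the high half shifts out to
 * zero, which is harmless since such addresses fit in 32 bits.
 */
static dma_addr_t join_dma_addr(u32 lo, u32 hi)
{
        return (dma_addr_t)lo | (((dma_addr_t)hi << 16) << 16);
}

static void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
        *lo = lower_32_bits(addr);
        *hi = upper_32_bits(addr);
}
```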
```diff
@@ -663,16 +648,17 @@
         }
 }
 
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
 {
-        unsigned int ret = 0;
+        int ret = 0;
+
         pci_set_master(pdev);
 
         if (IS_DMA64) {
-                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+                        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         } else
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
         return ret;
 }
```
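The helper keeps the driver's try-64-then-fall-back-to-32 policy, now through `dma_set_mask()`. The return-type change from `unsigned int` to `int` matters: `dma_set_mask()` reports failure as a negative errno, which an unsigned return would mangle for the callers added later in this patch. On current kernels the same policy is usually spelled with `dma_set_mask_and_coherent()`, which sets the streaming and coherent masks together; a sketch with a hypothetical name:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch of the usual fallback, assuming the device may or may not
 * be 64-bit DMA capable: try the wide mask first, then fall back.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

        if (ret)
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return ret;
}
```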
```diff
@@ -715,8 +701,8 @@
 
         mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 
-        scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
-                        scmd->serial_number, scmd->cmnd[0], scmd->retries);
+        scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
+                        scmd->request->tag, scmd->cmnd[0], scmd->retries);
 
         return mhba->instancet->reset_host(mhba);
 }
```
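`scmd->serial_number` was on its way out of the SCSI midlayer, and this log line was its only consumer here, so the message switches to the block-layer request tag (with the format fixed from `%ld` to `%u` to match); a later hunk drops the companion `scsi_cmd_get_serial()` call in queuecommand. At the time the tag lived on the backing request; newer kernels wrap the lookup as `scsi_cmd_to_rq(scmd)->tag`:

```c
/* Tag-based identification as used in this patch; later kernels
 * spell the lookup scsi_cmd_to_rq(scmd)->tag.
 */
unsigned int tag = scmd->request->tag;

scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
            tag, scmd->cmnd[0], scmd->retries);
```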
```diff
@@ -749,7 +735,7 @@
         spin_lock_irqsave(mhba->shost->host_lock, flags);
         atomic_dec(&cmd->sync_cmd);
         if (mhba->tag_cmd[cmd->frame->tag]) {
-                mhba->tag_cmd[cmd->frame->tag] = 0;
+                mhba->tag_cmd[cmd->frame->tag] = NULL;
                 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
                         cmd->frame->tag);
                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
```
```diff
@@ -771,7 +757,7 @@
         mvumi_free_cmds(mhba);
         mvumi_release_mem_resource(mhba);
         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
         kfree(mhba->regs);
         pci_release_regions(mhba->pdev);
```
```diff
@@ -1339,9 +1325,9 @@
         }
 
         if (scsi_bufflen(scmd))
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                              scsi_sg_count(scmd),
-                             (int) scmd->sc_data_direction);
+                             scmd->sc_data_direction);
         cmd->scmd->scsi_done(scmd);
         mvumi_return_cmd(mhba, cmd);
 }
```
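The completion path is the unmap side of the mapping done at submission. Two invariants are worth spelling out: `dma_unmap_sg()` takes the nents originally passed to `dma_map_sg()` (here `scsi_sg_count()`), not the possibly smaller count `dma_map_sg()` returned, and the direction must match the mapping. The `(int)` cast disappears because the parameter really is `enum dma_data_direction`, which is what `scmd->sc_data_direction` already holds. A sketch with a hypothetical helper name:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: unmap a command's data buffer with the original nents
 * (scsi_sg_count()) and the same direction used at map time.
 */
static void example_unmap(struct pci_dev *pdev, struct scsi_cmnd *scmd)
{
        if (scsi_bufflen(scmd))
                dma_unmap_sg(&pdev->dev, scsi_sglist(scmd),
                             scsi_sg_count(scmd), scmd->sc_data_direction);
}
```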
```diff
@@ -1791,7 +1777,7 @@
         cmd = mhba->tag_cmd[ob_frame->tag];
 
         atomic_dec(&mhba->fw_outstanding);
-        mhba->tag_cmd[ob_frame->tag] = 0;
+        mhba->tag_cmd[ob_frame->tag] = NULL;
         tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
         if (cmd->scmd)
                 mvumi_complete_cmd(mhba, cmd, ob_frame);
```
```diff
@@ -2101,7 +2087,6 @@
         unsigned long irq_flags;
 
         spin_lock_irqsave(shost->host_lock, irq_flags);
-        scsi_cmd_get_serial(shost, scmd);
 
         mhba = (struct mvumi_hba *) shost->hostdata;
         scmd->result = 0;
```
```diff
@@ -2137,7 +2122,7 @@
         spin_lock_irqsave(mhba->shost->host_lock, flags);
 
         if (mhba->tag_cmd[cmd->frame->tag]) {
-                mhba->tag_cmd[cmd->frame->tag] = 0;
+                mhba->tag_cmd[cmd->frame->tag] = NULL;
                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
         }
         if (!list_empty(&cmd->queue_pointer))
```
```diff
@@ -2148,9 +2133,9 @@
         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
         scmd->SCp.ptr = NULL;
         if (scsi_bufflen(scmd)) {
-                pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+                dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                              scsi_sg_count(scmd),
-                             (int)scmd->sc_data_direction);
+                             scmd->sc_data_direction);
         }
         mvumi_return_cmd(mhba, cmd);
         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
```
```diff
@@ -2195,6 +2180,7 @@
         .eh_timed_out = mvumi_timed_out,
         .eh_host_reset_handler = mvumi_host_reset,
         .bios_param = mvumi_bios_param,
+        .dma_boundary = PAGE_SIZE - 1,
         .this_id = -1,
 };
 
```
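New in the host template: a `dma_boundary` of `PAGE_SIZE - 1` tells the block layer never to build a scatter/gather element that crosses a page boundary. The hunk does not state the motivation, but the effect is that every segment the driver maps fits inside one page, a constraint the old PCI DMA path evidently satisfied. Sketch of the template field, with the other fields as placeholders:

```c
#include <linux/module.h>
#include <scsi/scsi_host.h>

/* Sketch: with dma_boundary = PAGE_SIZE - 1, the block layer splits
 * any segment that would straddle a page boundary before the driver
 * sees it. Fields other than dma_boundary are hypothetical filler.
 */
static struct scsi_host_template example_template = {
        .module       = THIS_MODULE,
        .name         = "example",
        .dma_boundary = PAGE_SIZE - 1,
        .this_id      = -1,
};
```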
```diff
@@ -2362,8 +2348,8 @@
                 ret = -ENOMEM;
                 goto fail_alloc_mem;
         }
-        mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
-                                                &mhba->handshake_page_phys);
+        mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+                        HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
         if (!mhba->handshake_page) {
                 dev_err(&mhba->pdev->dev,
                         "failed to allocate memory for handshake\n");
```
```diff
@@ -2383,7 +2369,7 @@
 
 fail_ready_state:
         mvumi_release_mem_resource(mhba);
-        pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+        dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                 mhba->handshake_page, mhba->handshake_page_phys);
 fail_alloc_page:
         kfree(mhba->regs);
```
```diff
@@ -2481,20 +2467,9 @@
         if (ret)
                 return ret;
 
-        pci_set_master(pdev);
-
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail_set_dma_mask;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail_set_dma_mask;
-        }
+        ret = mvumi_pci_set_master(pdev);
+        if (ret)
+                goto fail_set_dma_mask;
 
         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
         if (!host) {
```
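With the mask logic folded into `mvumi_pci_set_master()`, probe shrinks to a single call whose failure funnels into the existing `fail_set_dma_mask` unwind label; this is what the helper's return-type fix was for, since `if (ret)` is only meaningful once the errno survives. A sketch of the shape, with hypothetical label names and referring to the helper defined earlier in the patch:

```c
#include <linux/pci.h>

/* Sketch of the probe-time pattern: each acquisition gets a matching
 * unwind label, released in reverse order on failure.
 */
static int example_probe(struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        ret = mvumi_pci_set_master(pdev);  /* busmaster + DMA mask */
        if (ret)
                goto fail_set_dma_mask;

        /* ... further setup ... */
        return 0;

fail_set_dma_mask:
        pci_disable_device(pdev);
        return ret;
}
```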
```diff
@@ -2628,19 +2603,11 @@
                 dev_err(&pdev->dev, "enable device failed\n");
                 return ret;
         }
-        pci_set_master(pdev);
-        if (IS_DMA64) {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret) {
-                        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                        if (ret)
-                                goto fail;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto fail;
-        }
+
+        ret = mvumi_pci_set_master(pdev);
+        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+        if (ret)
+                goto fail;
         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
         if (ret)
                 goto fail;
```
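One wrinkle in this resume hunk as written: the helper's return value is immediately overwritten by an unconditional `dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))`, so any failure inside `mvumi_pci_set_master()` is silently dropped and 64-bit-capable setups are narrowed to a 32-bit mask on every resume, unlike the probe path. If that narrowing is not intended, a stricter sequence (hypothetical, not what the patch does) would keep the negotiated mask and propagate the error:

```c
/* Hypothetical stricter variant of the resume DMA setup: preserve
 * the mask mvumi_pci_set_master() negotiated and its error code
 * instead of clobbering both.
 */
ret = mvumi_pci_set_master(pdev);
if (ret)
        goto fail;
```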