| .. | .. |
|---|
| 1 | 1 | /* |
|---|
| 2 | 2 | * Disk Array driver for HP Smart Array SAS controllers |
|---|
| 3 | + * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries |
|---|
| 3 | 4 | * Copyright 2016 Microsemi Corporation |
|---|
| 4 | 5 | * Copyright 2014-2015 PMC-Sierra, Inc. |
|---|
| 5 | 6 | * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. |
|---|
| .. | .. |
|---|
| 21 | 22 | #include <linux/interrupt.h> |
|---|
| 22 | 23 | #include <linux/types.h> |
|---|
| 23 | 24 | #include <linux/pci.h> |
|---|
| 24 | | -#include <linux/pci-aspm.h> |
|---|
| 25 | 25 | #include <linux/kernel.h> |
|---|
| 26 | 26 | #include <linux/slab.h> |
|---|
| 27 | 27 | #include <linux/delay.h> |
|---|
| .. | .. |
|---|
| 60 | 60 | * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' |
|---|
| 61 | 61 | * with an optional trailing '-' followed by a byte value (0-255). |
|---|
| 62 | 62 | */ |
|---|
| 63 | | -#define HPSA_DRIVER_VERSION "3.4.20-125" |
|---|
| 63 | +#define HPSA_DRIVER_VERSION "3.4.20-200" |
|---|
| 64 | 64 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
|---|
| 65 | 65 | #define HPSA "hpsa" |
|---|
| 66 | 66 | |
|---|
| .. | .. |
|---|
| 73 | 73 | |
|---|
| 74 | 74 | /*define how many times we will try a command because of bus resets */ |
|---|
| 75 | 75 | #define MAX_CMD_RETRIES 3 |
|---|
| 76 | +/* How long to wait before giving up on a command */ |
|---|
| 77 | +#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ) |
|---|
| 76 | 78 | |
|---|
| 77 | 79 | /* Embedded module documentation macros - see modules.h */ |
|---|
| 78 | 80 | MODULE_AUTHOR("Hewlett-Packard Company"); |
|---|
| .. | .. |
|---|
| 251 | 253 | |
|---|
| 252 | 254 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
|---|
| 253 | 255 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
|---|
| 254 | | -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
|---|
| 256 | +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, |
|---|
| 257 | + void __user *arg); |
|---|
| 258 | +static int hpsa_passthru_ioctl(struct ctlr_info *h, |
|---|
| 259 | + IOCTL_Command_struct *iocommand); |
|---|
| 260 | +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, |
|---|
| 261 | + BIG_IOCTL_Command_struct *ioc); |
|---|
| 255 | 262 | |
|---|
| 256 | 263 | #ifdef CONFIG_COMPAT |
|---|
| 257 | | -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, |
|---|
| 264 | +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, |
|---|
| 258 | 265 | void __user *arg); |
|---|
| 259 | 266 | #endif |
|---|
| 260 | 267 | |
|---|
| .. | .. |
|---|
| 341 | 348 | static inline bool hpsa_is_cmd_idle(struct CommandList *c) |
|---|
| 342 | 349 | { |
|---|
| 343 | 350 | return c->scsi_cmd == SCSI_CMD_IDLE; |
|---|
| 344 | | -} |
|---|
| 345 | | - |
|---|
| 346 | | -static inline bool hpsa_is_pending_event(struct CommandList *c) |
|---|
| 347 | | -{ |
|---|
| 348 | | - return c->reset_pending; |
|---|
| 349 | 351 | } |
|---|
| 350 | 352 | |
|---|
| 351 | 353 | /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ |
|---|
| .. | .. |
|---|
| 971 | 973 | .scan_finished = hpsa_scan_finished, |
|---|
| 972 | 974 | .change_queue_depth = hpsa_change_queue_depth, |
|---|
| 973 | 975 | .this_id = -1, |
|---|
| 974 | | - .use_clustering = ENABLE_CLUSTERING, |
|---|
| 975 | 976 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
|---|
| 976 | 977 | .ioctl = hpsa_ioctl, |
|---|
| 977 | 978 | .slave_alloc = hpsa_slave_alloc, |
|---|
| .. | .. |
|---|
| 1150 | 1151 | { |
|---|
| 1151 | 1152 | dial_down_lockup_detection_during_fw_flash(h, c); |
|---|
| 1152 | 1153 | atomic_inc(&h->commands_outstanding); |
|---|
| 1154 | + if (c->device) |
|---|
| 1155 | + atomic_inc(&c->device->commands_outstanding); |
|---|
| 1153 | 1156 | |
|---|
| 1154 | 1157 | reply_queue = h->reply_map[raw_smp_processor_id()]; |
|---|
| 1155 | 1158 | switch (c->cmd_type) { |
|---|
| .. | .. |
|---|
| 1173 | 1176 | |
|---|
| 1174 | 1177 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) |
|---|
| 1175 | 1178 | { |
|---|
| 1176 | | - if (unlikely(hpsa_is_pending_event(c))) |
|---|
| 1177 | | - return finish_cmd(c); |
|---|
| 1178 | | - |
|---|
| 1179 | 1179 | __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); |
|---|
| 1180 | 1180 | } |
|---|
| 1181 | 1181 | |
|---|
| .. | .. |
|---|
| 1334 | 1334 | dev_warn(&h->pdev->dev, "physical device with no LUN=0," |
|---|
| 1335 | 1335 | " suspect firmware bug or unsupported hardware " |
|---|
| 1336 | 1336 | "configuration.\n"); |
|---|
| 1337 | | - return -1; |
|---|
| 1337 | + return -1; |
|---|
| 1338 | 1338 | } |
|---|
| 1339 | 1339 | |
|---|
| 1340 | 1340 | lun_assigned: |
|---|
| .. | .. |
|---|
| 1847 | 1847 | return count; |
|---|
| 1848 | 1848 | } |
|---|
| 1849 | 1849 | |
|---|
| 1850 | +#define NUM_WAIT 20 |
|---|
| 1850 | 1851 | static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, |
|---|
| 1851 | 1852 | struct hpsa_scsi_dev_t *device) |
|---|
| 1852 | 1853 | { |
|---|
| 1853 | 1854 | int cmds = 0; |
|---|
| 1854 | 1855 | int waits = 0; |
|---|
| 1856 | + int num_wait = NUM_WAIT; |
|---|
| 1857 | + |
|---|
| 1858 | + if (device->external) |
|---|
| 1859 | + num_wait = HPSA_EH_PTRAID_TIMEOUT; |
|---|
| 1855 | 1860 | |
|---|
| 1856 | 1861 | while (1) { |
|---|
| 1857 | 1862 | cmds = hpsa_find_outstanding_commands_for_dev(h, device); |
|---|
| 1858 | 1863 | if (cmds == 0) |
|---|
| 1859 | 1864 | break; |
|---|
| 1860 | | - if (++waits > 20) |
|---|
| 1865 | + if (++waits > num_wait) |
|---|
| 1861 | 1866 | break; |
|---|
| 1862 | 1867 | msleep(1000); |
|---|
| 1863 | 1868 | } |
|---|
| 1864 | 1869 | |
|---|
| 1865 | | - if (waits > 20) |
|---|
| 1870 | + if (waits > num_wait) { |
|---|
| 1866 | 1871 | dev_warn(&h->pdev->dev, |
|---|
| 1867 | | - "%s: removing device with %d outstanding commands!\n", |
|---|
| 1868 | | - __func__, cmds); |
|---|
| 1872 | + "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n", |
|---|
| 1873 | + __func__, |
|---|
| 1874 | + h->scsi_host->host_no, |
|---|
| 1875 | + device->bus, device->target, device->lun, cmds); |
|---|
| 1876 | + } |
|---|
| 1869 | 1877 | } |
|---|
| 1870 | 1878 | |
|---|
| 1871 | 1879 | static void hpsa_remove_device(struct ctlr_info *h, |
|---|
| .. | .. |
|---|
| 2127 | 2135 | } |
|---|
| 2128 | 2136 | |
|---|
| 2129 | 2137 | /* configure scsi device based on internal per-device structure */ |
|---|
| 2138 | +#define CTLR_TIMEOUT (120 * HZ) |
|---|
| 2130 | 2139 | static int hpsa_slave_configure(struct scsi_device *sdev) |
|---|
| 2131 | 2140 | { |
|---|
| 2132 | 2141 | struct hpsa_scsi_dev_t *sd; |
|---|
| .. | .. |
|---|
| 2136 | 2145 | sdev->no_uld_attach = !sd || !sd->expose_device; |
|---|
| 2137 | 2146 | |
|---|
| 2138 | 2147 | if (sd) { |
|---|
| 2139 | | - if (sd->external) |
|---|
| 2148 | + sd->was_removed = 0; |
|---|
| 2149 | + queue_depth = sd->queue_depth != 0 ? |
|---|
| 2150 | + sd->queue_depth : sdev->host->can_queue; |
|---|
| 2151 | + if (sd->external) { |
|---|
| 2140 | 2152 | queue_depth = EXTERNAL_QD; |
|---|
| 2141 | | - else |
|---|
| 2142 | | - queue_depth = sd->queue_depth != 0 ? |
|---|
| 2143 | | - sd->queue_depth : sdev->host->can_queue; |
|---|
| 2144 | | - } else |
|---|
| 2153 | + sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; |
|---|
| 2154 | + blk_queue_rq_timeout(sdev->request_queue, |
|---|
| 2155 | + HPSA_EH_PTRAID_TIMEOUT); |
|---|
| 2156 | + } |
|---|
| 2157 | + if (is_hba_lunid(sd->scsi3addr)) { |
|---|
| 2158 | + sdev->eh_timeout = CTLR_TIMEOUT; |
|---|
| 2159 | + blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); |
|---|
| 2160 | + } |
|---|
| 2161 | + } else { |
|---|
| 2145 | 2162 | queue_depth = sdev->host->can_queue; |
|---|
| 2163 | + } |
|---|
| 2146 | 2164 | |
|---|
| 2147 | 2165 | scsi_change_queue_depth(sdev, queue_depth); |
|---|
| 2148 | 2166 | |
|---|
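The reworked hpsa_slave_configure() above now also sets per-device SCSI error-handling and block-layer request timeouts: HPSA_EH_PTRAID_TIMEOUT for external pass-through RAID devices and CTLR_TIMEOUT for the controller LUN itself. A minimal sketch of that pattern follows; the helper name and the 240-second value are illustrative only (the value mirrors HPSA_EH_PTRAID_TIMEOUT).

```c
#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <scsi/scsi_device.h>

/* Illustrative only: raise both the SCSI EH timeout and the block-layer
 * request timeout for one device. The helper itself is hypothetical.
 */
static void example_set_long_timeouts(struct scsi_device *sdev)
{
	unsigned int tmo = 240 * HZ;

	sdev->eh_timeout = tmo;				/* error-handler escalation */
	blk_queue_rq_timeout(sdev->request_queue, tmo);	/* request expiry */
}
```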
| .. | .. |
|---|
| 2151 | 2169 | |
|---|
| 2152 | 2170 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
|---|
| 2153 | 2171 | { |
|---|
| 2154 | | - /* nothing to do. */ |
|---|
| 2172 | + struct hpsa_scsi_dev_t *hdev = NULL; |
|---|
| 2173 | + |
|---|
| 2174 | + hdev = sdev->hostdata; |
|---|
| 2175 | + |
|---|
| 2176 | + if (hdev) |
|---|
| 2177 | + hdev->was_removed = 1; |
|---|
| 2155 | 2178 | } |
|---|
| 2156 | 2179 | |
|---|
| 2157 | 2180 | static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
|---|
| .. | .. |
|---|
| 2245 | 2268 | |
|---|
| 2246 | 2269 | chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; |
|---|
| 2247 | 2270 | chain_size = le32_to_cpu(cp->sg[0].length); |
|---|
| 2248 | | - temp64 = pci_map_single(h->pdev, chain_block, chain_size, |
|---|
| 2249 | | - PCI_DMA_TODEVICE); |
|---|
| 2271 | + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, |
|---|
| 2272 | + DMA_TO_DEVICE); |
|---|
| 2250 | 2273 | if (dma_mapping_error(&h->pdev->dev, temp64)) { |
|---|
| 2251 | 2274 | /* prevent subsequent unmapping */ |
|---|
| 2252 | 2275 | cp->sg->address = 0; |
|---|
| .. | .. |
|---|
| 2266 | 2289 | chain_sg = cp->sg; |
|---|
| 2267 | 2290 | temp64 = le64_to_cpu(chain_sg->address); |
|---|
| 2268 | 2291 | chain_size = le32_to_cpu(cp->sg[0].length); |
|---|
| 2269 | | - pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); |
|---|
| 2292 | + dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); |
|---|
| 2270 | 2293 | } |
|---|
| 2271 | 2294 | |
|---|
| 2272 | 2295 | static int hpsa_map_sg_chain_block(struct ctlr_info *h, |
|---|
| .. | .. |
|---|
| 2282 | 2305 | chain_len = sizeof(*chain_sg) * |
|---|
| 2283 | 2306 | (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
|---|
| 2284 | 2307 | chain_sg->Len = cpu_to_le32(chain_len); |
|---|
| 2285 | | - temp64 = pci_map_single(h->pdev, chain_block, chain_len, |
|---|
| 2286 | | - PCI_DMA_TODEVICE); |
|---|
| 2308 | + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, |
|---|
| 2309 | + DMA_TO_DEVICE); |
|---|
| 2287 | 2310 | if (dma_mapping_error(&h->pdev->dev, temp64)) { |
|---|
| 2288 | 2311 | /* prevent subsequent unmapping */ |
|---|
| 2289 | 2312 | chain_sg->Addr = cpu_to_le64(0); |
|---|
| .. | .. |
|---|
| 2302 | 2325 | return; |
|---|
| 2303 | 2326 | |
|---|
| 2304 | 2327 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
|---|
| 2305 | | - pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), |
|---|
| 2306 | | - le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); |
|---|
| 2328 | + dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), |
|---|
| 2329 | + le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); |
|---|
| 2307 | 2330 | } |
|---|
| 2308 | 2331 | |
|---|
| 2309 | 2332 | |
|---|
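The hunks above (and several later ones) move the driver from the legacy PCI DMA helpers (pci_map_single()/pci_unmap_single() with PCI_DMA_* directions) to the generic DMA API on &pdev->dev. A minimal sketch of the map/check/unmap lifecycle the new code follows; the helper names here are hypothetical:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Illustrative only: map a buffer for a host-to-device transfer with the
 * generic DMA API, and always check the handle before using it.
 */
static int example_map_for_write(struct pci_dev *pdev, void *buf, size_t len,
				 dma_addr_t *handle)
{
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))
		return -ENOMEM;	/* never hand a failed mapping to the controller */
	return 0;
}

static void example_unmap(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}
```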
| .. | .. |
|---|
| 2421 | 2444 | break; |
|---|
| 2422 | 2445 | } |
|---|
| 2423 | 2446 | |
|---|
| 2447 | + if (dev->in_reset) |
|---|
| 2448 | + retry = 0; |
|---|
| 2449 | + |
|---|
| 2424 | 2450 | return retry; /* retry on raid path? */ |
|---|
| 2425 | 2451 | } |
|---|
| 2426 | 2452 | |
|---|
| 2427 | 2453 | static void hpsa_cmd_resolve_events(struct ctlr_info *h, |
|---|
| 2428 | 2454 | struct CommandList *c) |
|---|
| 2429 | 2455 | { |
|---|
| 2430 | | - bool do_wake = false; |
|---|
| 2456 | + struct hpsa_scsi_dev_t *dev = c->device; |
|---|
| 2431 | 2457 | |
|---|
| 2432 | 2458 | /* |
|---|
| 2433 | 2459 | * Reset c->scsi_cmd here so that the reset handler will know |
|---|
| .. | .. |
|---|
| 2436 | 2462 | */ |
|---|
| 2437 | 2463 | c->scsi_cmd = SCSI_CMD_IDLE; |
|---|
| 2438 | 2464 | mb(); /* Declare command idle before checking for pending events. */ |
|---|
| 2439 | | - if (c->reset_pending) { |
|---|
| 2440 | | - unsigned long flags; |
|---|
| 2441 | | - struct hpsa_scsi_dev_t *dev; |
|---|
| 2442 | | - |
|---|
| 2443 | | - /* |
|---|
| 2444 | | - * There appears to be a reset pending; lock the lock and |
|---|
| 2445 | | - * reconfirm. If so, then decrement the count of outstanding |
|---|
| 2446 | | - * commands and wake the reset command if this is the last one. |
|---|
| 2447 | | - */ |
|---|
| 2448 | | - spin_lock_irqsave(&h->lock, flags); |
|---|
| 2449 | | - dev = c->reset_pending; /* Re-fetch under the lock. */ |
|---|
| 2450 | | - if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) |
|---|
| 2451 | | - do_wake = true; |
|---|
| 2452 | | - c->reset_pending = NULL; |
|---|
| 2453 | | - spin_unlock_irqrestore(&h->lock, flags); |
|---|
| 2465 | + if (dev) { |
|---|
| 2466 | + atomic_dec(&dev->commands_outstanding); |
|---|
| 2467 | + if (dev->in_reset && |
|---|
| 2468 | + atomic_read(&dev->commands_outstanding) <= 0) |
|---|
| 2469 | + wake_up_all(&h->event_sync_wait_queue); |
|---|
| 2454 | 2470 | } |
|---|
| 2455 | | - |
|---|
| 2456 | | - if (do_wake) |
|---|
| 2457 | | - wake_up_all(&h->event_sync_wait_queue); |
|---|
| 2458 | 2471 | } |
|---|
| 2459 | 2472 | |
|---|
| 2460 | 2473 | static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, |
|---|
| .. | .. |
|---|
| 2502 | 2515 | if (c2->error_data.status == |
|---|
| 2503 | 2516 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { |
|---|
| 2504 | 2517 | hpsa_turn_off_ioaccel_for_device(dev); |
|---|
| 2518 | + } |
|---|
| 2519 | + |
|---|
| 2520 | + if (dev->in_reset) { |
|---|
| 2521 | + cmd->result = DID_RESET << 16; |
|---|
| 2522 | + return hpsa_cmd_free_and_done(h, c, cmd); |
|---|
| 2505 | 2523 | } |
|---|
| 2506 | 2524 | |
|---|
| 2507 | 2525 | return hpsa_retry_cmd(h, c); |
|---|
| .. | .. |
|---|
| 2582 | 2600 | cmd->result = (DID_OK << 16); /* host byte */ |
|---|
| 2583 | 2601 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
|---|
| 2584 | 2602 | |
|---|
| 2603 | + /* SCSI command has already been cleaned up in SML */ |
|---|
| 2604 | + if (dev->was_removed) { |
|---|
| 2605 | + hpsa_cmd_resolve_and_free(h, cp); |
|---|
| 2606 | + return; |
|---|
| 2607 | + } |
|---|
| 2608 | + |
|---|
| 2585 | 2609 | if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { |
|---|
| 2586 | 2610 | if (dev->physical_device && dev->expose_device && |
|---|
| 2587 | 2611 | dev->removed) { |
|---|
| .. | .. |
|---|
| 2602 | 2626 | cmd->result = DID_NO_CONNECT << 16; |
|---|
| 2603 | 2627 | return hpsa_cmd_free_and_done(h, cp, cmd); |
|---|
| 2604 | 2628 | } |
|---|
| 2605 | | - |
|---|
| 2606 | | - if ((unlikely(hpsa_is_pending_event(cp)))) |
|---|
| 2607 | | - if (cp->reset_pending) |
|---|
| 2608 | | - return hpsa_cmd_free_and_done(h, cp, cmd); |
|---|
| 2609 | 2629 | |
|---|
| 2610 | 2630 | if (cp->cmd_type == CMD_IOACCEL2) |
|---|
| 2611 | 2631 | return process_ioaccel2_completion(h, cp, cmd, dev); |
|---|
| .. | .. |
|---|
| 2655 | 2675 | decode_sense_data(ei->SenseInfo, sense_data_size, |
|---|
| 2656 | 2676 | &sense_key, &asc, &ascq); |
|---|
| 2657 | 2677 | if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { |
|---|
| 2658 | | - if (sense_key == ABORTED_COMMAND) { |
|---|
| 2678 | + switch (sense_key) { |
|---|
| 2679 | + case ABORTED_COMMAND: |
|---|
| 2659 | 2680 | cmd->result |= DID_SOFT_ERROR << 16; |
|---|
| 2681 | + break; |
|---|
| 2682 | + case UNIT_ATTENTION: |
|---|
| 2683 | + if (asc == 0x3F && ascq == 0x0E) |
|---|
| 2684 | + h->drv_req_rescan = 1; |
|---|
| 2685 | + break; |
|---|
| 2686 | + case ILLEGAL_REQUEST: |
|---|
| 2687 | + if (asc == 0x25 && ascq == 0x00) { |
|---|
| 2688 | + dev->removed = 1; |
|---|
| 2689 | + cmd->result = DID_NO_CONNECT << 16; |
|---|
| 2690 | + } |
|---|
| 2660 | 2691 | break; |
|---|
| 2661 | 2692 | } |
|---|
| 2662 | 2693 | break; |
|---|
| .. | .. |
|---|
| 2767 | 2798 | return hpsa_cmd_free_and_done(h, cp, cmd); |
|---|
| 2768 | 2799 | } |
|---|
| 2769 | 2800 | |
|---|
| 2770 | | -static void hpsa_pci_unmap(struct pci_dev *pdev, |
|---|
| 2771 | | - struct CommandList *c, int sg_used, int data_direction) |
|---|
| 2801 | +static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, |
|---|
| 2802 | + int sg_used, enum dma_data_direction data_direction) |
|---|
| 2772 | 2803 | { |
|---|
| 2773 | 2804 | int i; |
|---|
| 2774 | 2805 | |
|---|
| 2775 | 2806 | for (i = 0; i < sg_used; i++) |
|---|
| 2776 | | - pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), |
|---|
| 2807 | + dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), |
|---|
| 2777 | 2808 | le32_to_cpu(c->SG[i].Len), |
|---|
| 2778 | 2809 | data_direction); |
|---|
| 2779 | 2810 | } |
|---|
| .. | .. |
|---|
| 2782 | 2813 | struct CommandList *cp, |
|---|
| 2783 | 2814 | unsigned char *buf, |
|---|
| 2784 | 2815 | size_t buflen, |
|---|
| 2785 | | - int data_direction) |
|---|
| 2816 | + enum dma_data_direction data_direction) |
|---|
| 2786 | 2817 | { |
|---|
| 2787 | 2818 | u64 addr64; |
|---|
| 2788 | 2819 | |
|---|
| 2789 | | - if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
|---|
| 2820 | + if (buflen == 0 || data_direction == DMA_NONE) { |
|---|
| 2790 | 2821 | cp->Header.SGList = 0; |
|---|
| 2791 | 2822 | cp->Header.SGTotal = cpu_to_le16(0); |
|---|
| 2792 | 2823 | return 0; |
|---|
| 2793 | 2824 | } |
|---|
| 2794 | 2825 | |
|---|
| 2795 | | - addr64 = pci_map_single(pdev, buf, buflen, data_direction); |
|---|
| 2826 | + addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); |
|---|
| 2796 | 2827 | if (dma_mapping_error(&pdev->dev, addr64)) { |
|---|
| 2797 | 2828 | /* Prevent subsequent unmap of something never mapped */ |
|---|
| 2798 | 2829 | cp->Header.SGList = 0; |
|---|
| .. | .. |
|---|
| 2853 | 2884 | |
|---|
| 2854 | 2885 | #define MAX_DRIVER_CMD_RETRIES 25 |
|---|
| 2855 | 2886 | static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
|---|
| 2856 | | - struct CommandList *c, int data_direction, unsigned long timeout_msecs) |
|---|
| 2887 | + struct CommandList *c, enum dma_data_direction data_direction, |
|---|
| 2888 | + unsigned long timeout_msecs) |
|---|
| 2857 | 2889 | { |
|---|
| 2858 | 2890 | int backoff_time = 10, retry_count = 0; |
|---|
| 2859 | 2891 | int rc; |
|---|
| .. | .. |
|---|
| 2977 | 3009 | rc = -1; |
|---|
| 2978 | 3010 | goto out; |
|---|
| 2979 | 3011 | } |
|---|
| 2980 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 2981 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3012 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3013 | + NO_TIMEOUT); |
|---|
| 2982 | 3014 | if (rc) |
|---|
| 2983 | 3015 | goto out; |
|---|
| 2984 | 3016 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3030 | 3062 | rc = -1; |
|---|
| 3031 | 3063 | goto out; |
|---|
| 3032 | 3064 | } |
|---|
| 3033 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 3034 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3065 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3066 | + NO_TIMEOUT); |
|---|
| 3035 | 3067 | if (rc) |
|---|
| 3036 | 3068 | goto out; |
|---|
| 3037 | 3069 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3044 | 3076 | return rc; |
|---|
| 3045 | 3077 | } |
|---|
| 3046 | 3078 | |
|---|
| 3047 | | -static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, |
|---|
| 3079 | +static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, |
|---|
| 3048 | 3080 | u8 reset_type, int reply_queue) |
|---|
| 3049 | 3081 | { |
|---|
| 3050 | 3082 | int rc = IO_OK; |
|---|
| .. | .. |
|---|
| 3052 | 3084 | struct ErrorInfo *ei; |
|---|
| 3053 | 3085 | |
|---|
| 3054 | 3086 | c = cmd_alloc(h); |
|---|
| 3055 | | - |
|---|
| 3087 | + c->device = dev; |
|---|
| 3056 | 3088 | |
|---|
| 3057 | 3089 | /* fill_cmd can't fail here, no data buffer to map. */ |
|---|
| 3058 | | - (void) fill_cmd(c, reset_type, h, NULL, 0, 0, |
|---|
| 3059 | | - scsi3addr, TYPE_MSG); |
|---|
| 3090 | + (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); |
|---|
| 3060 | 3091 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
|---|
| 3061 | 3092 | if (rc) { |
|---|
| 3062 | 3093 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); |
|---|
| .. | .. |
|---|
| 3134 | 3165 | } |
|---|
| 3135 | 3166 | |
|---|
| 3136 | 3167 | static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, |
|---|
| 3137 | | - unsigned char *scsi3addr, u8 reset_type, int reply_queue) |
|---|
| 3168 | + u8 reset_type, int reply_queue) |
|---|
| 3138 | 3169 | { |
|---|
| 3139 | | - int i; |
|---|
| 3140 | 3170 | int rc = 0; |
|---|
| 3141 | 3171 | |
|---|
| 3142 | 3172 | /* We can really only handle one reset at a time */ |
|---|
| .. | .. |
|---|
| 3145 | 3175 | return -EINTR; |
|---|
| 3146 | 3176 | } |
|---|
| 3147 | 3177 | |
|---|
| 3148 | | - BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); |
|---|
| 3149 | | - |
|---|
| 3150 | | - for (i = 0; i < h->nr_cmds; i++) { |
|---|
| 3151 | | - struct CommandList *c = h->cmd_pool + i; |
|---|
| 3152 | | - int refcount = atomic_inc_return(&c->refcount); |
|---|
| 3153 | | - |
|---|
| 3154 | | - if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { |
|---|
| 3155 | | - unsigned long flags; |
|---|
| 3156 | | - |
|---|
| 3157 | | - /* |
|---|
| 3158 | | - * Mark the target command as having a reset pending, |
|---|
| 3159 | | - * then lock a lock so that the command cannot complete |
|---|
| 3160 | | - * while we're considering it. If the command is not |
|---|
| 3161 | | - * idle then count it; otherwise revoke the event. |
|---|
| 3162 | | - */ |
|---|
| 3163 | | - c->reset_pending = dev; |
|---|
| 3164 | | - spin_lock_irqsave(&h->lock, flags); /* Implied MB */ |
|---|
| 3165 | | - if (!hpsa_is_cmd_idle(c)) |
|---|
| 3166 | | - atomic_inc(&dev->reset_cmds_out); |
|---|
| 3167 | | - else |
|---|
| 3168 | | - c->reset_pending = NULL; |
|---|
| 3169 | | - spin_unlock_irqrestore(&h->lock, flags); |
|---|
| 3170 | | - } |
|---|
| 3171 | | - |
|---|
| 3172 | | - cmd_free(h, c); |
|---|
| 3173 | | - } |
|---|
| 3174 | | - |
|---|
| 3175 | | - rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); |
|---|
| 3176 | | - if (!rc) |
|---|
| 3178 | + rc = hpsa_send_reset(h, dev, reset_type, reply_queue); |
|---|
| 3179 | + if (!rc) { |
|---|
| 3180 | + /* incremented by sending the reset request */ |
|---|
| 3181 | + atomic_dec(&dev->commands_outstanding); |
|---|
| 3177 | 3182 | wait_event(h->event_sync_wait_queue, |
|---|
| 3178 | | - atomic_read(&dev->reset_cmds_out) == 0 || |
|---|
| 3183 | + atomic_read(&dev->commands_outstanding) <= 0 || |
|---|
| 3179 | 3184 | lockup_detected(h)); |
|---|
| 3185 | + } |
|---|
| 3180 | 3186 | |
|---|
| 3181 | 3187 | if (unlikely(lockup_detected(h))) { |
|---|
| 3182 | 3188 | dev_warn(&h->pdev->dev, |
|---|
| .. | .. |
|---|
| 3184 | 3190 | rc = -ENODEV; |
|---|
| 3185 | 3191 | } |
|---|
| 3186 | 3192 | |
|---|
| 3187 | | - if (unlikely(rc)) |
|---|
| 3188 | | - atomic_set(&dev->reset_cmds_out, 0); |
|---|
| 3189 | | - else |
|---|
| 3190 | | - rc = wait_for_device_to_become_ready(h, scsi3addr, 0); |
|---|
| 3193 | + if (!rc) |
|---|
| 3194 | + rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); |
|---|
| 3191 | 3195 | |
|---|
| 3192 | 3196 | mutex_unlock(&h->reset_mutex); |
|---|
| 3193 | 3197 | return rc; |
|---|
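The reset path above no longer tags individual commands with reset_pending and counts reset_cmds_out; instead, every submission bumps a per-device commands_outstanding counter (see the __enqueue_cmd_and_start_io() hunk earlier), completions decrement it and wake h->event_sync_wait_queue, and hpsa_do_reset() simply waits for the counter to drain. A rough sketch of that drain pattern, using made-up structures rather than the driver's own:

```c
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Illustrative only: submitters count in-flight commands per device,
 * completions wake the waiter, and the reset path sleeps until the
 * counter reaches zero.
 */
struct example_dev {
	atomic_t commands_outstanding;
	bool in_reset;
};

static void example_submit(struct example_dev *d)
{
	atomic_inc(&d->commands_outstanding);
	/* ... post the command to the controller ... */
}

static void example_complete(struct example_dev *d, wait_queue_head_t *wq)
{
	if (atomic_dec_return(&d->commands_outstanding) <= 0 && d->in_reset)
		wake_up_all(wq);
}

static void example_drain_before_reset(struct example_dev *d,
				       wait_queue_head_t *wq)
{
	d->in_reset = true;	/* the real driver also blocks new submissions */
	wait_event(*wq, atomic_read(&d->commands_outstanding) <= 0);
}
```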
| .. | .. |
|---|
| 3314 | 3318 | cmd_free(h, c); |
|---|
| 3315 | 3319 | return -1; |
|---|
| 3316 | 3320 | } |
|---|
| 3317 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 3318 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3321 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3322 | + NO_TIMEOUT); |
|---|
| 3319 | 3323 | if (rc) |
|---|
| 3320 | 3324 | goto out; |
|---|
| 3321 | 3325 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3357 | 3361 | c->Request.CDB[2] = bmic_device_index & 0xff; |
|---|
| 3358 | 3362 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; |
|---|
| 3359 | 3363 | |
|---|
| 3360 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 3361 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3364 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3365 | + NO_TIMEOUT); |
|---|
| 3362 | 3366 | if (rc) |
|---|
| 3363 | 3367 | goto out; |
|---|
| 3364 | 3368 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3385 | 3389 | if (rc) |
|---|
| 3386 | 3390 | goto out; |
|---|
| 3387 | 3391 | |
|---|
| 3388 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 3389 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3392 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3393 | + NO_TIMEOUT); |
|---|
| 3390 | 3394 | if (rc) |
|---|
| 3391 | 3395 | goto out; |
|---|
| 3392 | 3396 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3416 | 3420 | c->Request.CDB[2] = bmic_device_index & 0xff; |
|---|
| 3417 | 3421 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; |
|---|
| 3418 | 3422 | |
|---|
| 3419 | | - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
|---|
| 3423 | + hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3420 | 3424 | NO_TIMEOUT); |
|---|
| 3421 | 3425 | ei = c->err_info; |
|---|
| 3422 | 3426 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
|---|
| .. | .. |
|---|
| 3445 | 3449 | struct ErrorInfo *ei = NULL; |
|---|
| 3446 | 3450 | struct bmic_sense_storage_box_params *bssbp = NULL; |
|---|
| 3447 | 3451 | struct bmic_identify_physical_device *id_phys = NULL; |
|---|
| 3448 | | - struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; |
|---|
| 3452 | + struct ext_report_lun_entry *rle; |
|---|
| 3449 | 3453 | u16 bmic_device_index = 0; |
|---|
| 3454 | + |
|---|
| 3455 | + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) |
|---|
| 3456 | + return; |
|---|
| 3457 | + |
|---|
| 3458 | + rle = &rlep->LUN[rle_index]; |
|---|
| 3450 | 3459 | |
|---|
| 3451 | 3460 | encl_dev->eli = |
|---|
| 3452 | 3461 | hpsa_get_enclosure_logical_identifier(h, scsi3addr); |
|---|
| .. | .. |
|---|
| 3492 | 3501 | else |
|---|
| 3493 | 3502 | c->Request.CDB[5] = 0; |
|---|
| 3494 | 3503 | |
|---|
| 3495 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
|---|
| 3504 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3496 | 3505 | NO_TIMEOUT); |
|---|
| 3497 | 3506 | if (rc) |
|---|
| 3498 | 3507 | goto out; |
|---|
| .. | .. |
|---|
| 3746 | 3755 | } |
|---|
| 3747 | 3756 | if (extended_response) |
|---|
| 3748 | 3757 | c->Request.CDB[1] = extended_response; |
|---|
| 3749 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 3750 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 3758 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 3759 | + NO_TIMEOUT); |
|---|
| 3751 | 3760 | if (rc) |
|---|
| 3752 | 3761 | goto out; |
|---|
| 3753 | 3762 | ei = c->err_info; |
|---|
| .. | .. |
|---|
| 3970 | 3979 | memset(this_device->device_id, 0, |
|---|
| 3971 | 3980 | sizeof(this_device->device_id)); |
|---|
| 3972 | 3981 | if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, |
|---|
| 3973 | | - sizeof(this_device->device_id)) < 0) |
|---|
| 3982 | + sizeof(this_device->device_id)) < 0) { |
|---|
| 3974 | 3983 | dev_err(&h->pdev->dev, |
|---|
| 3975 | | - "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n", |
|---|
| 3984 | + "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n", |
|---|
| 3976 | 3985 | h->ctlr, __func__, |
|---|
| 3977 | 3986 | h->scsi_host->host_no, |
|---|
| 3978 | | - this_device->target, this_device->lun, |
|---|
| 3987 | + this_device->bus, this_device->target, |
|---|
| 3988 | + this_device->lun, |
|---|
| 3979 | 3989 | scsi_device_type(this_device->devtype), |
|---|
| 3980 | 3990 | this_device->model); |
|---|
| 3991 | + rc = HPSA_LV_FAILED; |
|---|
| 3992 | + goto bail_out; |
|---|
| 3993 | + } |
|---|
| 3981 | 3994 | |
|---|
| 3982 | 3995 | if ((this_device->devtype == TYPE_DISK || |
|---|
| 3983 | 3996 | this_device->devtype == TYPE_ZBC) && |
|---|
| .. | .. |
|---|
| 4124 | 4137 | "maximum logical LUNs (%d) exceeded. " |
|---|
| 4125 | 4138 | "%d LUNs ignored.\n", HPSA_MAX_LUN, |
|---|
| 4126 | 4139 | *nlogicals - HPSA_MAX_LUN); |
|---|
| 4127 | | - *nlogicals = HPSA_MAX_LUN; |
|---|
| 4140 | + *nlogicals = HPSA_MAX_LUN; |
|---|
| 4128 | 4141 | } |
|---|
| 4129 | 4142 | if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { |
|---|
| 4130 | 4143 | dev_warn(&h->pdev->dev, |
|---|
| .. | .. |
|---|
| 4172 | 4185 | int rc; |
|---|
| 4173 | 4186 | struct ext_report_lun_entry *rle; |
|---|
| 4174 | 4187 | |
|---|
| 4188 | + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) |
|---|
| 4189 | + return; |
|---|
| 4190 | + |
|---|
| 4175 | 4191 | rle = &rlep->LUN[rle_index]; |
|---|
| 4176 | 4192 | |
|---|
| 4177 | 4193 | dev->ioaccel_handle = rle->ioaccel_handle; |
|---|
| .. | .. |
|---|
| 4196 | 4212 | struct ReportExtendedLUNdata *rlep, int rle_index, |
|---|
| 4197 | 4213 | struct bmic_identify_physical_device *id_phys) |
|---|
| 4198 | 4214 | { |
|---|
| 4199 | | - struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; |
|---|
| 4215 | + struct ext_report_lun_entry *rle; |
|---|
| 4216 | + |
|---|
| 4217 | + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) |
|---|
| 4218 | + return; |
|---|
| 4219 | + |
|---|
| 4220 | + rle = &rlep->LUN[rle_index]; |
|---|
| 4200 | 4221 | |
|---|
| 4201 | 4222 | if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) |
|---|
| 4202 | 4223 | this_device->hba_ioaccel_enabled = 1; |
|---|
| .. | .. |
|---|
| 4418 | 4439 | /* |
|---|
| 4419 | 4440 | * Skip over some devices such as a spare. |
|---|
| 4420 | 4441 | */ |
|---|
| 4421 | | - if (!tmpdevice->external && physical_device) { |
|---|
| 4442 | + if (phys_dev_index >= 0 && !tmpdevice->external && |
|---|
| 4443 | + physical_device) { |
|---|
| 4422 | 4444 | skip_device = hpsa_skip_device(h, lunaddrbytes, |
|---|
| 4423 | 4445 | &physdev_list->LUN[phys_dev_index]); |
|---|
| 4424 | 4446 | if (skip_device) |
|---|
| .. | .. |
|---|
| 4676 | 4698 | case WRITE_6: |
|---|
| 4677 | 4699 | case WRITE_12: |
|---|
| 4678 | 4700 | is_write = 1; |
|---|
| 4701 | + fallthrough; |
|---|
| 4679 | 4702 | case READ_6: |
|---|
| 4680 | 4703 | case READ_12: |
|---|
| 4681 | 4704 | if (*cdb_len == 6) { |
|---|
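The fallthrough; statements added above (and in the later READ_*/WRITE_* switch) replace implicit fall-through between case labels with the kernel's explicit annotation, so -Wimplicit-fallthrough stays quiet. A small sketch of the idiom; the helper and its opcode-to-result mapping are illustrative only:

```c
#include <linux/compiler_attributes.h>	/* fallthrough macro */
#include <linux/types.h>
#include <scsi/scsi_proto.h>		/* READ_6/WRITE_6 opcode values */

/* Illustrative only: the WRITE_* case sets a flag and then deliberately
 * falls through into the matching READ_* handling.
 */
static int example_is_write(u8 opcode)
{
	int is_write = 0;

	switch (opcode) {
	case WRITE_6:
		is_write = 1;
		fallthrough;	/* share the READ_6 handling below */
	case READ_6:
		return is_write;
	default:
		return -1;
	}
}
```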
| .. | .. |
|---|
| 4816 | 4839 | return -1; |
|---|
| 4817 | 4840 | |
|---|
| 4818 | 4841 | c->phys_disk = dev; |
|---|
| 4842 | + |
|---|
| 4843 | + if (dev->in_reset) |
|---|
| 4844 | + return -1; |
|---|
| 4819 | 4845 | |
|---|
| 4820 | 4846 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, |
|---|
| 4821 | 4847 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); |
|---|
| .. | .. |
|---|
| 5007 | 5033 | } else |
|---|
| 5008 | 5034 | cp->sg_count = (u8) use_sg; |
|---|
| 5009 | 5035 | |
|---|
| 5036 | + if (phys_disk->in_reset) { |
|---|
| 5037 | + cmd->result = DID_RESET << 16; |
|---|
| 5038 | + return -1; |
|---|
| 5039 | + } |
|---|
| 5040 | + |
|---|
| 5010 | 5041 | enqueue_cmd_and_start_io(h, c); |
|---|
| 5011 | 5042 | return 0; |
|---|
| 5012 | 5043 | } |
|---|
| .. | .. |
|---|
| 5022 | 5053 | return -1; |
|---|
| 5023 | 5054 | |
|---|
| 5024 | 5055 | if (!c->scsi_cmd->device->hostdata) |
|---|
| 5056 | + return -1; |
|---|
| 5057 | + |
|---|
| 5058 | + if (phys_disk->in_reset) |
|---|
| 5025 | 5059 | return -1; |
|---|
| 5026 | 5060 | |
|---|
| 5027 | 5061 | /* Try to honor the device's queue depth */ |
|---|
| .. | .. |
|---|
| 5107 | 5141 | if (!dev) |
|---|
| 5108 | 5142 | return -1; |
|---|
| 5109 | 5143 | |
|---|
| 5144 | + if (dev->in_reset) |
|---|
| 5145 | + return -1; |
|---|
| 5146 | + |
|---|
| 5110 | 5147 | /* check for valid opcode, get LBA and block count */ |
|---|
| 5111 | 5148 | switch (cmd->cmnd[0]) { |
|---|
| 5112 | 5149 | case WRITE_6: |
|---|
| 5113 | 5150 | is_write = 1; |
|---|
| 5151 | + fallthrough; |
|---|
| 5114 | 5152 | case READ_6: |
|---|
| 5115 | 5153 | first_block = (((cmd->cmnd[1] & 0x1F) << 16) | |
|---|
| 5116 | 5154 | (cmd->cmnd[2] << 8) | |
|---|
| .. | .. |
|---|
| 5121 | 5159 | break; |
|---|
| 5122 | 5160 | case WRITE_10: |
|---|
| 5123 | 5161 | is_write = 1; |
|---|
| 5162 | + fallthrough; |
|---|
| 5124 | 5163 | case READ_10: |
|---|
| 5125 | 5164 | first_block = |
|---|
| 5126 | 5165 | (((u64) cmd->cmnd[2]) << 24) | |
|---|
| .. | .. |
|---|
| 5133 | 5172 | break; |
|---|
| 5134 | 5173 | case WRITE_12: |
|---|
| 5135 | 5174 | is_write = 1; |
|---|
| 5175 | + fallthrough; |
|---|
| 5136 | 5176 | case READ_12: |
|---|
| 5137 | 5177 | first_block = |
|---|
| 5138 | 5178 | (((u64) cmd->cmnd[2]) << 24) | |
|---|
| .. | .. |
|---|
| 5147 | 5187 | break; |
|---|
| 5148 | 5188 | case WRITE_16: |
|---|
| 5149 | 5189 | is_write = 1; |
|---|
| 5190 | + fallthrough; |
|---|
| 5150 | 5191 | case READ_16: |
|---|
| 5151 | 5192 | first_block = |
|---|
| 5152 | 5193 | (((u64) cmd->cmnd[2]) << 56) | |
|---|
| .. | .. |
|---|
| 5418 | 5459 | */ |
|---|
| 5419 | 5460 | static int hpsa_ciss_submit(struct ctlr_info *h, |
|---|
| 5420 | 5461 | struct CommandList *c, struct scsi_cmnd *cmd, |
|---|
| 5421 | | - unsigned char scsi3addr[]) |
|---|
| 5462 | + struct hpsa_scsi_dev_t *dev) |
|---|
| 5422 | 5463 | { |
|---|
| 5423 | 5464 | cmd->host_scribble = (unsigned char *) c; |
|---|
| 5424 | 5465 | c->cmd_type = CMD_SCSI; |
|---|
| 5425 | 5466 | c->scsi_cmd = cmd; |
|---|
| 5426 | 5467 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
|---|
| 5427 | | - memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
|---|
| 5468 | + memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); |
|---|
| 5428 | 5469 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); |
|---|
| 5429 | 5470 | |
|---|
| 5430 | 5471 | /* Fill in the request block... */ |
|---|
| .. | .. |
|---|
| 5475 | 5516 | hpsa_cmd_resolve_and_free(h, c); |
|---|
| 5476 | 5517 | return SCSI_MLQUEUE_HOST_BUSY; |
|---|
| 5477 | 5518 | } |
|---|
| 5519 | + |
|---|
| 5520 | + if (dev->in_reset) { |
|---|
| 5521 | + hpsa_cmd_resolve_and_free(h, c); |
|---|
| 5522 | + return SCSI_MLQUEUE_HOST_BUSY; |
|---|
| 5523 | + } |
|---|
| 5524 | + |
|---|
| 5525 | + c->device = dev; |
|---|
| 5526 | + |
|---|
| 5478 | 5527 | enqueue_cmd_and_start_io(h, c); |
|---|
| 5479 | 5528 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
|---|
| 5480 | 5529 | return 0; |
|---|
| .. | .. |
|---|
| 5526 | 5575 | } |
|---|
| 5527 | 5576 | |
|---|
| 5528 | 5577 | static int hpsa_ioaccel_submit(struct ctlr_info *h, |
|---|
| 5529 | | - struct CommandList *c, struct scsi_cmnd *cmd, |
|---|
| 5530 | | - unsigned char *scsi3addr) |
|---|
| 5578 | + struct CommandList *c, struct scsi_cmnd *cmd) |
|---|
| 5531 | 5579 | { |
|---|
| 5532 | 5580 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
|---|
| 5533 | 5581 | int rc = IO_ACCEL_INELIGIBLE; |
|---|
| .. | .. |
|---|
| 5535 | 5583 | if (!dev) |
|---|
| 5536 | 5584 | return SCSI_MLQUEUE_HOST_BUSY; |
|---|
| 5537 | 5585 | |
|---|
| 5586 | + if (dev->in_reset) |
|---|
| 5587 | + return SCSI_MLQUEUE_HOST_BUSY; |
|---|
| 5588 | + |
|---|
| 5589 | + if (hpsa_simple_mode) |
|---|
| 5590 | + return IO_ACCEL_INELIGIBLE; |
|---|
| 5591 | + |
|---|
| 5538 | 5592 | cmd->host_scribble = (unsigned char *) c; |
|---|
| 5539 | 5593 | |
|---|
| 5540 | 5594 | if (dev->offload_enabled) { |
|---|
| 5541 | 5595 | hpsa_cmd_init(h, c->cmdindex, c); |
|---|
| 5542 | 5596 | c->cmd_type = CMD_SCSI; |
|---|
| 5543 | 5597 | c->scsi_cmd = cmd; |
|---|
| 5598 | + c->device = dev; |
|---|
| 5544 | 5599 | rc = hpsa_scsi_ioaccel_raid_map(h, c); |
|---|
| 5545 | 5600 | if (rc < 0) /* scsi_dma_map failed. */ |
|---|
| 5546 | 5601 | rc = SCSI_MLQUEUE_HOST_BUSY; |
|---|
| .. | .. |
|---|
| 5548 | 5603 | hpsa_cmd_init(h, c->cmdindex, c); |
|---|
| 5549 | 5604 | c->cmd_type = CMD_SCSI; |
|---|
| 5550 | 5605 | c->scsi_cmd = cmd; |
|---|
| 5606 | + c->device = dev; |
|---|
| 5551 | 5607 | rc = hpsa_scsi_ioaccel_direct_map(h, c); |
|---|
| 5552 | 5608 | if (rc < 0) /* scsi_dma_map failed. */ |
|---|
| 5553 | 5609 | rc = SCSI_MLQUEUE_HOST_BUSY; |
|---|
| .. | .. |
|---|
| 5567 | 5623 | cmd->result = DID_NO_CONNECT << 16; |
|---|
| 5568 | 5624 | return hpsa_cmd_free_and_done(c->h, c, cmd); |
|---|
| 5569 | 5625 | } |
|---|
| 5570 | | - if (c->reset_pending) |
|---|
| 5626 | + |
|---|
| 5627 | + if (dev->in_reset) { |
|---|
| 5628 | + cmd->result = DID_RESET << 16; |
|---|
| 5571 | 5629 | return hpsa_cmd_free_and_done(c->h, c, cmd); |
|---|
| 5630 | + } |
|---|
| 5631 | + |
|---|
| 5572 | 5632 | if (c->cmd_type == CMD_IOACCEL2) { |
|---|
| 5573 | 5633 | struct ctlr_info *h = c->h; |
|---|
| 5574 | 5634 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
|---|
| .. | .. |
|---|
| 5576 | 5636 | |
|---|
| 5577 | 5637 | if (c2->error_data.serv_response == |
|---|
| 5578 | 5638 | IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { |
|---|
| 5579 | | - rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); |
|---|
| 5639 | + rc = hpsa_ioaccel_submit(h, c, cmd); |
|---|
| 5580 | 5640 | if (rc == 0) |
|---|
| 5581 | 5641 | return; |
|---|
| 5582 | 5642 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
|---|
| .. | .. |
|---|
| 5592 | 5652 | } |
|---|
| 5593 | 5653 | } |
|---|
| 5594 | 5654 | hpsa_cmd_partial_init(c->h, c->cmdindex, c); |
|---|
| 5595 | | - if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { |
|---|
| 5655 | + if (hpsa_ciss_submit(c->h, c, cmd, dev)) { |
|---|
| 5596 | 5656 | /* |
|---|
| 5597 | 5657 | * If we get here, it means dma mapping failed. Try |
|---|
| 5598 | 5658 | * again via scsi mid layer, which will then get |
|---|
| .. | .. |
|---|
| 5611 | 5671 | { |
|---|
| 5612 | 5672 | struct ctlr_info *h; |
|---|
| 5613 | 5673 | struct hpsa_scsi_dev_t *dev; |
|---|
| 5614 | | - unsigned char scsi3addr[8]; |
|---|
| 5615 | 5674 | struct CommandList *c; |
|---|
| 5616 | 5675 | int rc = 0; |
|---|
| 5617 | 5676 | |
|---|
| .. | .. |
|---|
| 5633 | 5692 | return 0; |
|---|
| 5634 | 5693 | } |
|---|
| 5635 | 5694 | |
|---|
| 5636 | | - memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); |
|---|
| 5637 | | - |
|---|
| 5638 | 5695 | if (unlikely(lockup_detected(h))) { |
|---|
| 5639 | 5696 | cmd->result = DID_NO_CONNECT << 16; |
|---|
| 5640 | 5697 | cmd->scsi_done(cmd); |
|---|
| 5641 | 5698 | return 0; |
|---|
| 5642 | 5699 | } |
|---|
| 5700 | + |
|---|
| 5701 | + if (dev->in_reset) |
|---|
| 5702 | + return SCSI_MLQUEUE_DEVICE_BUSY; |
|---|
| 5703 | + |
|---|
| 5643 | 5704 | c = cmd_tagged_alloc(h, cmd); |
|---|
| 5705 | + if (c == NULL) |
|---|
| 5706 | + return SCSI_MLQUEUE_DEVICE_BUSY; |
|---|
| 5644 | 5707 | |
|---|
| 5645 | 5708 | /* |
|---|
| 5646 | 5709 | * This is necessary because the SML doesn't zero out this field during |
|---|
| .. | .. |
|---|
| 5655 | 5718 | if (likely(cmd->retries == 0 && |
|---|
| 5656 | 5719 | !blk_rq_is_passthrough(cmd->request) && |
|---|
| 5657 | 5720 | h->acciopath_status)) { |
|---|
| 5658 | | - rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); |
|---|
| 5721 | + rc = hpsa_ioaccel_submit(h, c, cmd); |
|---|
| 5659 | 5722 | if (rc == 0) |
|---|
| 5660 | 5723 | return 0; |
|---|
| 5661 | 5724 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
|---|
| .. | .. |
|---|
| 5663 | 5726 | return SCSI_MLQUEUE_HOST_BUSY; |
|---|
| 5664 | 5727 | } |
|---|
| 5665 | 5728 | } |
|---|
| 5666 | | - return hpsa_ciss_submit(h, c, cmd, scsi3addr); |
|---|
| 5729 | + return hpsa_ciss_submit(h, c, cmd, dev); |
|---|
| 5667 | 5730 | } |
|---|
| 5668 | 5731 | |
|---|
| 5669 | 5732 | static void hpsa_scan_complete(struct ctlr_info *h) |
|---|
| .. | .. |
|---|
| 5771 | 5834 | { |
|---|
| 5772 | 5835 | struct Scsi_Host *sh; |
|---|
| 5773 | 5836 | |
|---|
| 5774 | | - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); |
|---|
| 5837 | + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); |
|---|
| 5775 | 5838 | if (sh == NULL) { |
|---|
| 5776 | 5839 | dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); |
|---|
| 5777 | 5840 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 5839 | 5902 | /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ |
|---|
| 5840 | 5903 | (void) fill_cmd(c, TEST_UNIT_READY, h, |
|---|
| 5841 | 5904 | NULL, 0, 0, lunaddr, TYPE_CMD); |
|---|
| 5842 | | - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); |
|---|
| 5905 | + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
|---|
| 5843 | 5906 | if (rc) |
|---|
| 5844 | 5907 | return rc; |
|---|
| 5845 | 5908 | /* no unmap needed here because no data xfer. */ |
|---|
| .. | .. |
|---|
| 5945 | 6008 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) |
|---|
| 5946 | 6009 | { |
|---|
| 5947 | 6010 | int rc = SUCCESS; |
|---|
| 6011 | + int i; |
|---|
| 5948 | 6012 | struct ctlr_info *h; |
|---|
| 5949 | | - struct hpsa_scsi_dev_t *dev; |
|---|
| 6013 | + struct hpsa_scsi_dev_t *dev = NULL; |
|---|
| 5950 | 6014 | u8 reset_type; |
|---|
| 5951 | 6015 | char msg[48]; |
|---|
| 5952 | 6016 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 6012 | 6076 | reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); |
|---|
| 6013 | 6077 | hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); |
|---|
| 6014 | 6078 | |
|---|
| 6079 | + /* |
|---|
| 6080 | + * wait to see if any commands will complete before sending reset |
|---|
| 6081 | + */ |
|---|
| 6082 | + dev->in_reset = true; /* block any new cmds from OS for this device */ |
|---|
| 6083 | + for (i = 0; i < 10; i++) { |
|---|
| 6084 | + if (atomic_read(&dev->commands_outstanding) > 0) |
|---|
| 6085 | + msleep(1000); |
|---|
| 6086 | + else |
|---|
| 6087 | + break; |
|---|
| 6088 | + } |
|---|
| 6089 | + |
|---|
| 6015 | 6090 | /* send a reset to the SCSI LUN which the command was sent to */ |
|---|
| 6016 | | - rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, |
|---|
| 6017 | | - DEFAULT_REPLY_QUEUE); |
|---|
| 6091 | + rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); |
|---|
| 6018 | 6092 | if (rc == 0) |
|---|
| 6019 | 6093 | rc = SUCCESS; |
|---|
| 6020 | 6094 | else |
|---|
| .. | .. |
|---|
| 6028 | 6102 | return_reset_status: |
|---|
| 6029 | 6103 | spin_lock_irqsave(&h->reset_lock, flags); |
|---|
| 6030 | 6104 | h->reset_in_progress = 0; |
|---|
| 6105 | + if (dev) |
|---|
| 6106 | + dev->in_reset = false; |
|---|
| 6031 | 6107 | spin_unlock_irqrestore(&h->reset_lock, flags); |
|---|
| 6032 | 6108 | return rc; |
|---|
| 6033 | 6109 | } |
|---|
| .. | .. |
|---|
| 6053 | 6129 | BUG(); |
|---|
| 6054 | 6130 | } |
|---|
| 6055 | 6131 | |
|---|
| 6056 | | - atomic_inc(&c->refcount); |
|---|
| 6057 | 6132 | if (unlikely(!hpsa_is_cmd_idle(c))) { |
|---|
| 6058 | 6133 | /* |
|---|
| 6059 | 6134 | * We expect that the SCSI layer will hand us a unique tag |
|---|
| .. | .. |
|---|
| 6061 | 6136 | * two requests...because if the selected command isn't idle |
|---|
| 6062 | 6137 | * then someone is going to be very disappointed. |
|---|
| 6063 | 6138 | */ |
|---|
| 6064 | | - dev_err(&h->pdev->dev, |
|---|
| 6065 | | - "tag collision (tag=%d) in cmd_tagged_alloc().\n", |
|---|
| 6066 | | - idx); |
|---|
| 6067 | | - if (c->scsi_cmd != NULL) |
|---|
| 6068 | | - scsi_print_command(c->scsi_cmd); |
|---|
| 6069 | | - scsi_print_command(scmd); |
|---|
| 6139 | + if (idx != h->last_collision_tag) { /* Print once per tag */ |
|---|
| 6140 | + dev_warn(&h->pdev->dev, |
|---|
| 6141 | + "%s: tag collision (tag=%d)\n", __func__, idx); |
|---|
| 6142 | + if (scmd) |
|---|
| 6143 | + scsi_print_command(scmd); |
|---|
| 6144 | + h->last_collision_tag = idx; |
|---|
| 6145 | + } |
|---|
| 6146 | + return NULL; |
|---|
| 6070 | 6147 | } |
|---|
| 6148 | + |
|---|
| 6149 | + atomic_inc(&c->refcount); |
|---|
| 6071 | 6150 | |
|---|
| 6072 | 6151 | hpsa_cmd_partial_init(h, idx, c); |
|---|
| 6073 | 6152 | return c; |
|---|
| .. | .. |
|---|
| 6136 | 6215 | break; /* it's ours now. */ |
|---|
| 6137 | 6216 | } |
|---|
| 6138 | 6217 | hpsa_cmd_partial_init(h, i, c); |
|---|
| 6218 | + c->device = NULL; |
|---|
| 6139 | 6219 | return c; |
|---|
| 6140 | 6220 | } |
|---|
| 6141 | 6221 | |
|---|
| .. | .. |
|---|
| 6158 | 6238 | |
|---|
| 6159 | 6239 | #ifdef CONFIG_COMPAT |
|---|
| 6160 | 6240 | |
|---|
| 6161 | | -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, |
|---|
| 6241 | +static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, |
|---|
| 6162 | 6242 | void __user *arg) |
|---|
| 6163 | 6243 | { |
|---|
| 6164 | | - IOCTL32_Command_struct __user *arg32 = |
|---|
| 6165 | | - (IOCTL32_Command_struct __user *) arg; |
|---|
| 6244 | + struct ctlr_info *h = sdev_to_hba(dev); |
|---|
| 6245 | + IOCTL32_Command_struct __user *arg32 = arg; |
|---|
| 6166 | 6246 | IOCTL_Command_struct arg64; |
|---|
| 6167 | | - IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); |
|---|
| 6168 | 6247 | int err; |
|---|
| 6169 | 6248 | u32 cp; |
|---|
| 6170 | 6249 | |
|---|
| 6250 | + if (!arg) |
|---|
| 6251 | + return -EINVAL; |
|---|
| 6252 | + |
|---|
| 6171 | 6253 | memset(&arg64, 0, sizeof(arg64)); |
|---|
| 6172 | | - err = 0; |
|---|
| 6173 | | - err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
|---|
| 6174 | | - sizeof(arg64.LUN_info)); |
|---|
| 6175 | | - err |= copy_from_user(&arg64.Request, &arg32->Request, |
|---|
| 6176 | | - sizeof(arg64.Request)); |
|---|
| 6177 | | - err |= copy_from_user(&arg64.error_info, &arg32->error_info, |
|---|
| 6178 | | - sizeof(arg64.error_info)); |
|---|
| 6179 | | - err |= get_user(arg64.buf_size, &arg32->buf_size); |
|---|
| 6180 | | - err |= get_user(cp, &arg32->buf); |
|---|
| 6181 | | - arg64.buf = compat_ptr(cp); |
|---|
| 6182 | | - err |= copy_to_user(p, &arg64, sizeof(arg64)); |
|---|
| 6183 | | - |
|---|
| 6184 | | - if (err) |
|---|
| 6254 | + if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) |
|---|
| 6185 | 6255 | return -EFAULT; |
|---|
| 6256 | + if (get_user(cp, &arg32->buf)) |
|---|
| 6257 | + return -EFAULT; |
|---|
| 6258 | + arg64.buf = compat_ptr(cp); |
|---|
| 6186 | 6259 | |
|---|
| 6187 | | - err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); |
|---|
| 6260 | + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
|---|
| 6261 | + return -EAGAIN; |
|---|
| 6262 | + err = hpsa_passthru_ioctl(h, &arg64); |
|---|
| 6263 | + atomic_inc(&h->passthru_cmds_avail); |
|---|
| 6188 | 6264 | if (err) |
|---|
| 6189 | 6265 | return err; |
|---|
| 6190 | | - err |= copy_in_user(&arg32->error_info, &p->error_info, |
|---|
| 6191 | | - sizeof(arg32->error_info)); |
|---|
| 6192 | | - if (err) |
|---|
| 6266 | + if (copy_to_user(&arg32->error_info, &arg64.error_info, |
|---|
| 6267 | + sizeof(arg32->error_info))) |
|---|
| 6193 | 6268 | return -EFAULT; |
|---|
| 6194 | | - return err; |
|---|
| 6269 | + return 0; |
|---|
| 6195 | 6270 | } |
|---|
| 6196 | 6271 | |
|---|
| 6197 | 6272 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, |
|---|
| 6198 | | - int cmd, void __user *arg) |
|---|
| 6273 | + unsigned int cmd, void __user *arg) |
|---|
| 6199 | 6274 | { |
|---|
| 6200 | | - BIG_IOCTL32_Command_struct __user *arg32 = |
|---|
| 6201 | | - (BIG_IOCTL32_Command_struct __user *) arg; |
|---|
| 6275 | + struct ctlr_info *h = sdev_to_hba(dev); |
|---|
| 6276 | + BIG_IOCTL32_Command_struct __user *arg32 = arg; |
|---|
| 6202 | 6277 | BIG_IOCTL_Command_struct arg64; |
|---|
| 6203 | | - BIG_IOCTL_Command_struct __user *p = |
|---|
| 6204 | | - compat_alloc_user_space(sizeof(arg64)); |
|---|
| 6205 | 6278 | int err; |
|---|
| 6206 | 6279 | u32 cp; |
|---|
| 6207 | 6280 | |
|---|
| 6281 | + if (!arg) |
|---|
| 6282 | + return -EINVAL; |
|---|
| 6208 | 6283 | memset(&arg64, 0, sizeof(arg64)); |
|---|
| 6209 | | - err = 0; |
|---|
| 6210 | | - err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
|---|
| 6211 | | - sizeof(arg64.LUN_info)); |
|---|
| 6212 | | - err |= copy_from_user(&arg64.Request, &arg32->Request, |
|---|
| 6213 | | - sizeof(arg64.Request)); |
|---|
| 6214 | | - err |= copy_from_user(&arg64.error_info, &arg32->error_info, |
|---|
| 6215 | | - sizeof(arg64.error_info)); |
|---|
| 6216 | | - err |= get_user(arg64.buf_size, &arg32->buf_size); |
|---|
| 6217 | | - err |= get_user(arg64.malloc_size, &arg32->malloc_size); |
|---|
| 6218 | | - err |= get_user(cp, &arg32->buf); |
|---|
| 6219 | | - arg64.buf = compat_ptr(cp); |
|---|
| 6220 | | - err |= copy_to_user(p, &arg64, sizeof(arg64)); |
|---|
| 6221 | | - |
|---|
| 6222 | | - if (err) |
|---|
| 6284 | + if (copy_from_user(&arg64, arg32, |
|---|
| 6285 | + offsetof(BIG_IOCTL32_Command_struct, buf))) |
|---|
| 6223 | 6286 | return -EFAULT; |
|---|
| 6287 | + if (get_user(cp, &arg32->buf)) |
|---|
| 6288 | + return -EFAULT; |
|---|
| 6289 | + arg64.buf = compat_ptr(cp); |
|---|
| 6224 | 6290 | |
|---|
| 6225 | | - err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); |
|---|
| 6291 | + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
|---|
| 6292 | + return -EAGAIN; |
|---|
| 6293 | + err = hpsa_big_passthru_ioctl(h, &arg64); |
|---|
| 6294 | + atomic_inc(&h->passthru_cmds_avail); |
|---|
| 6226 | 6295 | if (err) |
|---|
| 6227 | 6296 | return err; |
|---|
| 6228 | | - err |= copy_in_user(&arg32->error_info, &p->error_info, |
|---|
| 6229 | | - sizeof(arg32->error_info)); |
|---|
| 6230 | | - if (err) |
|---|
| 6297 | + if (copy_to_user(&arg32->error_info, &arg64.error_info, |
|---|
| 6298 | + sizeof(arg32->error_info))) |
|---|
| 6231 | 6299 | return -EFAULT; |
|---|
| 6232 | | - return err; |
|---|
| 6300 | + return 0; |
|---|
| 6233 | 6301 | } |
|---|
| 6234 | 6302 | |
|---|
| 6235 | | -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
|---|
| 6303 | +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, |
|---|
| 6304 | + void __user *arg) |
|---|
| 6236 | 6305 | { |
|---|
| 6237 | 6306 | switch (cmd) { |
|---|
| 6238 | 6307 | case CCISS_GETPCIINFO: |
|---|
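In the compat path above, the 32-bit ioctl handlers stop bouncing through compat_alloc_user_space(): they build the native structure in kernel memory by copying the layout-compatible prefix (everything before the buffer pointer) with a single copy_from_user(), translating the pointer with compat_ptr(), and then calling the shared kernel-space passthru helpers directly. A reduced sketch of that conversion, with made-up structure names:

```c
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative only: a 32-bit layout whose fields match the native one
 * up to the user buffer pointer.
 */
struct example_cmd32 { u32 len; compat_uptr_t buf; };
struct example_cmd   { u32 len; void __user *buf; };

static int example_get_cmd32(struct example_cmd *out,
			     struct example_cmd32 __user *in)
{
	u32 uptr;

	/* copy everything up to (but not including) the pointer member */
	if (copy_from_user(out, in, offsetof(struct example_cmd32, buf)))
		return -EFAULT;
	if (get_user(uptr, &in->buf))
		return -EFAULT;
	out->buf = compat_ptr(uptr);	/* widen the 32-bit user pointer */
	return 0;
}
```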
| .. | .. |
|---|
| 6301 | 6370 | return 0; |
|---|
| 6302 | 6371 | } |
|---|
| 6303 | 6372 | |
|---|
| 6304 | | -static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
|---|
| 6373 | +static int hpsa_passthru_ioctl(struct ctlr_info *h, |
|---|
| 6374 | + IOCTL_Command_struct *iocommand) |
|---|
| 6305 | 6375 | { |
|---|
| 6306 | | - IOCTL_Command_struct iocommand; |
|---|
| 6307 | 6376 | struct CommandList *c; |
|---|
| 6308 | 6377 | char *buff = NULL; |
|---|
| 6309 | 6378 | u64 temp64; |
|---|
| 6310 | 6379 | int rc = 0; |
|---|
| 6311 | 6380 | |
|---|
| 6312 | | - if (!argp) |
|---|
| 6313 | | - return -EINVAL; |
|---|
| 6314 | 6381 | if (!capable(CAP_SYS_RAWIO)) |
|---|
| 6315 | 6382 | return -EPERM; |
|---|
| 6316 | | - if (copy_from_user(&iocommand, argp, sizeof(iocommand))) |
|---|
| 6317 | | - return -EFAULT; |
|---|
| 6318 | | - if ((iocommand.buf_size < 1) && |
|---|
| 6319 | | - (iocommand.Request.Type.Direction != XFER_NONE)) { |
|---|
| 6383 | + if ((iocommand->buf_size < 1) && |
|---|
| 6384 | + (iocommand->Request.Type.Direction != XFER_NONE)) { |
|---|
| 6320 | 6385 | return -EINVAL; |
|---|
| 6321 | 6386 | } |
|---|
| 6322 | | - if (iocommand.buf_size > 0) { |
|---|
| 6323 | | - buff = kmalloc(iocommand.buf_size, GFP_KERNEL); |
|---|
| 6387 | + if (iocommand->buf_size > 0) { |
|---|
| 6388 | + buff = kmalloc(iocommand->buf_size, GFP_KERNEL); |
|---|
| 6324 | 6389 | if (buff == NULL) |
|---|
| 6325 | 6390 | return -ENOMEM; |
|---|
| 6326 | | - if (iocommand.Request.Type.Direction & XFER_WRITE) { |
|---|
| 6391 | + if (iocommand->Request.Type.Direction & XFER_WRITE) { |
|---|
| 6327 | 6392 | /* Copy the data into the buffer we created */ |
|---|
| 6328 | | - if (copy_from_user(buff, iocommand.buf, |
|---|
| 6329 | | - iocommand.buf_size)) { |
|---|
| 6393 | + if (copy_from_user(buff, iocommand->buf, |
|---|
| 6394 | + iocommand->buf_size)) { |
|---|
| 6330 | 6395 | rc = -EFAULT; |
|---|
| 6331 | 6396 | goto out_kfree; |
|---|
| 6332 | 6397 | } |
|---|
| 6333 | 6398 | } else { |
|---|
| 6334 | | - memset(buff, 0, iocommand.buf_size); |
|---|
| 6399 | + memset(buff, 0, iocommand->buf_size); |
|---|
| 6335 | 6400 | } |
|---|
| 6336 | 6401 | } |
|---|
| 6337 | 6402 | c = cmd_alloc(h); |
|---|
| .. | .. |
|---|
| 6341 | 6406 | c->scsi_cmd = SCSI_CMD_BUSY; |
|---|
| 6342 | 6407 | /* Fill in Command Header */ |
|---|
| 6343 | 6408 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
|---|
| 6344 | | - if (iocommand.buf_size > 0) { /* buffer to fill */ |
|---|
| 6409 | + if (iocommand->buf_size > 0) { /* buffer to fill */ |
|---|
| 6345 | 6410 | c->Header.SGList = 1; |
|---|
| 6346 | 6411 | c->Header.SGTotal = cpu_to_le16(1); |
|---|
| 6347 | 6412 | } else { /* no buffers to fill */ |
|---|
| 6348 | 6413 | c->Header.SGList = 0; |
|---|
| 6349 | 6414 | c->Header.SGTotal = cpu_to_le16(0); |
|---|
| 6350 | 6415 | } |
|---|
| 6351 | | - memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); |
|---|
| 6416 | + memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); |
|---|
| 6352 | 6417 | |
|---|
| 6353 | 6418 | /* Fill in Request block */ |
|---|
| 6354 | | - memcpy(&c->Request, &iocommand.Request, |
|---|
| 6419 | + memcpy(&c->Request, &iocommand->Request, |
|---|
| 6355 | 6420 | sizeof(c->Request)); |
|---|
| 6356 | 6421 | |
|---|
| 6357 | 6422 | /* Fill in the scatter gather information */ |
|---|
| 6358 | | - if (iocommand.buf_size > 0) { |
|---|
| 6359 | | - temp64 = pci_map_single(h->pdev, buff, |
|---|
| 6360 | | - iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); |
|---|
| 6423 | + if (iocommand->buf_size > 0) { |
|---|
| 6424 | + temp64 = dma_map_single(&h->pdev->dev, buff, |
|---|
| 6425 | + iocommand->buf_size, DMA_BIDIRECTIONAL); |
|---|
| 6361 | 6426 | if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { |
|---|
| 6362 | 6427 | c->SG[0].Addr = cpu_to_le64(0); |
|---|
| 6363 | 6428 | c->SG[0].Len = cpu_to_le32(0); |
|---|
| .. | .. |
|---|
| 6365 | 6430 | goto out; |
|---|
| 6366 | 6431 | } |
|---|
| 6367 | 6432 | c->SG[0].Addr = cpu_to_le64(temp64); |
|---|
| 6368 | | - c->SG[0].Len = cpu_to_le32(iocommand.buf_size); |
|---|
| 6433 | + c->SG[0].Len = cpu_to_le32(iocommand->buf_size); |
|---|
| 6369 | 6434 | c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ |
|---|
| 6370 | 6435 | } |
|---|
| 6371 | 6436 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
|---|
| 6372 | 6437 | NO_TIMEOUT); |
|---|
| 6373 | | - if (iocommand.buf_size > 0) |
|---|
| 6374 | | - hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); |
|---|
| 6438 | + if (iocommand->buf_size > 0) |
|---|
| 6439 | + hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); |
|---|
| 6375 | 6440 | check_ioctl_unit_attention(h, c); |
|---|
| 6376 | 6441 | if (rc) { |
|---|
| 6377 | 6442 | rc = -EIO; |
|---|
| .. | .. |
|---|
| 6379 | 6444 | } |
|---|
| 6380 | 6445 | |
|---|
| 6381 | 6446 | /* Copy the error information out */ |
|---|
| 6382 | | - memcpy(&iocommand.error_info, c->err_info, |
|---|
| 6383 | | - sizeof(iocommand.error_info)); |
|---|
| 6384 | | - if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { |
|---|
| 6385 | | - rc = -EFAULT; |
|---|
| 6386 | | - goto out; |
|---|
| 6387 | | - } |
|---|
| 6388 | | - if ((iocommand.Request.Type.Direction & XFER_READ) && |
|---|
| 6389 | | - iocommand.buf_size > 0) { |
|---|
| 6447 | + memcpy(&iocommand->error_info, c->err_info, |
|---|
| 6448 | + sizeof(iocommand->error_info)); |
|---|
| 6449 | + if ((iocommand->Request.Type.Direction & XFER_READ) && |
|---|
| 6450 | + iocommand->buf_size > 0) { |
|---|
| 6390 | 6451 | /* Copy the data out of the buffer we created */ |
|---|
| 6391 | | - if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { |
|---|
| 6452 | + if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { |
|---|
| 6392 | 6453 | rc = -EFAULT; |
|---|
| 6393 | 6454 | goto out; |
|---|
| 6394 | 6455 | } |
|---|
| .. | .. |
|---|
| 6400 | 6461 | return rc; |
|---|
| 6401 | 6462 | } |
|---|
| 6402 | 6463 | |
|---|
| 6403 | | -static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
|---|
| 6464 | +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, |
|---|
| 6465 | + BIG_IOCTL_Command_struct *ioc) |
|---|
| 6404 | 6466 | { |
|---|
| 6405 | | - BIG_IOCTL_Command_struct *ioc; |
|---|
| 6406 | 6467 | struct CommandList *c; |
|---|
| 6407 | 6468 | unsigned char **buff = NULL; |
|---|
| 6408 | 6469 | int *buff_size = NULL; |
|---|
| .. | .. |
|---|
| 6413 | 6474 | u32 sz; |
|---|
| 6414 | 6475 | BYTE __user *data_ptr; |
|---|
| 6415 | 6476 | |
|---|
| 6416 | | - if (!argp) |
|---|
| 6417 | | - return -EINVAL; |
|---|
| 6418 | 6477 | if (!capable(CAP_SYS_RAWIO)) |
|---|
| 6419 | 6478 | return -EPERM; |
|---|
| 6420 | | - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); |
|---|
| 6421 | | - if (!ioc) { |
|---|
| 6422 | | - status = -ENOMEM; |
|---|
| 6423 | | - goto cleanup1; |
|---|
| 6424 | | - } |
|---|
| 6425 | | - if (copy_from_user(ioc, argp, sizeof(*ioc))) { |
|---|
| 6426 | | - status = -EFAULT; |
|---|
| 6427 | | - goto cleanup1; |
|---|
| 6428 | | - } |
|---|
| 6479 | + |
|---|
| 6429 | 6480 | if ((ioc->buf_size < 1) && |
|---|
| 6430 | | - (ioc->Request.Type.Direction != XFER_NONE)) { |
|---|
| 6431 | | - status = -EINVAL; |
|---|
| 6432 | | - goto cleanup1; |
|---|
| 6433 | | - } |
|---|
| 6481 | + (ioc->Request.Type.Direction != XFER_NONE)) |
|---|
| 6482 | + return -EINVAL; |
|---|
| 6434 | 6483 | /* Check kmalloc limits using all SGs */ |
|---|
| 6435 | | - if (ioc->malloc_size > MAX_KMALLOC_SIZE) { |
|---|
| 6436 | | - status = -EINVAL; |
|---|
| 6437 | | - goto cleanup1; |
|---|
| 6438 | | - } |
|---|
| 6439 | | - if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { |
|---|
| 6440 | | - status = -EINVAL; |
|---|
| 6441 | | - goto cleanup1; |
|---|
| 6442 | | - } |
|---|
| 6484 | + if (ioc->malloc_size > MAX_KMALLOC_SIZE) |
|---|
| 6485 | + return -EINVAL; |
|---|
| 6486 | + if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) |
|---|
| 6487 | + return -EINVAL; |
|---|
| 6443 | 6488 | buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); |
|---|
| 6444 | 6489 | if (!buff) { |
|---|
| 6445 | 6490 | status = -ENOMEM; |
|---|
| .. | .. |
|---|
| 6483 | 6528 | if (ioc->buf_size > 0) { |
|---|
| 6484 | 6529 | int i; |
|---|
| 6485 | 6530 | for (i = 0; i < sg_used; i++) { |
|---|
| 6486 | | - temp64 = pci_map_single(h->pdev, buff[i], |
|---|
| 6487 | | - buff_size[i], PCI_DMA_BIDIRECTIONAL); |
|---|
| 6531 | + temp64 = dma_map_single(&h->pdev->dev, buff[i], |
|---|
| 6532 | + buff_size[i], DMA_BIDIRECTIONAL); |
|---|
| 6488 | 6533 | if (dma_mapping_error(&h->pdev->dev, |
|---|
| 6489 | 6534 | (dma_addr_t) temp64)) { |
|---|
| 6490 | 6535 | c->SG[i].Addr = cpu_to_le64(0); |
|---|
| 6491 | 6536 | c->SG[i].Len = cpu_to_le32(0); |
|---|
| 6492 | 6537 | hpsa_pci_unmap(h->pdev, c, i, |
|---|
| 6493 | | - PCI_DMA_BIDIRECTIONAL); |
|---|
| 6538 | + DMA_BIDIRECTIONAL); |
|---|
| 6494 | 6539 | status = -ENOMEM; |
|---|
| 6495 | 6540 | goto cleanup0; |
|---|
| 6496 | 6541 | } |
|---|
| .. | .. |
|---|
| 6503 | 6548 | status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
|---|
| 6504 | 6549 | NO_TIMEOUT); |
|---|
| 6505 | 6550 | if (sg_used) |
|---|
| 6506 | | - hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); |
|---|
| 6551 | + hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); |
|---|
| 6507 | 6552 | check_ioctl_unit_attention(h, c); |
|---|
| 6508 | 6553 | if (status) { |
|---|
| 6509 | 6554 | status = -EIO; |
|---|
| .. | .. |
|---|
| 6512 | 6557 | |
|---|
| 6513 | 6558 | /* Copy the error information out */ |
|---|
| 6514 | 6559 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); |
|---|
| 6515 | | - if (copy_to_user(argp, ioc, sizeof(*ioc))) { |
|---|
| 6516 | | - status = -EFAULT; |
|---|
| 6517 | | - goto cleanup0; |
|---|
| 6518 | | - } |
|---|
| 6519 | 6560 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { |
|---|
| 6520 | 6561 | int i; |
|---|
| 6521 | 6562 | |
|---|
| .. | .. |
|---|
| 6541 | 6582 | kfree(buff); |
|---|
| 6542 | 6583 | } |
|---|
| 6543 | 6584 | kfree(buff_size); |
|---|
| 6544 | | - kfree(ioc); |
|---|
| 6545 | 6585 | return status; |
|---|
| 6546 | 6586 | } |
|---|
| 6547 | 6587 | |
|---|
| .. | .. |
|---|
| 6556 | 6596 | /* |
|---|
| 6557 | 6597 | * ioctl |
|---|
| 6558 | 6598 | */ |
|---|
| 6559 | | -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
|---|
| 6599 | +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, |
|---|
| 6600 | + void __user *argp) |
|---|
| 6560 | 6601 | { |
|---|
| 6561 | | - struct ctlr_info *h; |
|---|
| 6562 | | - void __user *argp = (void __user *)arg; |
|---|
| 6602 | + struct ctlr_info *h = sdev_to_hba(dev); |
|---|
| 6563 | 6603 | int rc; |
|---|
| 6564 | | - |
|---|
| 6565 | | - h = sdev_to_hba(dev); |
|---|
| 6566 | 6604 | |
|---|
| 6567 | 6605 | switch (cmd) { |
|---|
| 6568 | 6606 | case CCISS_DEREGDISK: |
|---|
| .. | .. |
|---|
| 6574 | 6612 | return hpsa_getpciinfo_ioctl(h, argp); |
|---|
| 6575 | 6613 | case CCISS_GETDRIVVER: |
|---|
| 6576 | 6614 | return hpsa_getdrivver_ioctl(h, argp); |
|---|
| 6577 | | - case CCISS_PASSTHRU: |
|---|
| 6615 | + case CCISS_PASSTHRU: { |
|---|
| 6616 | + IOCTL_Command_struct iocommand; |
|---|
| 6617 | + |
|---|
| 6618 | + if (!argp) |
|---|
| 6619 | + return -EINVAL; |
|---|
| 6620 | + if (copy_from_user(&iocommand, argp, sizeof(iocommand))) |
|---|
| 6621 | + return -EFAULT; |
|---|
| 6578 | 6622 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
|---|
| 6579 | 6623 | return -EAGAIN; |
|---|
| 6580 | | - rc = hpsa_passthru_ioctl(h, argp); |
|---|
| 6624 | + rc = hpsa_passthru_ioctl(h, &iocommand); |
|---|
| 6581 | 6625 | atomic_inc(&h->passthru_cmds_avail); |
|---|
| 6626 | + if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) |
|---|
| 6627 | + rc = -EFAULT; |
|---|
| 6582 | 6628 | return rc; |
|---|
| 6583 | | - case CCISS_BIG_PASSTHRU: |
|---|
| 6629 | + } |
|---|
| 6630 | + case CCISS_BIG_PASSTHRU: { |
|---|
| 6631 | + BIG_IOCTL_Command_struct ioc; |
|---|
| 6632 | + if (!argp) |
|---|
| 6633 | + return -EINVAL; |
|---|
| 6634 | + if (copy_from_user(&ioc, argp, sizeof(ioc))) |
|---|
| 6635 | + return -EFAULT; |
|---|
| 6584 | 6636 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
|---|
| 6585 | 6637 | return -EAGAIN; |
|---|
| 6586 | | - rc = hpsa_big_passthru_ioctl(h, argp); |
|---|
| 6638 | + rc = hpsa_big_passthru_ioctl(h, &ioc); |
|---|
| 6587 | 6639 | atomic_inc(&h->passthru_cmds_avail); |
|---|
| 6640 | + if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) |
|---|
| 6641 | + rc = -EFAULT; |
|---|
| 6588 | 6642 | return rc; |
|---|
| 6643 | + } |
|---|
| 6589 | 6644 | default: |
|---|
| 6590 | 6645 | return -ENOTTY; |
|---|
| 6591 | 6646 | } |
|---|
| 6592 | 6647 | } |
|---|
| 6593 | 6648 | |
|---|
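With the copy_from_user()/copy_to_user() calls pulled up into hpsa_ioctl(), hpsa_passthru_ioctl() and hpsa_big_passthru_ioctl() operate only on kernel copies, and results are written back to user space only when the command succeeded; this is also what allows a compat entry point to reuse the same helpers with a translated structure. A rough sketch of the dispatch pattern, with a hypothetical function name:

static int example_passthru_dispatch(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;	/* kernel copy of the request */
	int rc;

	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;

	rc = hpsa_passthru_ioctl(h, &iocommand);	/* no __user access inside */

	/* Copy results (including error_info) back only on success. */
	if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
		rc = -EFAULT;
	return rc;
}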
| 6594 | | -static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, |
|---|
| 6595 | | - u8 reset_type) |
|---|
| 6649 | +static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) |
|---|
| 6596 | 6650 | { |
|---|
| 6597 | 6651 | struct CommandList *c; |
|---|
| 6598 | 6652 | |
|---|
| .. | .. |
|---|
| 6615 | 6669 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
|---|
| 6616 | 6670 | int cmd_type) |
|---|
| 6617 | 6671 | { |
|---|
| 6618 | | - int pci_dir = XFER_NONE; |
|---|
| 6672 | + enum dma_data_direction dir = DMA_NONE; |
|---|
| 6619 | 6673 | |
|---|
| 6620 | 6674 | c->cmd_type = CMD_IOCTL_PEND; |
|---|
| 6621 | 6675 | c->scsi_cmd = SCSI_CMD_BUSY; |
|---|
| .. | .. |
|---|
| 6821 | 6875 | |
|---|
| 6822 | 6876 | switch (GET_DIR(c->Request.type_attr_dir)) { |
|---|
| 6823 | 6877 | case XFER_READ: |
|---|
| 6824 | | - pci_dir = PCI_DMA_FROMDEVICE; |
|---|
| 6878 | + dir = DMA_FROM_DEVICE; |
|---|
| 6825 | 6879 | break; |
|---|
| 6826 | 6880 | case XFER_WRITE: |
|---|
| 6827 | | - pci_dir = PCI_DMA_TODEVICE; |
|---|
| 6881 | + dir = DMA_TO_DEVICE; |
|---|
| 6828 | 6882 | break; |
|---|
| 6829 | 6883 | case XFER_NONE: |
|---|
| 6830 | | - pci_dir = PCI_DMA_NONE; |
|---|
| 6884 | + dir = DMA_NONE; |
|---|
| 6831 | 6885 | break; |
|---|
| 6832 | 6886 | default: |
|---|
| 6833 | | - pci_dir = PCI_DMA_BIDIRECTIONAL; |
|---|
| 6887 | + dir = DMA_BIDIRECTIONAL; |
|---|
| 6834 | 6888 | } |
|---|
| 6835 | | - if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) |
|---|
| 6889 | + if (hpsa_map_one(h->pdev, c, buff, size, dir)) |
|---|
| 6836 | 6890 | return -1; |
|---|
| 6837 | 6891 | return 0; |
|---|
| 6838 | 6892 | } |
|---|
| .. | .. |
|---|
| 6844 | 6898 | { |
|---|
| 6845 | 6899 | ulong page_base = ((ulong) base) & PAGE_MASK; |
|---|
| 6846 | 6900 | ulong page_offs = ((ulong) base) - page_base; |
|---|
| 6847 | | - void __iomem *page_remapped = ioremap_nocache(page_base, |
|---|
| 6901 | + void __iomem *page_remapped = ioremap(page_base, |
|---|
| 6848 | 6902 | page_offs + size); |
|---|
| 6849 | 6903 | |
|---|
| 6850 | 6904 | return page_remapped ? (page_remapped + page_offs) : NULL; |
|---|
| .. | .. |
|---|
| 7028 | 7082 | * CCISS commands, so they must be allocated from the lower 4GiB of |
|---|
| 7029 | 7083 | * memory. |
|---|
| 7030 | 7084 | */ |
|---|
| 7031 | | - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
|---|
| 7085 | + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
|---|
| 7032 | 7086 | if (err) { |
|---|
| 7033 | 7087 | iounmap(vaddr); |
|---|
| 7034 | 7088 | return err; |
|---|
| 7035 | 7089 | } |
|---|
| 7036 | 7090 | |
|---|
| 7037 | | - cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); |
|---|
| 7091 | + cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); |
|---|
| 7038 | 7092 | if (cmd == NULL) { |
|---|
| 7039 | 7093 | iounmap(vaddr); |
|---|
| 7040 | 7094 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 7083 | 7137 | return -ETIMEDOUT; |
|---|
| 7084 | 7138 | } |
|---|
| 7085 | 7139 | |
|---|
| 7086 | | - pci_free_consistent(pdev, cmd_sz, cmd, paddr64); |
|---|
| 7140 | + dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); |
|---|
| 7087 | 7141 | |
|---|
| 7088 | 7142 | if (tag & HPSA_ERROR_BIT) { |
|---|
| 7089 | 7143 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", |
|---|
| .. | .. |
|---|
| 7777 | 7831 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ |
|---|
| 7778 | 7832 | /* |
|---|
| 7779 | 7833 | * call pci_disable_device before pci_release_regions per |
|---|
| 7780 | | - * Documentation/PCI/pci.txt |
|---|
| 7834 | + * Documentation/driver-api/pci/pci.rst |
|---|
| 7781 | 7835 | */ |
|---|
| 7782 | 7836 | pci_disable_device(h->pdev); /* pci_init 1 */ |
|---|
| 7783 | 7837 | pci_release_regions(h->pdev); /* pci_init 2 */ |
|---|
| .. | .. |
|---|
| 7860 | 7914 | clean1: |
|---|
| 7861 | 7915 | /* |
|---|
| 7862 | 7916 | * call pci_disable_device before pci_release_regions per |
|---|
| 7863 | | - * Documentation/PCI/pci.txt |
|---|
| 7917 | + * Documentation/driver-api/pci/pci.rst |
|---|
| 7864 | 7918 | */ |
|---|
| 7865 | 7919 | pci_disable_device(h->pdev); |
|---|
| 7866 | 7920 | pci_release_regions(h->pdev); |
|---|
| .. | .. |
|---|
| 7950 | 8004 | kfree(h->cmd_pool_bits); |
|---|
| 7951 | 8005 | h->cmd_pool_bits = NULL; |
|---|
| 7952 | 8006 | if (h->cmd_pool) { |
|---|
| 7953 | | - pci_free_consistent(h->pdev, |
|---|
| 8007 | + dma_free_coherent(&h->pdev->dev, |
|---|
| 7954 | 8008 | h->nr_cmds * sizeof(struct CommandList), |
|---|
| 7955 | 8009 | h->cmd_pool, |
|---|
| 7956 | 8010 | h->cmd_pool_dhandle); |
|---|
| .. | .. |
|---|
| 7958 | 8012 | h->cmd_pool_dhandle = 0; |
|---|
| 7959 | 8013 | } |
|---|
| 7960 | 8014 | if (h->errinfo_pool) { |
|---|
| 7961 | | - pci_free_consistent(h->pdev, |
|---|
| 8015 | + dma_free_coherent(&h->pdev->dev, |
|---|
| 7962 | 8016 | h->nr_cmds * sizeof(struct ErrorInfo), |
|---|
| 7963 | 8017 | h->errinfo_pool, |
|---|
| 7964 | 8018 | h->errinfo_pool_dhandle); |
|---|
| .. | .. |
|---|
| 7972 | 8026 | h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG), |
|---|
| 7973 | 8027 | sizeof(unsigned long), |
|---|
| 7974 | 8028 | GFP_KERNEL); |
|---|
| 7975 | | - h->cmd_pool = pci_alloc_consistent(h->pdev, |
|---|
| 8029 | + h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, |
|---|
| 7976 | 8030 | h->nr_cmds * sizeof(*h->cmd_pool), |
|---|
| 7977 | | - &(h->cmd_pool_dhandle)); |
|---|
| 7978 | | - h->errinfo_pool = pci_alloc_consistent(h->pdev, |
|---|
| 8031 | + &h->cmd_pool_dhandle, GFP_KERNEL); |
|---|
| 8032 | + h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, |
|---|
| 7979 | 8033 | h->nr_cmds * sizeof(*h->errinfo_pool), |
|---|
| 7980 | | - &(h->errinfo_pool_dhandle)); |
|---|
| 8034 | + &h->errinfo_pool_dhandle, GFP_KERNEL); |
|---|
| 7981 | 8035 | if ((h->cmd_pool_bits == NULL) |
|---|
| 7982 | 8036 | || (h->cmd_pool == NULL) |
|---|
| 7983 | 8037 | || (h->errinfo_pool == NULL)) { |
|---|
| .. | .. |
|---|
| 7995 | 8049 | static void hpsa_free_irqs(struct ctlr_info *h) |
|---|
| 7996 | 8050 | { |
|---|
| 7997 | 8051 | int i; |
|---|
| 8052 | + int irq_vector = 0; |
|---|
| 8053 | + |
|---|
| 8054 | + if (hpsa_simple_mode) |
|---|
| 8055 | + irq_vector = h->intr_mode; |
|---|
| 7998 | 8056 | |
|---|
| 7999 | 8057 | if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { |
|---|
| 8000 | 8058 | /* Single reply queue, only one irq to free */ |
|---|
| 8001 | | - free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]); |
|---|
| 8059 | + free_irq(pci_irq_vector(h->pdev, irq_vector), |
|---|
| 8060 | + &h->q[h->intr_mode]); |
|---|
| 8002 | 8061 | h->q[h->intr_mode] = 0; |
|---|
| 8003 | 8062 | return; |
|---|
| 8004 | 8063 | } |
|---|
| .. | .. |
|---|
| 8017 | 8076 | irqreturn_t (*intxhandler)(int, void *)) |
|---|
| 8018 | 8077 | { |
|---|
| 8019 | 8078 | int rc, i; |
|---|
| 8079 | + int irq_vector = 0; |
|---|
| 8080 | + |
|---|
| 8081 | + if (hpsa_simple_mode) |
|---|
| 8082 | + irq_vector = h->intr_mode; |
|---|
| 8020 | 8083 | |
|---|
| 8021 | 8084 | /* |
|---|
| 8022 | 8085 | * initialize h->q[x] = x so that interrupt handlers know which |
|---|
| .. | .. |
|---|
| 8052 | 8115 | if (h->msix_vectors > 0 || h->pdev->msi_enabled) { |
|---|
| 8053 | 8116 | sprintf(h->intrname[0], "%s-msi%s", h->devname, |
|---|
| 8054 | 8117 | h->msix_vectors ? "x" : ""); |
|---|
| 8055 | | - rc = request_irq(pci_irq_vector(h->pdev, 0), |
|---|
| 8118 | + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), |
|---|
| 8056 | 8119 | msixhandler, 0, |
|---|
| 8057 | 8120 | h->intrname[0], |
|---|
| 8058 | 8121 | &h->q[h->intr_mode]); |
|---|
| 8059 | 8122 | } else { |
|---|
| 8060 | 8123 | sprintf(h->intrname[h->intr_mode], |
|---|
| 8061 | 8124 | "%s-intx", h->devname); |
|---|
| 8062 | | - rc = request_irq(pci_irq_vector(h->pdev, 0), |
|---|
| 8125 | + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), |
|---|
| 8063 | 8126 | intxhandler, IRQF_SHARED, |
|---|
| 8064 | 8127 | h->intrname[0], |
|---|
| 8065 | 8128 | &h->q[h->intr_mode]); |
|---|
| .. | .. |
|---|
| 8067 | 8130 | } |
|---|
| 8068 | 8131 | if (rc) { |
|---|
| 8069 | 8132 | dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", |
|---|
| 8070 | | - pci_irq_vector(h->pdev, 0), h->devname); |
|---|
| 8133 | + pci_irq_vector(h->pdev, irq_vector), h->devname); |
|---|
| 8071 | 8134 | hpsa_free_irqs(h); |
|---|
| 8072 | 8135 | return -ENODEV; |
|---|
| 8073 | 8136 | } |
|---|
| .. | .. |
|---|
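The irq_vector additions above account for hpsa_simple_mode: when the module parameter forces simple mode, the driver indexes pci_irq_vector() with h->intr_mode instead of vector 0, and the request and free paths now agree on that index. A small illustration, as a hypothetical helper rather than driver code:

static int example_irq_vector(struct ctlr_info *h)
{
	int irq_vector = 0;

	if (hpsa_simple_mode)		/* module parameter: disable performant mode */
		irq_vector = h->intr_mode;

	/* The same vector index is used for request_irq() and free_irq(). */
	return pci_irq_vector(h->pdev, irq_vector);
}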
| 8077 | 8140 | static int hpsa_kdump_soft_reset(struct ctlr_info *h) |
|---|
| 8078 | 8141 | { |
|---|
| 8079 | 8142 | int rc; |
|---|
| 8080 | | - hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); |
|---|
| 8143 | + hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); |
|---|
| 8081 | 8144 | |
|---|
| 8082 | 8145 | dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); |
|---|
| 8083 | 8146 | rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); |
|---|
| .. | .. |
|---|
| 8104 | 8167 | for (i = 0; i < h->nreply_queues; i++) { |
|---|
| 8105 | 8168 | if (!h->reply_queue[i].head) |
|---|
| 8106 | 8169 | continue; |
|---|
| 8107 | | - pci_free_consistent(h->pdev, |
|---|
| 8170 | + dma_free_coherent(&h->pdev->dev, |
|---|
| 8108 | 8171 | h->reply_queue_size, |
|---|
| 8109 | 8172 | h->reply_queue[i].head, |
|---|
| 8110 | 8173 | h->reply_queue[i].busaddr); |
|---|
| .. | .. |
|---|
| 8133 | 8196 | destroy_workqueue(h->rescan_ctlr_wq); |
|---|
| 8134 | 8197 | h->rescan_ctlr_wq = NULL; |
|---|
| 8135 | 8198 | } |
|---|
| 8199 | + if (h->monitor_ctlr_wq) { |
|---|
| 8200 | + destroy_workqueue(h->monitor_ctlr_wq); |
|---|
| 8201 | + h->monitor_ctlr_wq = NULL; |
|---|
| 8202 | + } |
|---|
| 8203 | + |
|---|
| 8136 | 8204 | kfree(h); /* init_one 1 */ |
|---|
| 8137 | 8205 | } |
|---|
| 8138 | 8206 | |
|---|
| .. | .. |
|---|
| 8481 | 8549 | |
|---|
| 8482 | 8550 | spin_lock_irqsave(&h->lock, flags); |
|---|
| 8483 | 8551 | if (!h->remove_in_progress) |
|---|
| 8484 | | - schedule_delayed_work(&h->event_monitor_work, |
|---|
| 8485 | | - HPSA_EVENT_MONITOR_INTERVAL); |
|---|
| 8552 | + queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, |
|---|
| 8553 | + HPSA_EVENT_MONITOR_INTERVAL); |
|---|
| 8486 | 8554 | spin_unlock_irqrestore(&h->lock, flags); |
|---|
| 8487 | 8555 | } |
|---|
| 8488 | 8556 | |
|---|
| .. | .. |
|---|
| 8527 | 8595 | |
|---|
| 8528 | 8596 | spin_lock_irqsave(&h->lock, flags); |
|---|
| 8529 | 8597 | if (!h->remove_in_progress) |
|---|
| 8530 | | - schedule_delayed_work(&h->monitor_ctlr_work, |
|---|
| 8598 | + queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, |
|---|
| 8531 | 8599 | h->heartbeat_sample_interval); |
|---|
| 8532 | 8600 | spin_unlock_irqrestore(&h->lock, flags); |
|---|
| 8533 | 8601 | } |
|---|
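The heartbeat and event-monitor workers above are re-armed on a driver-owned workqueue, h->monitor_ctlr_wq, rather than on the system workqueue, so controller teardown can destroy that queue and know no monitor work remains pending. A condensed lifecycle sketch, assuming the same call sites as the hunks above:

/* probe (hpsa_init_one): */
h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
if (!h->monitor_ctlr_wq)
	return -ENOMEM;		/* sketch only: the real code jumps to clean7 */

/* periodic re-arm, under h->lock and only if no removal is in progress: */
queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
		   h->heartbeat_sample_interval);

/* remove (hpsa_remove_one / free_ctlr_info): */
destroy_workqueue(h->monitor_ctlr_wq);
h->monitor_ctlr_wq = NULL;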
| .. | .. |
|---|
| 8643 | 8711 | number_of_controllers++; |
|---|
| 8644 | 8712 | |
|---|
| 8645 | 8713 | /* configure PCI DMA stuff */ |
|---|
| 8646 | | - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
|---|
| 8714 | + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); |
|---|
| 8647 | 8715 | if (rc == 0) { |
|---|
| 8648 | 8716 | dac = 1; |
|---|
| 8649 | 8717 | } else { |
|---|
| 8650 | | - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
|---|
| 8718 | + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
|---|
| 8651 | 8719 | if (rc == 0) { |
|---|
| 8652 | 8720 | dac = 0; |
|---|
| 8653 | 8721 | } else { |
|---|
| .. | .. |
|---|
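The probe path above keeps the 64-bit-then-32-bit DMA mask policy, only expressed through dma_set_mask() on &pdev->dev; the dac flag records which width was accepted. A compact restatement of that fallback, as an illustrative helper:

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 64;	/* full 64-bit addressing available */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return 32;	/* fall back to 32-bit addressing */
	return -ENODEV;		/* no usable mask, probe must fail */
}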
| 8693 | 8761 | if (!h->resubmit_wq) { |
|---|
| 8694 | 8762 | rc = -ENOMEM; |
|---|
| 8695 | 8763 | goto clean7; /* aer/h */ |
|---|
| 8764 | + } |
|---|
| 8765 | + |
|---|
| 8766 | + h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); |
|---|
| 8767 | + if (!h->monitor_ctlr_wq) { |
|---|
| 8768 | + rc = -ENOMEM; |
|---|
| 8769 | + goto clean7; |
|---|
| 8696 | 8770 | } |
|---|
| 8697 | 8771 | |
|---|
| 8698 | 8772 | /* |
|---|
| .. | .. |
|---|
| 8826 | 8900 | destroy_workqueue(h->rescan_ctlr_wq); |
|---|
| 8827 | 8901 | h->rescan_ctlr_wq = NULL; |
|---|
| 8828 | 8902 | } |
|---|
| 8829 | | - kfree(h); |
|---|
| 8903 | + if (h->monitor_ctlr_wq) { |
|---|
| 8904 | + destroy_workqueue(h->monitor_ctlr_wq); |
|---|
| 8905 | + h->monitor_ctlr_wq = NULL; |
|---|
| 8906 | + } |
|---|
| 8907 | + hpda_free_ctlr_info(h); |
|---|
| 8830 | 8908 | return rc; |
|---|
| 8831 | 8909 | } |
|---|
| 8832 | 8910 | |
|---|
| .. | .. |
|---|
| 8848 | 8926 | RAID_CTLR_LUNID, TYPE_CMD)) { |
|---|
| 8849 | 8927 | goto out; |
|---|
| 8850 | 8928 | } |
|---|
| 8851 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 8852 | | - PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); |
|---|
| 8929 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, |
|---|
| 8930 | + DEFAULT_TIMEOUT); |
|---|
| 8853 | 8931 | if (rc) |
|---|
| 8854 | 8932 | goto out; |
|---|
| 8855 | 8933 | if (c->err_info->CommandStatus != 0) |
|---|
| .. | .. |
|---|
| 8884 | 8962 | RAID_CTLR_LUNID, TYPE_CMD)) |
|---|
| 8885 | 8963 | goto errout; |
|---|
| 8886 | 8964 | |
|---|
| 8887 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 8888 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 8965 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 8966 | + NO_TIMEOUT); |
|---|
| 8889 | 8967 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
|---|
| 8890 | 8968 | goto errout; |
|---|
| 8891 | 8969 | |
|---|
| .. | .. |
|---|
| 8896 | 8974 | RAID_CTLR_LUNID, TYPE_CMD)) |
|---|
| 8897 | 8975 | goto errout; |
|---|
| 8898 | 8976 | |
|---|
| 8899 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 8900 | | - PCI_DMA_TODEVICE, NO_TIMEOUT); |
|---|
| 8977 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, |
|---|
| 8978 | + NO_TIMEOUT); |
|---|
| 8901 | 8979 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
|---|
| 8902 | 8980 | goto errout; |
|---|
| 8903 | 8981 | |
|---|
| .. | .. |
|---|
| 8906 | 8984 | RAID_CTLR_LUNID, TYPE_CMD)) |
|---|
| 8907 | 8985 | goto errout; |
|---|
| 8908 | 8986 | |
|---|
| 8909 | | - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
|---|
| 8910 | | - PCI_DMA_FROMDEVICE, NO_TIMEOUT); |
|---|
| 8987 | + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, |
|---|
| 8988 | + NO_TIMEOUT); |
|---|
| 8911 | 8989 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
|---|
| 8912 | 8990 | goto errout; |
|---|
| 8913 | 8991 | |
|---|
| .. | .. |
|---|
| 8973 | 9051 | cancel_delayed_work_sync(&h->event_monitor_work); |
|---|
| 8974 | 9052 | destroy_workqueue(h->rescan_ctlr_wq); |
|---|
| 8975 | 9053 | destroy_workqueue(h->resubmit_wq); |
|---|
| 9054 | + destroy_workqueue(h->monitor_ctlr_wq); |
|---|
| 8976 | 9055 | |
|---|
| 8977 | 9056 | hpsa_delete_sas_host(h); |
|---|
| 8978 | 9057 | |
|---|
| .. | .. |
|---|
| 9253 | 9332 | static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) |
|---|
| 9254 | 9333 | { |
|---|
| 9255 | 9334 | if (h->ioaccel_cmd_pool) { |
|---|
| 9256 | | - pci_free_consistent(h->pdev, |
|---|
| 9257 | | - h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), |
|---|
| 9258 | | - h->ioaccel_cmd_pool, |
|---|
| 9259 | | - h->ioaccel_cmd_pool_dhandle); |
|---|
| 9335 | + dma_free_coherent(&h->pdev->dev, |
|---|
| 9336 | + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), |
|---|
| 9337 | + h->ioaccel_cmd_pool, |
|---|
| 9338 | + h->ioaccel_cmd_pool_dhandle); |
|---|
| 9260 | 9339 | h->ioaccel_cmd_pool = NULL; |
|---|
| 9261 | 9340 | h->ioaccel_cmd_pool_dhandle = 0; |
|---|
| 9262 | 9341 | } |
|---|
| .. | .. |
|---|
| 9279 | 9358 | BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % |
|---|
| 9280 | 9359 | IOACCEL1_COMMANDLIST_ALIGNMENT); |
|---|
| 9281 | 9360 | h->ioaccel_cmd_pool = |
|---|
| 9282 | | - pci_alloc_consistent(h->pdev, |
|---|
| 9361 | + dma_alloc_coherent(&h->pdev->dev, |
|---|
| 9283 | 9362 | h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), |
|---|
| 9284 | | - &(h->ioaccel_cmd_pool_dhandle)); |
|---|
| 9363 | + &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); |
|---|
| 9285 | 9364 | |
|---|
| 9286 | 9365 | h->ioaccel1_blockFetchTable = |
|---|
| 9287 | 9366 | kmalloc(((h->ioaccel_maxsg + 1) * |
|---|
| .. | .. |
|---|
| 9306 | 9385 | hpsa_free_ioaccel2_sg_chain_blocks(h); |
|---|
| 9307 | 9386 | |
|---|
| 9308 | 9387 | if (h->ioaccel2_cmd_pool) { |
|---|
| 9309 | | - pci_free_consistent(h->pdev, |
|---|
| 9310 | | - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), |
|---|
| 9311 | | - h->ioaccel2_cmd_pool, |
|---|
| 9312 | | - h->ioaccel2_cmd_pool_dhandle); |
|---|
| 9388 | + dma_free_coherent(&h->pdev->dev, |
|---|
| 9389 | + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), |
|---|
| 9390 | + h->ioaccel2_cmd_pool, |
|---|
| 9391 | + h->ioaccel2_cmd_pool_dhandle); |
|---|
| 9313 | 9392 | h->ioaccel2_cmd_pool = NULL; |
|---|
| 9314 | 9393 | h->ioaccel2_cmd_pool_dhandle = 0; |
|---|
| 9315 | 9394 | } |
|---|
| .. | .. |
|---|
| 9332 | 9411 | BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % |
|---|
| 9333 | 9412 | IOACCEL2_COMMANDLIST_ALIGNMENT); |
|---|
| 9334 | 9413 | h->ioaccel2_cmd_pool = |
|---|
| 9335 | | - pci_alloc_consistent(h->pdev, |
|---|
| 9414 | + dma_alloc_coherent(&h->pdev->dev, |
|---|
| 9336 | 9415 | h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), |
|---|
| 9337 | | - &(h->ioaccel2_cmd_pool_dhandle)); |
|---|
| 9416 | + &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); |
|---|
| 9338 | 9417 | |
|---|
| 9339 | 9418 | h->ioaccel2_blockFetchTable = |
|---|
| 9340 | 9419 | kmalloc(((h->ioaccel_maxsg + 1) * |
|---|
| .. | .. |
|---|
| 9407 | 9486 | h->reply_queue_size = h->max_commands * sizeof(u64); |
|---|
| 9408 | 9487 | |
|---|
| 9409 | 9488 | for (i = 0; i < h->nreply_queues; i++) { |
|---|
| 9410 | | - h->reply_queue[i].head = pci_alloc_consistent(h->pdev, |
|---|
| 9489 | + h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, |
|---|
| 9411 | 9490 | h->reply_queue_size, |
|---|
| 9412 | | - &(h->reply_queue[i].busaddr)); |
|---|
| 9491 | + &h->reply_queue[i].busaddr, |
|---|
| 9492 | + GFP_KERNEL); |
|---|
| 9413 | 9493 | if (!h->reply_queue[i].head) { |
|---|
| 9414 | 9494 | rc = -ENOMEM; |
|---|
| 9415 | 9495 | goto clean1; /* rq, ioaccel */ |
|---|
| .. | .. |
|---|
| 9684 | 9764 | return 0; |
|---|
| 9685 | 9765 | |
|---|
| 9686 | 9766 | free_sas_phy: |
|---|
| 9687 | | - hpsa_free_sas_phy(hpsa_sas_phy); |
|---|
| 9767 | + sas_phy_free(hpsa_sas_phy->phy); |
|---|
| 9768 | + kfree(hpsa_sas_phy); |
|---|
| 9688 | 9769 | free_sas_port: |
|---|
| 9689 | 9770 | hpsa_free_sas_port(hpsa_sas_port); |
|---|
| 9690 | 9771 | free_sas_node: |
|---|
| .. | .. |
|---|
| 9720 | 9801 | |
|---|
| 9721 | 9802 | rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); |
|---|
| 9722 | 9803 | if (rc) |
|---|
| 9723 | | - goto free_sas_port; |
|---|
| 9804 | + goto free_sas_rphy; |
|---|
| 9724 | 9805 | |
|---|
| 9725 | 9806 | return 0; |
|---|
| 9726 | 9807 | |
|---|
| 9808 | +free_sas_rphy: |
|---|
| 9809 | + sas_rphy_free(rphy); |
|---|
| 9727 | 9810 | free_sas_port: |
|---|
| 9728 | 9811 | hpsa_free_sas_port(hpsa_sas_port); |
|---|
| 9729 | 9812 | device->sas_port = NULL; |
|---|