forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-09 244b2c5ca8b14627e4a17755e5922221e121c771
kernel/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -40,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"1.1.4-130"
+#define DRIVER_VERSION		"1.2.16-010"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		1
-#define DRIVER_RELEASE		4
-#define DRIVER_REVISION		130
+#define DRIVER_MINOR		2
+#define DRIVER_RELEASE		16
+#define DRIVER_REVISION		10
 
 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 			DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +67,15 @@
 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 	struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested);
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, unsigned long timeout_secs);
 
 /* for flags argument to pqi_submit_raid_request_synchronous() */
 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
@@ -113,6 +115,7 @@
 	PQI_EVENT_TYPE_HARDWARE,
 	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
 	PQI_EVENT_TYPE_LOGICAL_DEVICE,
+	PQI_EVENT_TYPE_OFA,
 	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
 	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
 };
@@ -141,6 +144,18 @@
 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
 	"\t\tSupported: none, reboot, panic\n"
 	"\t\tDefault: none");
+
+static int pqi_expose_ld_first;
+module_param_named(expose_ld_first,
+	pqi_expose_ld_first, int, 0644);
+MODULE_PARM_DESC(expose_ld_first,
+	"Expose logical drives before physical drives.");
+
+static int pqi_hide_vsep;
+module_param_named(hide_vsep,
+	pqi_hide_vsep, int, 0644);
+MODULE_PARM_DESC(hide_vsep,
+	"Hide the virtual SEP for direct attached drives.");
 
 static char *raid_levels[] = {
 	"RAID-0",
@@ -176,16 +191,14 @@
 	scmd->scsi_done(scmd);
 }
 
+static inline void pqi_disable_write_same(struct scsi_device *sdev)
+{
+	sdev->no_write_same = 1;
+}
+
 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
 {
 	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
-}
-
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
-	void *hostdata = shost_priv(shost);
-
-	return *((struct pqi_ctrl_info **)hostdata);
 }
 
 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
@@ -227,6 +240,21 @@
 	sis_write_driver_scratch(ctrl_info, mode);
 }
 
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_requests;
+}
+
 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
 {
 	ctrl_info->block_requests = true;
@@ -239,11 +267,6 @@
 	wake_up_all(&ctrl_info->block_requests_wait);
 	pqi_retry_raid_bypass_requests(ctrl_info);
 	scsi_unblock_requests(ctrl_info->scsi_host);
-}
-
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
-{
-	return ctrl_info->block_requests;
 }
 
 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
@@ -275,16 +298,6 @@
 	return remaining_msecs;
 }
 
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_dec(&ctrl_info->num_busy_threads);
-}
-
 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
 {
 	while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,10 +325,48 @@
 	return device->in_reset;
 }
 
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = true;
+}
+
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = false;
+}
+
+static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_ofa;
+}
+
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+{
+	device->in_remove = true;
+}
+
+static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	return device->in_remove && !ctrl_info->in_shutdown;
+}
+
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_shutdown;
+}
+
 static inline void pqi_schedule_rescan_worker_with_delay(
 	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
 {
 	if (pqi_ctrl_offline(ctrl_info))
+		return;
+	if (pqi_ctrl_in_ofa(ctrl_info))
 		return;
 
 	schedule_delayed_work(&ctrl_info->rescan_work, delay);
@@ -326,7 +377,7 @@
 	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
 }
 
-#define PQI_RESCAN_WORK_DELAY	(10 * HZ)
+#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)
 
 static inline void pqi_schedule_rescan_worker_delayed(
 	struct pqi_ctrl_info *ctrl_info)
@@ -339,6 +390,11 @@
 	cancel_delayed_work_sync(&ctrl_info->rescan_work);
 }
 
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+	cancel_work_sync(&ctrl_info->event_work);
+}
+
 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 {
 	if (!ctrl_info->heartbeat_counter)
@@ -347,18 +403,39 @@
 	return readl(ctrl_info->heartbeat_counter);
 }
 
+static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
+{
+	if (!ctrl_info->soft_reset_status)
+		return 0;
+
+	return readb(ctrl_info->soft_reset_status);
+}
+
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
+	u8 clear)
+{
+	u8 status;
+
+	if (!ctrl_info->soft_reset_status)
+		return;
+
+	status = pqi_read_soft_reset_status(ctrl_info);
+	status &= ~clear;
+	writeb(status, ctrl_info->soft_reset_status);
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
-	size_t buffer_length, int data_direction)
+	size_t buffer_length, enum dma_data_direction data_direction)
 {
 	dma_addr_t bus_address;
 
-	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
 		return 0;
 
-	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
 		data_direction);
-	if (pci_dma_mapping_error(pci_dev, bus_address))
+	if (dma_mapping_error(&pci_dev->dev, bus_address))
 		return -ENOMEM;
 
 	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +447,15 @@
 
 static void pqi_pci_unmap(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *descriptors, int num_descriptors,
-	int data_direction)
+	enum dma_data_direction data_direction)
 {
 	int i;
 
-	if (data_direction == PCI_DMA_NONE)
+	if (data_direction == DMA_NONE)
 		return;
 
 	for (i = 0; i < num_descriptors; i++)
-		pci_unmap_single(pci_dev,
+		dma_unmap_single(&pci_dev->dev,
 			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
 			get_unaligned_le32(&descriptors[i].length),
 			data_direction);
@@ -387,10 +464,10 @@
 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_raid_path_request *request, u8 cmd,
 	u8 *scsi3addr, void *buffer, size_t buffer_length,
-	u16 vpd_page, int *pci_direction)
+	u16 vpd_page, enum dma_data_direction *dir)
 {
 	u8 *cdb;
-	int pci_dir;
+	size_t cdb_length = buffer_length;
 
 	memset(request, 0, sizeof(*request));
@@ -413,68 +490,79 @@
 			cdb[1] = 0x1;
 			cdb[2] = (u8)vpd_page;
 		}
-		cdb[4] = (u8)buffer_length;
+		cdb[4] = (u8)cdb_length;
 		break;
 	case CISS_REPORT_LOG:
 	case CISS_REPORT_PHYS:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = cmd;
 		if (cmd == CISS_REPORT_PHYS)
-			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
 		else
-			cdb[1] = CISS_REPORT_LOG_EXTENDED;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case CISS_GET_RAID_MAP:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = CISS_READ;
 		cdb[1] = CISS_GET_RAID_MAP;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = BMIC_FLUSH_CACHE;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SENSE_DIAG_OPTIONS:
+		cdb_length = 0;
+		fallthrough;
 	case BMIC_IDENTIFY_CONTROLLER:
 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = BMIC_READ;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SET_DIAG_OPTIONS:
+		cdb_length = 0;
+		fallthrough;
 	case BMIC_WRITE_HOST_WELLNESS:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
+		break;
+	case BMIC_CSMI_PASSTHRU:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		cdb[0] = BMIC_WRITE;
+		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
+		cdb[6] = cmd;
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
 	default:
-		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
-			cmd);
+		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
 		break;
 	}
 
 	switch (request->data_direction) {
 	case SOP_READ_FLAG:
-		pci_dir = PCI_DMA_FROMDEVICE;
+		*dir = DMA_FROM_DEVICE;
 		break;
 	case SOP_WRITE_FLAG:
-		pci_dir = PCI_DMA_TODEVICE;
+		*dir = DMA_TO_DEVICE;
 		break;
 	case SOP_NO_DIRECTION_FLAG:
-		pci_dir = PCI_DMA_NONE;
+		*dir = DMA_NONE;
 		break;
 	default:
-		pci_dir = PCI_DMA_BIDIRECTIONAL;
+		*dir = DMA_BIDIRECTIONAL;
 		break;
 	}
 
-	*pci_direction = pci_dir;
-
 	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
-		buffer, buffer_length, pci_dir);
+		buffer, buffer_length, *dir);
 }
 
 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -512,63 +600,79 @@
 	atomic_dec(&io_request->refcount);
 }
 
-static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
-	struct bmic_identify_controller *buffer)
+static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
 {
 	int rc;
-	int pci_direction;
 	struct pqi_raid_path_request request;
+	enum dma_data_direction dir;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
-		sizeof(*buffer), 0, &pci_direction);
+		cmd, scsi3addr, buffer,
+		buffer_length, vpd_page, &dir);
 	if (rc)
 		return rc;
 
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+		error_info, timeout_msecs);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
 	return rc;
 }
 
-static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+/* helper functions for pqi_send_scsi_raid_request */
+
+static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+}
+
+static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+}
+
+static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_identify_controller *buffer)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
+		buffer, sizeof(*buffer));
+}
+
+static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_sense_subsystem_info *sense_info)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info,
+		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+		sizeof(*sense_info));
+}
+
+static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
-	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
-		&pci_direction);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-	return rc;
+	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
+		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
 }
 
 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device,
-	struct bmic_identify_physical_device *buffer,
-	size_t buffer_length)
+	struct bmic_identify_physical_device *buffer, size_t buffer_length)
 {
 	int rc;
-	int pci_direction;
+	enum dma_data_direction dir;
 	u16 bmic_device_index;
 	struct pqi_raid_path_request request;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
 		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+		buffer_length, 0, &dir);
 	if (rc)
 		return rc;
 
@@ -579,8 +683,7 @@
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 		0, NULL, NO_TIMEOUT);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
 	return rc;
 }
@@ -589,8 +692,6 @@
 	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
 	struct bmic_flush_cache *flush_cache;
 
 	/*
@@ -606,44 +707,54 @@
 
 	flush_cache->shutdown_event = shutdown_event;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
-		sizeof(*flush_cache), 0, &pci_direction);
-	if (rc)
-		goto out;
+	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
+		sizeof(*flush_cache));
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-out:
 	kfree(flush_cache);
 
 	return rc;
 }
 
-static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
-	void *buffer, size_t buffer_length)
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
+		buffer, buffer_length, error_info);
+}
+
+#define PQI_FETCH_PTRAID_DATA	(1 << 31)
+
+static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
+	struct bmic_diag_options *diag;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (!diag)
+		return -ENOMEM;
+
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
+		diag, sizeof(*diag));
 	if (rc)
-		return rc;
+		goto out;
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
+	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+		sizeof(*diag));
+
+out:
+	kfree(diag);
 
 	return rc;
+}
+
+static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+	void *buffer, size_t buffer_length)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
+		buffer, buffer_length);
 }
 
 #pragma pack(1)
@@ -758,7 +869,7 @@
 	return rc;
 }
 
-#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)
 
 static void pqi_update_time_worker(struct work_struct *work)
 {
@@ -792,25 +903,11 @@
 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
 }
 
-static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 	void *buffer, size_t buffer_length)
 {
-	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-	return rc;
+	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
+		buffer_length);
 }
 
 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1028,29 +1125,11 @@
 	char *err_msg;
 	u32 raid_map_size;
 	u32 r5or6_blocks_per_row;
-	unsigned int num_phys_disks;
-	unsigned int num_raid_map_entries;
 
 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 
 	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
 		err_msg = "RAID map too small";
-		goto bad_raid_map;
-	}
-
-	if (raid_map_size > sizeof(*raid_map)) {
-		err_msg = "RAID map too large";
-		goto bad_raid_map;
-	}
-
-	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
-		(get_unaligned_le16(&raid_map->data_disks_per_row) +
-		get_unaligned_le16(&raid_map->metadata_disks_per_row));
-	num_raid_map_entries = num_phys_disks *
-		get_unaligned_le16(&raid_map->row_cnt);
-
-	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
-		err_msg = "invalid number of map entries in RAID map";
 		goto bad_raid_map;
 	}
@@ -1092,28 +1171,46 @@
 	struct pqi_scsi_dev *device)
 {
 	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
+	u32 raid_map_size;
 	struct raid_map *raid_map;
 
 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 	if (!raid_map)
 		return -ENOMEM;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
-		sizeof(*raid_map), 0, &pci_direction);
-	if (rc)
-		goto error;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+		device->scsi3addr, raid_map, sizeof(*raid_map),
+		0, NULL, NO_TIMEOUT);
 
 	if (rc)
 		goto error;
+
+	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+	if (raid_map_size > sizeof(*raid_map)) {
+
+		kfree(raid_map);
+
+		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
+		if (!raid_map)
+			return -ENOMEM;
+
+		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+			device->scsi3addr, raid_map, raid_map_size,
+			0, NULL, NO_TIMEOUT);
+		if (rc)
+			goto error;
+
+		if (get_unaligned_le32(&raid_map->structure_size)
+			!= raid_map_size) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"Requested %d bytes, received %d bytes",
+				raid_map_size,
+				get_unaligned_le32(&raid_map->structure_size));
+			rc = -EINVAL;
+			goto error;
+		}
+	}
 
 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 	if (rc)
@@ -1145,9 +1242,9 @@
 	if (rc)
 		goto out;
 
-#define RAID_BYPASS_STATUS	4
-#define RAID_BYPASS_CONFIGURED	0x1
-#define RAID_BYPASS_ENABLED	0x2
+#define RAID_BYPASS_STATUS		4
+#define RAID_BYPASS_CONFIGURED		0x1
+#define RAID_BYPASS_ENABLED		0x2
 
 	bypass_status = buffer[RAID_BYPASS_STATUS];
 	device->raid_bypass_configured =
@@ -1203,30 +1300,59 @@
 	device->volume_offline = volume_offline;
 }
 
-#define PQI_INQUIRY_PAGE0_RETRIES	3
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device,
+	struct bmic_identify_physical_device *id_phys)
+{
+	int rc;
 
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+	memset(id_phys, 0, sizeof(*id_phys));
+
+	rc = pqi_identify_physical_device(ctrl_info, device,
+		id_phys, sizeof(*id_phys));
+	if (rc) {
+		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+		return rc;
+	}
+
+	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+	memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+	device->box_index = id_phys->box_index;
+	device->phys_box_on_bus = id_phys->phys_box_on_bus;
+	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+	device->queue_depth =
+		get_unaligned_le16(&id_phys->current_queue_depth_limit);
+	device->active_path_index = id_phys->active_path_number;
+	device->path_map = id_phys->redundant_path_present_map;
+	memcpy(&device->box,
+		&id_phys->alternate_paths_phys_box_on_port,
+		sizeof(device->box));
+	memcpy(&device->phys_connector,
+		&id_phys->alternate_paths_phys_connector,
+		sizeof(device->phys_connector));
+	device->bay = id_phys->phys_bay_in_box;
+
+	return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
 	u8 *buffer;
-	unsigned int retries;
 
 	buffer = kmalloc(64, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
 	/* Send an inquiry to the device to see what it is. */
-	for (retries = 0;;) {
-		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
-			buffer, 64);
-		if (rc == 0)
-			break;
-		if (pqi_is_logical_device(device) ||
-			rc != PQI_CMD_STATUS_ABORTED ||
-			++retries > PQI_INQUIRY_PAGE0_RETRIES)
-			goto out;
-	}
+	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+	if (rc)
+		goto out;
 
 	scsi_sanitize_inquiry_string(&buffer[8], 8);
 	scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1235,7 +1361,7 @@
 	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
 	memcpy(device->model, &buffer[16], sizeof(device->model));
 
-	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+	if (device->devtype == TYPE_DISK) {
 		if (device->is_external_raid_device) {
 			device->raid_level = SA_RAID_UNKNOWN;
 			device->volume_status = CISS_LV_OK;
@@ -1253,33 +1379,21 @@
 	return rc;
 }
 
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device,
 	struct bmic_identify_physical_device *id_phys)
 {
 	int rc;
 
-	memset(id_phys, 0, sizeof(*id_phys));
+	if (device->is_expander_smp_device)
+		return 0;
 
-	rc = pqi_identify_physical_device(ctrl_info, device,
-		id_phys, sizeof(*id_phys));
-	if (rc) {
-		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
-		return;
-	}
+	if (pqi_is_logical_device(device))
+		rc = pqi_get_logical_device_info(ctrl_info, device);
+	else
+		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
 
-	device->queue_depth =
-		get_unaligned_le16(&id_phys->current_queue_depth_limit);
-	device->device_type = id_phys->device_type;
-	device->active_path_index = id_phys->active_path_number;
-	device->path_map = id_phys->redundant_path_present_map;
-	memcpy(&device->box,
-		&id_phys->alternate_paths_phys_box_on_port,
-		sizeof(device->box));
-	memcpy(&device->phys_connector,
-		&id_phys->alternate_paths_phys_connector,
-		sizeof(device->phys_connector));
-	device->bay = id_phys->phys_bay_in_box;
+	return rc;
 }
 
 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1409,9 +1523,23 @@
 	return rc;
 }
 
+#define PQI_PENDING_IO_TIMEOUT_SECS	20
+
 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
+	int rc;
+
+	pqi_device_remove_start(device);
+
+	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun,
+			atomic_read(&device->scsi_cmds_outstanding));
+
 	if (pqi_is_logical_device(device))
 		scsi_remove_device(device->sdev);
 	else
@@ -1425,10 +1553,8 @@
 {
 	struct pqi_scsi_dev *device;
 
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry)
-		if (device->bus == bus && device->target == target &&
-			device->lun == lun)
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+		if (device->bus == bus && device->target == target && device->lun == lun)
 			return device;
 
 	return NULL;
@@ -1454,15 +1580,12 @@
 };
 
 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device_to_find,
-	struct pqi_scsi_dev **matching_device)
+	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
 {
 	struct pqi_scsi_dev *device;
 
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry) {
-		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
-			device->scsi3addr)) {
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
 			*matching_device = device;
 			if (pqi_device_equal(device_to_find, device)) {
 				if (device_to_find->volume_offline)
@@ -1474,6 +1597,14 @@
 	}
 
 	return DEVICE_NOT_FOUND;
+}
+
+static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return "Enclosure SMP ";
+
+	return scsi_device_type(device->devtype);
 }
 
 #define PQI_DEV_INFO_BUFFER_LENGTH	128
@@ -1488,48 +1619,48 @@
 		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
 
 	if (device->target_lun_valid)
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"%d:%d",
 			device->target,
 			device->lun);
 	else
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"-:-");
 
 	if (pqi_is_logical_device(device))
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			" %08x%08x",
 			*((u32 *)&device->scsi3addr),
 			*((u32 *)&device->scsi3addr[4]));
 	else
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			" %016llx", device->sas_address);
 
-	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
 		" %s %.8s %.16s ",
-		scsi_device_type(device->devtype),
+		pqi_device_type(device),
 		device->vendor,
 		device->model);
 
 	if (pqi_is_logical_device(device)) {
 		if (device->devtype == TYPE_DISK)
-			count += snprintf(buffer + count,
+			count += scnprintf(buffer + count,
 				PQI_DEV_INFO_BUFFER_LENGTH - count,
 				"SSDSmartPathCap%c En%c %-12s",
 				device->raid_bypass_configured ? '+' : '-',
 				device->raid_bypass_enabled ? '+' : '-',
 				pqi_raid_level_to_string(device->raid_level));
 	} else {
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"AIO%c", device->aio_enabled ? '+' : '-');
 		if (device->devtype == TYPE_DISK ||
 			device->devtype == TYPE_ZBC)
-			count += snprintf(buffer + count,
+			count += scnprintf(buffer + count,
 				PQI_DEV_INFO_BUFFER_LENGTH - count,
 				" qd=%-6d", device->queue_depth);
 	}
15511682 existing_device->target_lun_valid = true;
15521683 }
15531684
1685
+ if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
1686
+ existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
1687
+ new_device->volume_status == CISS_LV_OK)
1688
+ existing_device->rescan = true;
1689
+
15541690 /* By definition, the scsi3addr and wwid fields are already the same. */
15551691
15561692 existing_device->is_physical_device = new_device->is_physical_device;
15571693 existing_device->is_external_raid_device =
15581694 new_device->is_external_raid_device;
1695
+ existing_device->is_expander_smp_device =
1696
+ new_device->is_expander_smp_device;
15591697 existing_device->aio_enabled = new_device->aio_enabled;
15601698 memcpy(existing_device->vendor, new_device->vendor,
15611699 sizeof(existing_device->vendor));
....@@ -1569,6 +1707,10 @@
15691707 existing_device->active_path_index = new_device->active_path_index;
15701708 existing_device->path_map = new_device->path_map;
15711709 existing_device->bay = new_device->bay;
1710
+ existing_device->box_index = new_device->box_index;
1711
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1712
+ existing_device->phy_connected_dev_type =
1713
+ new_device->phy_connected_dev_type;
15721714 memcpy(existing_device->box, new_device->box,
15731715 sizeof(existing_device->box));
15741716 memcpy(existing_device->phys_connector, new_device->phys_connector,
@@ -1580,6 +1722,7 @@
 	existing_device->raid_bypass_configured =
 		new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled =
 		new_device->raid_bypass_enabled;
+	existing_device->device_offline = false;
 
 	/* To prevent this from being freed later. */
 	new_device->raid_map = NULL;
16111754 device->keep_device = false;
16121755 }
16131756
1757
+static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1758
+{
1759
+ if (device->is_expander_smp_device)
1760
+ return device->sas_port != NULL;
1761
+
1762
+ return device->sdev != NULL;
1763
+}
1764
+
16141765 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
16151766 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
16161767 {
@@ -1634,15 +1785,14 @@
 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 
 	/* Assume that all devices in the existing list have gone away. */
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry)
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
 		device->device_gone = true;
 
 	for (i = 0; i < num_new_devices; i++) {
 		device = new_device_list[i];
 
 		find_result = pqi_scsi_find_entry(ctrl_info, device,
-			&matching_device);
+						&matching_device);
 
 		switch (find_result) {
 		case DEVICE_SAME:
16751825 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
16761826 scsi_device_list_entry) {
16771827 if (device->device_gone) {
1678
- list_del(&device->scsi_device_list_entry);
1828
+ list_del_init(&device->scsi_device_list_entry);
16791829 list_add_tail(&device->delete_list_entry, &delete_list);
16801830 }
16811831 }
@@ -1696,19 +1846,23 @@
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		pqi_ctrl_ofa_done(ctrl_info);
+
 	/* Remove all devices that have gone away. */
-	list_for_each_entry_safe(device, next, &delete_list,
-		delete_list_entry) {
+	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
 		if (device->volume_offline) {
 			pqi_dev_info(ctrl_info, "offline", device);
 			pqi_show_volume_status(ctrl_info, device);
-		} else {
-			pqi_dev_info(ctrl_info, "removed", device);
 		}
-		if (device->sdev)
-			pqi_remove_device(ctrl_info, device);
 		list_del(&device->delete_list_entry);
-		pqi_free_device(device);
+		if (pqi_is_device_added(device)) {
+			pqi_remove_device(ctrl_info, device);
+		} else {
+			if (!device->volume_offline)
+				pqi_dev_info(ctrl_info, "removed", device);
+			pqi_free_device(device);
+		}
 	}
 
 	/*
@@ -1717,20 +1871,27 @@
 	 */
 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
 		scsi_device_list_entry) {
-		if (device->sdev && device->queue_depth !=
-			device->advertised_queue_depth) {
-			device->advertised_queue_depth = device->queue_depth;
-			scsi_change_queue_depth(device->sdev,
-				device->advertised_queue_depth);
+		if (device->sdev) {
+			if (device->queue_depth !=
+				device->advertised_queue_depth) {
+				device->advertised_queue_depth = device->queue_depth;
+				scsi_change_queue_depth(device->sdev,
+					device->advertised_queue_depth);
+			}
+			if (device->rescan) {
+				scsi_rescan_device(&device->sdev->sdev_gendev);
+				device->rescan = false;
+			}
 		}
 	}
 
 	/* Expose any new devices. */
 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
-		if (!device->sdev) {
-			pqi_dev_info(ctrl_info, "added", device);
+		if (!pqi_is_device_added(device)) {
 			rc = pqi_add_device(ctrl_info, device);
-			if (rc) {
+			if (rc == 0) {
+				pqi_dev_info(ctrl_info, "added", device);
+			} else {
 				dev_warn(&ctrl_info->pci_dev->dev,
 					"scsi %d:%d:%d:%d addition failed, device not added\n",
 					ctrl_info->scsi_host->host_no,
17421903 }
17431904 }
17441905
1745
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1906
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
17461907 {
1747
- bool is_supported = false;
1908
+ /*
1909
+ * Only support the HBA controller itself as a RAID
1910
+ * controller. If it's a RAID controller other than
1911
+ * the HBA itself (an external RAID controller, for
1912
+ * example), we don't support it.
1913
+ */
1914
+ if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
1915
+ !pqi_is_hba_lunid(device->scsi3addr))
1916
+ return false;
17481917
1749
- switch (device->devtype) {
1750
- case TYPE_DISK:
1751
- case TYPE_ZBC:
1752
- case TYPE_TAPE:
1753
- case TYPE_MEDIUM_CHANGER:
1754
- case TYPE_ENCLOSURE:
1755
- is_supported = true;
1756
- break;
1757
- case TYPE_RAID:
1758
- /*
1759
- * Only support the HBA controller itself as a RAID
1760
- * controller. If it's a RAID controller other than
1761
- * the HBA itself (an external RAID controller, for
1762
- * example), we don't support it.
1763
- */
1764
- if (pqi_is_hba_lunid(device->scsi3addr))
1765
- is_supported = true;
1766
- break;
1767
- }
1768
-
1769
- return is_supported;
1918
+ return true;
17701919 }
17711920
17721921 static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1776,6 +1925,29 @@
 		return true;
 
 	return false;
+}
+
+static inline void pqi_mask_device(u8 *scsi3addr)
+{
+	scsi3addr[3] |= 0xc0;
+}
+
+static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+{
+	switch (device->device_type) {
+	case SA_DEVICE_TYPE_SAS:
+	case SA_DEVICE_TYPE_EXPANDER_SMP:
+	case SA_DEVICE_TYPE_SES:
+		return true;
+	}
+
+	return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+	return !device->is_physical_device ||
+		!pqi_skip_device(device->scsi3addr);
 }
 
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -1797,6 +1969,8 @@
 	unsigned int num_valid_devices;
 	bool is_physical_device;
 	u8 *scsi3addr;
+	unsigned int physical_index;
+	unsigned int logical_index;
 	static char *out_of_memory_msg =
 		"failed to allocate memory, device discovery stopped";
 
@@ -1832,6 +2006,20 @@
 		rc = -ENOMEM;
 		goto out;
 	}
+
+	if (pqi_hide_vsep) {
+		for (i = num_physicals - 1; i >= 0; i--) {
+			phys_lun_ext_entry =
+				&physdev_list->lun_entries[i];
+			if (CISS_GET_DRIVE_NUMBER(
+				phys_lun_ext_entry->lunid) ==
+				PQI_VSEP_CISS_BTL) {
+				pqi_mask_device(
+					phys_lun_ext_entry->lunid);
+				break;
+			}
+		}
+	}
 }
 
 	num_new_devices = num_physicals + num_logicals;
@@ -1859,19 +2047,23 @@
 
 	device = NULL;
 	num_valid_devices = 0;
+	physical_index = 0;
+	logical_index = 0;
 
 	for (i = 0; i < num_new_devices; i++) {
 
-		if (i < num_physicals) {
+		if ((!pqi_expose_ld_first && i < num_physicals) ||
+			(pqi_expose_ld_first && i >= num_logicals)) {
 			is_physical_device = true;
-			phys_lun_ext_entry = &physdev_list->lun_entries[i];
+			phys_lun_ext_entry =
+				&physdev_list->lun_entries[physical_index++];
 			log_lun_ext_entry = NULL;
 			scsi3addr = phys_lun_ext_entry->lunid;
 		} else {
 			is_physical_device = false;
 			phys_lun_ext_entry = NULL;
 			log_lun_ext_entry =
-				&logdev_list->lun_entries[i - num_physicals];
+				&logdev_list->lun_entries[logical_index++];
 			scsi3addr = log_lun_ext_entry->lunid;
 		}
 
@@ -1886,12 +2078,20 @@
 
 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 		device->is_physical_device = is_physical_device;
-		if (!is_physical_device)
+		if (is_physical_device) {
+			device->device_type = phys_lun_ext_entry->device_type;
+			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
+				device->is_expander_smp_device = true;
+		} else {
 			device->is_external_raid_device =
 				pqi_is_external_raid_addr(scsi3addr);
+		}
+
+		if (!pqi_is_supported_device(device))
+			continue;
 
 		/* Gather information about the device. */
-		rc = pqi_get_device_info(ctrl_info, device);
+		rc = pqi_get_device_info(ctrl_info, device, id_phys);
 		if (rc == -ENOMEM) {
 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
 				out_of_memory_msg);
@@ -1912,39 +2112,24 @@
 			continue;
 		}
 
-		if (!pqi_is_supported_device(device))
-			continue;
-
 		pqi_assign_bus_target_lun(device);
 
 		if (device->is_physical_device) {
 			device->wwid = phys_lun_ext_entry->wwid;
 			if ((phys_lun_ext_entry->device_flags &
-				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
-				phys_lun_ext_entry->aio_handle)
+				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
+				phys_lun_ext_entry->aio_handle) {
 				device->aio_enabled = true;
+				device->aio_handle =
+					phys_lun_ext_entry->aio_handle;
+			}
 		} else {
 			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
 				sizeof(device->volume_id));
 		}
 
-		switch (device->devtype) {
-		case TYPE_DISK:
-		case TYPE_ZBC:
-		case TYPE_ENCLOSURE:
-			if (device->is_physical_device) {
-				device->sas_address =
-					get_unaligned_be64(&device->wwid);
-				if (device->devtype == TYPE_DISK ||
-					device->devtype == TYPE_ZBC) {
-					device->aio_handle =
-						phys_lun_ext_entry->aio_handle;
-					pqi_get_physical_disk_info(ctrl_info,
-						device, id_phys);
-				}
-			}
-			break;
-		}
+		if (pqi_is_device_with_sas_address(device))
+			device->sas_address = get_unaligned_be64(&device->wwid);
 
 		new_device_list[num_valid_devices++] = device;
 	}
@@ -1968,52 +2153,35 @@
 	return rc;
 }
 
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
-	unsigned long flags;
-	struct pqi_scsi_dev *device;
-
-	while (1) {
-		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
-		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
-			struct pqi_scsi_dev, scsi_device_list_entry);
-		if (device)
-			list_del(&device->scsi_device_list_entry);
-
-		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
-			flags);
-
-		if (!device)
-			break;
-
-		if (device->sdev)
-			pqi_remove_device(ctrl_info, device);
-		pqi_free_device(device);
-	}
-}
-
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
-	int rc;
+	int rc = 0;
 
 	if (pqi_ctrl_offline(ctrl_info))
 		return -ENXIO;
 
-	mutex_lock(&ctrl_info->scan_mutex);
-
-	rc = pqi_update_scsi_devices(ctrl_info);
-	if (rc)
+	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
 		pqi_schedule_rescan_worker_delayed(ctrl_info);
-
-	mutex_unlock(&ctrl_info->scan_mutex);
+		rc = -EINPROGRESS;
+	} else {
+		rc = pqi_update_scsi_devices(ctrl_info);
+		if (rc)
+			pqi_schedule_rescan_worker_delayed(ctrl_info);
+		mutex_unlock(&ctrl_info->scan_mutex);
+	}
 
 	return rc;
 }
 
 static void pqi_scan_start(struct Scsi_Host *shost)
 {
-	pqi_scan_scsi_devices(shost_to_hba(shost));
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(shost);
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;
+
+	pqi_scan_scsi_devices(ctrl_info);
}
 
 /* Returns TRUE if scan is finished. */
@@ -2038,6 +2206,12 @@
 {
 	mutex_lock(&ctrl_info->lun_reset_mutex);
 	mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
+{
+	mutex_lock(&ctrl_info->ofa_mutex);
+	mutex_unlock(&ctrl_info->ofa_mutex);
 }
 
 static inline void pqi_set_encryption_info(
@@ -2118,7 +2292,7 @@
 	switch (scmd->cmnd[0]) {
 	case WRITE_6:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_6:
 		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
 			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
@@ -2128,21 +2302,21 @@
 		break;
 	case WRITE_10:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_10:
 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
 		break;
 	case WRITE_12:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_12:
 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
 		break;
 	case WRITE_16:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_16:
 		first_block = get_unaligned_be64(&scmd->cmnd[2]);
 		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
@@ -2256,7 +2430,6 @@
 		offload_to_mirror =
 			(offload_to_mirror >= layout_map_count - 1) ?
 				0 : offload_to_mirror + 1;
-		WARN_ON(offload_to_mirror >= layout_map_count);
 		device->offload_to_mirror = offload_to_mirror;
 		/*
 		 * Avoid direct use of device->offload_to_mirror within this
@@ -2347,9 +2520,6 @@
 			(map_row * total_disks_per_row) + first_column;
 	}
 
-	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
-		return PQI_RAID_BYPASS_INELIGIBLE;
-
 	aio_handle = raid_map->disk_data[map_index].aio_handle;
 	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
 		first_row * strip_size +
@@ -2419,7 +2589,7 @@
 	u8 status;
 
 	pqi_registers = ctrl_info->pqi_registers;
-	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		signature = readq(&pqi_registers->signature);
@@ -2480,10 +2650,9 @@
 		return;
 
 	device->device_offline = true;
-	scsi_device_set_state(sdev, SDEV_OFFLINE);
 	ctrl_info = shost_to_hba(sdev->host);
 	pqi_schedule_rescan_worker(ctrl_info);
-	dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
 		path, ctrl_info->scsi_host->host_no, device->bus,
 		device->target, device->lun);
 }
@@ -2558,10 +2727,25 @@
 		scsi_normalize_sense(error_info->data,
 			sense_data_length, &sshdr) &&
 			sshdr.sense_key == HARDWARE_ERROR &&
-			sshdr.asc == 0x3e &&
-			sshdr.ascq == 0x1) {
-		pqi_take_device_offline(scmd->device, "RAID");
-		host_byte = DID_NO_CONNECT;
+			sshdr.asc == 0x3e) {
+		struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
+		struct pqi_scsi_dev *device = scmd->device->hostdata;
+
+		switch (sshdr.ascq) {
+		case 0x1: /* LOGICAL UNIT FAILURE */
+			if (printk_ratelimit())
+				scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
+					ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+			pqi_take_device_offline(scmd->device, "RAID");
+			host_byte = DID_NO_CONNECT;
+			break;
+
+		default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
+			if (printk_ratelimit())
+				scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
+					sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+			break;
+		}
 	}
 
 	if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
26872871 case SOP_TMF_FUNCTION_SUCCEEDED:
26882872 rc = 0;
26892873 break;
2874
+ case SOP_TMF_REJECTED:
2875
+ rc = -EAGAIN;
2876
+ break;
26902877 default:
26912878 rc = -EIO;
26922879 break;
....@@ -2695,10 +2882,14 @@
26952882 return rc;
26962883 }
26972884
2698
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2699
- struct pqi_queue_group *queue_group)
2885
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
27002886 {
2701
- unsigned int num_responses;
2887
+ pqi_take_ctrl_offline(ctrl_info);
2888
+}
2889
+
2890
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
2891
+{
2892
+ int num_responses;
27022893 pqi_index_t oq_pi;
27032894 pqi_index_t oq_ci;
27042895 struct pqi_io_request *io_request;
....@@ -2710,6 +2901,13 @@
27102901
27112902 while (1) {
27122903 oq_pi = readl(queue_group->oq_pi);
2904
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
2905
+ pqi_invalid_response(ctrl_info);
2906
+ dev_err(&ctrl_info->pci_dev->dev,
2907
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
2908
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
2909
+ return -1;
2910
+ }
27132911 if (oq_pi == oq_ci)
27142912 break;
27152913
....@@ -2718,18 +2916,36 @@
27182916 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
27192917
27202918 request_id = get_unaligned_le16(&response->request_id);
2721
- WARN_ON(request_id >= ctrl_info->max_io_slots);
2919
+ if (request_id >= ctrl_info->max_io_slots) {
2920
+ pqi_invalid_response(ctrl_info);
2921
+ dev_err(&ctrl_info->pci_dev->dev,
2922
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
2923
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
2924
+ return -1;
2925
+ }
27222926
27232927 io_request = &ctrl_info->io_request_pool[request_id];
2724
- WARN_ON(atomic_read(&io_request->refcount) == 0);
2928
+ if (atomic_read(&io_request->refcount) == 0) {
2929
+ pqi_invalid_response(ctrl_info);
2930
+ dev_err(&ctrl_info->pci_dev->dev,
2931
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
2932
+ request_id, oq_pi, oq_ci);
2933
+ return -1;
2934
+ }
27252935
27262936 switch (response->header.iu_type) {
27272937 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
27282938 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
27292939 if (io_request->scmd)
27302940 io_request->scmd->result = 0;
2731
- /* fall through */
2941
+ fallthrough;
27322942 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2943
+ break;
2944
+ case PQI_RESPONSE_IU_VENDOR_GENERAL:
2945
+ io_request->status =
2946
+ get_unaligned_le16(
2947
+ &((struct pqi_vendor_general_response *)
2948
+ response)->status);
27332949 break;
27342950 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
27352951 io_request->status =
@@ -2745,24 +2961,22 @@
 			io_request->error_info = ctrl_info->error_buffer +
 				(get_unaligned_le16(&response->error_index) *
 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
-			pqi_process_io_error(response->header.iu_type,
-				io_request);
+			pqi_process_io_error(response->header.iu_type, io_request);
 			break;
 		default:
+			pqi_invalid_response(ctrl_info);
 			dev_err(&ctrl_info->pci_dev->dev,
-				"unexpected IU type: 0x%x\n",
-				response->header.iu_type);
-			break;
+				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+				response->header.iu_type, oq_pi, oq_ci);
+			return -1;
 		}
 
-		io_request->io_complete_callback(io_request,
-			io_request->context);
+		io_request->io_complete_callback(io_request, io_request->context);
 
 		/*
 		 * Note that the I/O request structure CANNOT BE TOUCHED after
 		 * returning from the I/O completion callback!
 		 */
-
 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
 	}
 
....@@ -2850,6 +3064,110 @@
28503064 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
28513065 }
28523066
3067
+#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3068
+#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3069
+
3070
+static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3071
+ struct pqi_ctrl_info *ctrl_info)
3072
+{
3073
+ unsigned long timeout;
3074
+ u8 status;
3075
+
3076
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3077
+
3078
+ while (1) {
3079
+ status = pqi_read_soft_reset_status(ctrl_info);
3080
+ if (status & PQI_SOFT_RESET_INITIATE)
3081
+ return RESET_INITIATE_DRIVER;
3082
+
3083
+ if (status & PQI_SOFT_RESET_ABORT)
3084
+ return RESET_ABORT;
3085
+
3086
+ if (time_after(jiffies, timeout)) {
3087
+ dev_err(&ctrl_info->pci_dev->dev,
3088
+ "timed out waiting for soft reset status\n");
3089
+ return RESET_TIMEDOUT;
3090
+ }
3091
+
3092
+ if (!sis_is_firmware_running(ctrl_info))
3093
+ return RESET_NORESPONSE;
3094
+
3095
+ ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3096
+ }
3097
+}
3098
+
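
pqi_poll_for_soft_reset_status() above uses the standard jiffies idiom: compute the deadline once (secs * PQI_HZ + jiffies), then test it with time_after(), which stays correct across counter wraparound by doing a signed difference. PQI_HZ in place of raw HZ is presumably a driver-local constant chosen so the timeout arithmetic does not depend on the kernel tick rate; its definition is not in this file, so that is an assumption. The same pattern modeled with a monotonic clock in userspace C (all names here are mine):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/*
 * Signed-difference comparison, the same trick time_after() uses,
 * so the test survives counter wraparound.
 */
static bool after(uint64_t a, uint64_t b)
{
        return (int64_t)(b - a) < 0;
}

/* Poll once per second until done or the deadline passes. */
static int poll_with_timeout(bool (*status_fn)(void), unsigned int secs)
{
        uint64_t deadline = now_ms() + (uint64_t)secs * 1000;

        for (;;) {
                if (status_fn())
                        return 0;
                if (after(now_ms(), deadline))
                        return -1;      /* RESET_TIMEDOUT analogue */
                sleep(1);
        }
}
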
3099
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3100
+ enum pqi_soft_reset_status reset_status)
3101
+{
3102
+ int rc;
3103
+
3104
+ switch (reset_status) {
3105
+ case RESET_INITIATE_DRIVER:
3106
+ case RESET_TIMEDOUT:
3107
+ dev_info(&ctrl_info->pci_dev->dev,
3108
+ "resetting controller %u\n", ctrl_info->ctrl_id);
3109
+ sis_soft_reset(ctrl_info);
3110
+ fallthrough;
3111
+ case RESET_INITIATE_FIRMWARE:
3112
+ rc = pqi_ofa_ctrl_restart(ctrl_info);
3113
+ pqi_ofa_free_host_buffer(ctrl_info);
3114
+ dev_info(&ctrl_info->pci_dev->dev,
3115
+ "Online Firmware Activation for controller %u: %s\n",
3116
+ ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3117
+ break;
3118
+ case RESET_ABORT:
3119
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
3120
+ dev_info(&ctrl_info->pci_dev->dev,
3121
+ "Online Firmware Activation for controller %u: %s\n",
3122
+ ctrl_info->ctrl_id, "ABORTED");
3123
+ break;
3124
+ case RESET_NORESPONSE:
3125
+ pqi_ofa_free_host_buffer(ctrl_info);
3126
+ pqi_take_ctrl_offline(ctrl_info);
3127
+ break;
3128
+ }
3129
+}
3130
+
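
Note the /* fall through */ comments elsewhere in this patch becoming the fallthrough pseudo-keyword, and its deliberate use above so RESET_TIMEDOUT continues into the RESET_INITIATE_FIRMWARE restart path. fallthrough expands to __attribute__((__fallthrough__)) on compilers that support it, which lets -Wimplicit-fallthrough tell intended fall-through apart from a missing break. A small standalone illustration (the macro guard is mine, not the kernel's compiler_attributes.h):

#include <stdio.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)     /* fallback: plain no-op */
#endif

static void describe(int severity)
{
        switch (severity) {
        case 2:
                puts("resetting controller");
                fallthrough;    /* intentional: 2 also does 1's work */
        case 1:
                puts("restarting firmware");
                break;
        default:
                puts("nothing to do");
                break;
        }
}
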
3131
+static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3132
+ struct pqi_event *event)
3133
+{
3134
+ u16 event_id;
3135
+ enum pqi_soft_reset_status status;
3136
+
3137
+ event_id = get_unaligned_le16(&event->event_id);
3138
+
3139
+ mutex_lock(&ctrl_info->ofa_mutex);
3140
+
3141
+ if (event_id == PQI_EVENT_OFA_QUIESCE) {
3142
+ dev_info(&ctrl_info->pci_dev->dev,
3143
+ "Received Online Firmware Activation quiesce event for controller %u\n",
3144
+ ctrl_info->ctrl_id);
3145
+ pqi_ofa_ctrl_quiesce(ctrl_info);
3146
+ pqi_acknowledge_event(ctrl_info, event);
3147
+ if (ctrl_info->soft_reset_handshake_supported) {
3148
+ status = pqi_poll_for_soft_reset_status(ctrl_info);
3149
+ pqi_process_soft_reset(ctrl_info, status);
3150
+ } else {
3151
+ pqi_process_soft_reset(ctrl_info,
3152
+ RESET_INITIATE_FIRMWARE);
3153
+ }
3154
+
3155
+ } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3156
+ pqi_acknowledge_event(ctrl_info, event);
3157
+ pqi_ofa_setup_host_buffer(ctrl_info,
3158
+ le32_to_cpu(event->ofa_bytes_requested));
3159
+ pqi_ofa_host_memory_update(ctrl_info);
3160
+ } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3161
+ pqi_ofa_free_host_buffer(ctrl_info);
3162
+ pqi_acknowledge_event(ctrl_info, event);
3163
+ dev_info(&ctrl_info->pci_dev->dev,
3164
+ "Online Firmware Activation(%u) cancel reason : %u\n",
3165
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
3166
+ }
3167
+
3168
+ mutex_unlock(&ctrl_info->ofa_mutex);
3169
+}
3170
+
28533171 static void pqi_event_worker(struct work_struct *work)
28543172 {
28553173 unsigned int i;
....@@ -2869,6 +3187,11 @@
28693187 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
28703188 if (event->pending) {
28713189 event->pending = false;
3190
+ if (event->event_type == PQI_EVENT_TYPE_OFA) {
3191
+ pqi_ctrl_unbusy(ctrl_info);
3192
+ pqi_ofa_process_event(ctrl_info, event);
3193
+ return;
3194
+ }
28723195 pqi_acknowledge_event(ctrl_info, event);
28733196 }
28743197 event++;
....@@ -2878,7 +3201,7 @@
28783201 pqi_ctrl_unbusy(ctrl_info);
28793202 }
28803203
2881
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
3204
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
28823205
28833206 static void pqi_heartbeat_timer_handler(struct timer_list *t)
28843207 {
....@@ -2947,9 +3270,27 @@
29473270 return pqi_event_type_to_event_index(event_type) != -1;
29483271 }
29493272
2950
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3273
+static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3274
+ struct pqi_event_response *response)
29513275 {
2952
- unsigned int num_events;
3276
+ u16 event_id;
3277
+
3278
+ event_id = get_unaligned_le16(&event->event_id);
3279
+
3280
+ if (event->event_type == PQI_EVENT_TYPE_OFA) {
3281
+ if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3282
+ event->ofa_bytes_requested =
3283
+ response->data.ofa_memory_allocation.bytes_requested;
3284
+ } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3285
+ event->ofa_cancel_reason =
3286
+ response->data.ofa_cancelled.reason;
3287
+ }
3288
+ }
3289
+}
3290
+
3291
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3292
+{
3293
+ int num_events;
29533294 pqi_index_t oq_pi;
29543295 pqi_index_t oq_ci;
29553296 struct pqi_event_queue *event_queue;
....@@ -2963,25 +3304,31 @@
29633304
29643305 while (1) {
29653306 oq_pi = readl(event_queue->oq_pi);
3307
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3308
+ pqi_invalid_response(ctrl_info);
3309
+ dev_err(&ctrl_info->pci_dev->dev,
3310
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3311
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3312
+ return -1;
3313
+ }
3314
+
29663315 if (oq_pi == oq_ci)
29673316 break;
29683317
29693318 num_events++;
2970
- response = event_queue->oq_element_array +
2971
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3319
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
29723320
29733321 event_index =
29743322 pqi_event_type_to_event_index(response->event_type);
29753323
2976
- if (event_index >= 0) {
2977
- if (response->request_acknowlege) {
2978
- event = &ctrl_info->events[event_index];
2979
- event->pending = true;
2980
- event->event_type = response->event_type;
2981
- event->event_id = response->event_id;
2982
- event->additional_event_id =
2983
- response->additional_event_id;
2984
- }
3324
+ if (event_index >= 0 && response->request_acknowledge) {
3325
+ event = &ctrl_info->events[event_index];
3326
+ event->pending = true;
3327
+ event->event_type = response->event_type;
3328
+ event->event_id = response->event_id;
3329
+ event->additional_event_id = response->additional_event_id;
3330
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
3331
+ pqi_ofa_capture_event_payload(event, response);
29853332 }
29863333
29873334 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
....@@ -2999,7 +3346,7 @@
29993346 #define PQI_LEGACY_INTX_MASK 0x1
30003347
30013348 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3002
- bool enable_intx)
3349
+ bool enable_intx)
30033350 {
30043351 u32 intx_mask;
30053352 struct pqi_device_registers __iomem *pqi_registers;
....@@ -3096,7 +3443,8 @@
30963443 {
30973444 struct pqi_ctrl_info *ctrl_info;
30983445 struct pqi_queue_group *queue_group;
3099
- unsigned int num_responses_handled;
3446
+ int num_io_responses_handled;
3447
+ int num_events_handled;
31003448
31013449 queue_group = data;
31023450 ctrl_info = queue_group->ctrl_info;
....@@ -3104,17 +3452,25 @@
31043452 if (!pqi_is_valid_irq(ctrl_info))
31053453 return IRQ_NONE;
31063454
3107
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3455
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3456
+ if (num_io_responses_handled < 0)
3457
+ goto out;
31083458
3109
- if (irq == ctrl_info->event_irq)
3110
- num_responses_handled += pqi_process_event_intr(ctrl_info);
3459
+ if (irq == ctrl_info->event_irq) {
3460
+ num_events_handled = pqi_process_event_intr(ctrl_info);
3461
+ if (num_events_handled < 0)
3462
+ goto out;
3463
+ } else {
3464
+ num_events_handled = 0;
3465
+ }
31113466
3112
- if (num_responses_handled)
3467
+ if (num_io_responses_handled + num_events_handled > 0)
31133468 atomic_inc(&ctrl_info->num_interrupts);
31143469
31153470 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
31163471 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
31173472
3473
+out:
31183474 return IRQ_HANDLED;
31193475 }
31203476
....@@ -3234,9 +3590,9 @@
32343590 alloc_length += PQI_EXTRA_SGL_MEMORY;
32353591
32363592 ctrl_info->queue_memory_base =
3237
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3238
- alloc_length,
3239
- &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3593
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3594
+ &ctrl_info->queue_memory_base_dma_handle,
3595
+ GFP_KERNEL);
32403596
32413597 if (!ctrl_info->queue_memory_base)
32423598 return -ENOMEM;
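
dma_zalloc_coherent() was dropped around Linux 5.0, once dma_alloc_coherent() was guaranteed to return zeroed memory, so this hunk and the matching admin-queue and error-buffer hunks below are mechanical renames rather than behavior changes. A sketch of the current API, assuming a kernel build context (my_dev and buf_len are placeholder names):

#include <linux/dma-mapping.h>

static void *alloc_shared_buffer(struct device *my_dev, size_t buf_len,
                                 dma_addr_t *bus_addr)
{
        /* The returned memory is already zeroed; no memset() needed. */
        void *cpu_addr = dma_alloc_coherent(my_dev, buf_len, bus_addr,
                                            GFP_KERNEL);

        if (!cpu_addr)
                return NULL;            /* callers map this to -ENOMEM */
        return cpu_addr;
        /* pair with:
         * dma_free_coherent(my_dev, buf_len, cpu_addr, *bus_addr);
         */
}
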
....@@ -3373,10 +3729,9 @@
33733729 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
33743730
33753731 ctrl_info->admin_queue_memory_base =
3376
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3377
- alloc_length,
3378
- &ctrl_info->admin_queue_memory_base_dma_handle,
3379
- GFP_KERNEL);
3732
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3733
+ &ctrl_info->admin_queue_memory_base_dma_handle,
3734
+ GFP_KERNEL);
33803735
33813736 if (!ctrl_info->admin_queue_memory_base)
33823737 return -ENOMEM;
....@@ -3414,7 +3769,7 @@
34143769 return 0;
34153770 }
34163771
3417
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3772
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
34183773 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
34193774
34203775 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
....@@ -3438,7 +3793,7 @@
34383793 &pqi_registers->admin_oq_pi_addr);
34393794
34403795 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3441
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3796
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
34423797 (admin_queues->int_msg_num << 16);
34433798 writel(reg, &pqi_registers->admin_iq_num_elements);
34443799 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
....@@ -3507,7 +3862,7 @@
35073862 admin_queues = &ctrl_info->admin_queues;
35083863 oq_ci = admin_queues->oq_ci_copy;
35093864
3510
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
3865
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
35113866
35123867 while (1) {
35133868 oq_pi = readl(admin_queues->oq_pi);
....@@ -3622,7 +3977,7 @@
36223977
36233978 while (1) {
36243979 if (wait_for_completion_io_timeout(wait,
3625
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3980
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
36263981 rc = 0;
36273982 break;
36283983 }
....@@ -3645,8 +4000,8 @@
36454000 complete(waiting);
36464001 }
36474002
3648
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3649
- *error_info)
4003
+static int pqi_process_raid_io_error_synchronous(
4004
+ struct pqi_raid_error_info *error_info)
36504005 {
36514006 int rc = -EIO;
36524007
....@@ -3719,6 +4074,8 @@
37194074 goto out;
37204075 }
37214076
4077
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
4078
+
37224079 io_request = pqi_alloc_io_request(ctrl_info);
37234080
37244081 put_unaligned_le16(io_request->index,
....@@ -3765,6 +4122,7 @@
37654122
37664123 pqi_free_io_request(io_request);
37674124
4125
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
37684126 out:
37694127 up(&ctrl_info->sync_request_sem);
37704128
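
The new sync_cmds_outstanding counter brackets every synchronous RAID-path request: incremented before the submit, decremented after the request is freed, so the pqi_ctrl_wait_for_pending_sync_cmds() helper added further down can drain management commands during reset and shutdown. The same bracketing in portable C11 atomics (function names are illustrative):

#include <stdatomic.h>
#include <unistd.h>

static atomic_int sync_cmds_outstanding;

static int submit_sync_cmd(int (*issue_and_wait)(void))
{
        int rc;

        atomic_fetch_add(&sync_cmds_outstanding, 1);
        rc = issue_and_wait();          /* submit, block on completion */
        atomic_fetch_sub(&sync_cmds_outstanding, 1);
        return rc;
}

/* Drain: poll (politely) until no synchronous command is in flight. */
static void wait_for_pending_sync_cmds(void)
{
        while (atomic_load(&sync_cmds_outstanding) != 0)
                usleep(1000);           /* ~usleep_range(1000, 2000) */
}
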
....@@ -3833,7 +4191,7 @@
38334191 rc = pqi_map_single(ctrl_info->pci_dev,
38344192 &request.data.report_device_capability.sg_descriptor,
38354193 capability, sizeof(*capability),
3836
- PCI_DMA_FROMDEVICE);
4194
+ DMA_FROM_DEVICE);
38374195 if (rc)
38384196 goto out;
38394197
....@@ -3842,7 +4200,7 @@
38424200
38434201 pqi_pci_unmap(ctrl_info->pci_dev,
38444202 &request.data.report_device_capability.sg_descriptor, 1,
3845
- PCI_DMA_FROMDEVICE);
4203
+ DMA_FROM_DEVICE);
38464204
38474205 if (rc)
38484206 goto out;
....@@ -4169,7 +4527,7 @@
41694527 rc = pqi_map_single(ctrl_info->pci_dev,
41704528 request.data.report_event_configuration.sg_descriptors,
41714529 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4172
- PCI_DMA_FROMDEVICE);
4530
+ DMA_FROM_DEVICE);
41734531 if (rc)
41744532 goto out;
41754533
....@@ -4178,7 +4536,7 @@
41784536
41794537 pqi_pci_unmap(ctrl_info->pci_dev,
41804538 request.data.report_event_configuration.sg_descriptors, 1,
4181
- PCI_DMA_FROMDEVICE);
4539
+ DMA_FROM_DEVICE);
41824540
41834541 if (rc)
41844542 goto out;
....@@ -4205,7 +4563,7 @@
42054563 rc = pqi_map_single(ctrl_info->pci_dev,
42064564 request.data.report_event_configuration.sg_descriptors,
42074565 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4208
- PCI_DMA_TODEVICE);
4566
+ DMA_TO_DEVICE);
42094567 if (rc)
42104568 goto out;
42114569
....@@ -4214,7 +4572,7 @@
42144572
42154573 pqi_pci_unmap(ctrl_info->pci_dev,
42164574 request.data.report_event_configuration.sg_descriptors, 1,
4217
- PCI_DMA_TODEVICE);
4575
+ DMA_TO_DEVICE);
42184576
42194577 out:
42204578 kfree(event_config);
....@@ -4262,10 +4620,11 @@
42624620
42634621 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
42644622 {
4265
- ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4266
- ctrl_info->error_buffer_length,
4267
- &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
42684623
4624
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4625
+ ctrl_info->error_buffer_length,
4626
+ &ctrl_info->error_buffer_dma_handle,
4627
+ GFP_KERNEL);
42694628 if (!ctrl_info->error_buffer)
42704629 return -ENOMEM;
42714630
....@@ -4638,10 +4997,10 @@
46384997 }
46394998
46404999 switch (scmd->sc_data_direction) {
4641
- case DMA_TO_DEVICE:
5000
+ case DMA_FROM_DEVICE:
46425001 request->data_direction = SOP_READ_FLAG;
46435002 break;
4644
- case DMA_FROM_DEVICE:
5003
+ case DMA_TO_DEVICE:
46455004 request->data_direction = SOP_WRITE_FLAG;
46465005 break;
46475006 case DMA_NONE:
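
This hunk swaps the two cases so the SOP flag agrees with the kernel's host-centric DMA enums: DMA_FROM_DEVICE (device writes into host memory, i.e. a read) now yields SOP_READ_FLAG, and DMA_TO_DEVICE yields SOP_WRITE_FLAG. Restated as a standalone table (the enums are stand-ins for the kernel's, and the no-data leg is my guess at the obvious default):

enum dma_dir { MY_DMA_TO_DEVICE, MY_DMA_FROM_DEVICE, MY_DMA_NONE };
enum sop_dir { SOP_NO_DATA, SOP_READ, SOP_WRITE };

/*
 * Directions are named from the host's point of view:
 * FROM_DEVICE means data lands in host memory, i.e. a read.
 */
static enum sop_dir to_sop_direction(enum dma_dir d)
{
        switch (d) {
        case MY_DMA_FROM_DEVICE:
                return SOP_READ;        /* device -> host */
        case MY_DMA_TO_DEVICE:
                return SOP_WRITE;       /* host -> device */
        default:
                return SOP_NO_DATA;
        }
}
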
....@@ -4954,7 +5313,17 @@
49545313 {
49555314 struct pqi_scsi_dev *device;
49565315
5316
+ if (!scmd->device) {
5317
+ set_host_byte(scmd, DID_NO_CONNECT);
5318
+ return;
5319
+ }
5320
+
49575321 device = scmd->device->hostdata;
5322
+ if (!device) {
5323
+ set_host_byte(scmd, DID_NO_CONNECT);
5324
+ return;
5325
+ }
5326
+
49585327 atomic_dec(&device->scsi_cmds_outstanding);
49595328 }
49605329
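
Several hunks in this patch add the same guard: between a surprise removal and command completion, scmd->device or its hostdata may already be NULL, so the completion and queuecommand paths now bail out with DID_NO_CONNECT rather than dereferencing a freed pointer (the sysfs show functions below get the matching -ENODEV treatment under the device-list spinlock). The shape of the guard reduced to plain C (the types are simplified stand-ins):

#include <stddef.h>

#define DID_OK          0x00    /* values match the kernel host byte */
#define DID_NO_CONNECT  0x01

struct pqi_dev  { int scsi_cmds_outstanding; };
struct scsi_dev { struct pqi_dev *hostdata; };
struct scsi_cmd { struct scsi_dev *device; int host_byte; };

static void prep_for_done(struct scsi_cmd *scmd)
{
        struct pqi_dev *dev;

        if (!scmd->device) {            /* device already torn down */
                scmd->host_byte = DID_NO_CONNECT;
                return;
        }
        dev = scmd->device->hostdata;
        if (!dev) {                     /* hostdata cleared on removal */
                scmd->host_byte = DID_NO_CONNECT;
                return;
        }
        dev->scsi_cmds_outstanding--;   /* normal completion accounting */
}
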
....@@ -4971,16 +5340,24 @@
49715340 device = scmd->device->hostdata;
49725341 ctrl_info = shost_to_hba(shost);
49735342
5343
+ if (!device) {
5344
+ set_host_byte(scmd, DID_NO_CONNECT);
5345
+ pqi_scsi_done(scmd);
5346
+ return 0;
5347
+ }
5348
+
49745349 atomic_inc(&device->scsi_cmds_outstanding);
49755350
4976
- if (pqi_ctrl_offline(ctrl_info)) {
5351
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5352
+ device)) {
49775353 set_host_byte(scmd, DID_NO_CONNECT);
49785354 pqi_scsi_done(scmd);
49795355 return 0;
49805356 }
49815357
49825358 pqi_ctrl_busy(ctrl_info);
4983
- if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
5359
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5360
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
49845361 rc = SCSI_MLQUEUE_HOST_BUSY;
49855362 goto out;
49865363 }
....@@ -4997,22 +5374,21 @@
49975374 if (pqi_is_logical_device(device)) {
49985375 raid_bypassed = false;
49995376 if (device->raid_bypass_enabled &&
5000
- !blk_rq_is_passthrough(scmd->request)) {
5377
+ !blk_rq_is_passthrough(scmd->request)) {
50015378 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
50025379 scmd, queue_group);
5003
- if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5380
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
50045381 raid_bypassed = true;
5382
+ atomic_inc(&device->raid_bypass_cnt);
5383
+ }
50055384 }
50065385 if (!raid_bypassed)
5007
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5008
- queue_group);
5386
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
50095387 } else {
50105388 if (device->aio_enabled)
5011
- rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5012
- queue_group);
5389
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
50135390 else
5014
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5015
- queue_group);
5391
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
50165392 }
50175393
50185394 out:
....@@ -5116,6 +5492,46 @@
51165492
51175493 list_del(&io_request->request_list_entry);
51185494 set_host_byte(scmd, DID_RESET);
5495
+ pqi_free_io_request(io_request);
5496
+ scsi_dma_unmap(scmd);
5497
+ pqi_scsi_done(scmd);
5498
+ }
5499
+
5500
+ spin_unlock_irqrestore(
5501
+ &queue_group->submit_lock[path], flags);
5502
+ }
5503
+ }
5504
+}
5505
+
5506
+static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5507
+{
5508
+ unsigned int i;
5509
+ unsigned int path;
5510
+ struct pqi_queue_group *queue_group;
5511
+ unsigned long flags;
5512
+ struct pqi_io_request *io_request;
5513
+ struct pqi_io_request *next;
5514
+ struct scsi_cmnd *scmd;
5515
+
5516
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5517
+ queue_group = &ctrl_info->queue_groups[i];
5518
+
5519
+ for (path = 0; path < 2; path++) {
5520
+ spin_lock_irqsave(&queue_group->submit_lock[path],
5521
+ flags);
5522
+
5523
+ list_for_each_entry_safe(io_request, next,
5524
+ &queue_group->request_list[path],
5525
+ request_list_entry) {
5526
+
5527
+ scmd = io_request->scmd;
5528
+ if (!scmd)
5529
+ continue;
5530
+
5531
+ list_del(&io_request->request_list_entry);
5532
+ set_host_byte(scmd, DID_RESET);
5533
+ pqi_free_io_request(io_request);
5534
+ scsi_dma_unmap(scmd);
51195535 pqi_scsi_done(scmd);
51205536 }
51215537
....@@ -5126,24 +5542,38 @@
51265542 }
51275543
51285544 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5129
- struct pqi_scsi_dev *device)
5545
+ struct pqi_scsi_dev *device, unsigned long timeout_secs)
51305546 {
5547
+ unsigned long timeout;
5548
+
5549
+ timeout = (timeout_secs * PQI_HZ) + jiffies;
5550
+
51315551 while (atomic_read(&device->scsi_cmds_outstanding)) {
51325552 pqi_check_ctrl_health(ctrl_info);
51335553 if (pqi_ctrl_offline(ctrl_info))
51345554 return -ENXIO;
5555
+ if (timeout_secs != NO_TIMEOUT) {
5556
+ if (time_after(jiffies, timeout)) {
5557
+ dev_err(&ctrl_info->pci_dev->dev,
5558
+ "timed out waiting for pending IO\n");
5559
+ return -ETIMEDOUT;
5560
+ }
5561
+ }
51355562 usleep_range(1000, 2000);
51365563 }
51375564
51385565 return 0;
51395566 }
51405567
5141
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5568
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5569
+ unsigned long timeout_secs)
51425570 {
51435571 bool io_pending;
51445572 unsigned long flags;
5573
+ unsigned long timeout;
51455574 struct pqi_scsi_dev *device;
51465575
5576
+ timeout = (timeout_secs * PQI_HZ) + jiffies;
51475577 while (1) {
51485578 io_pending = false;
51495579
....@@ -5165,6 +5595,25 @@
51655595 if (pqi_ctrl_offline(ctrl_info))
51665596 return -ENXIO;
51675597
5598
+ if (timeout_secs != NO_TIMEOUT) {
5599
+ if (time_after(jiffies, timeout)) {
5600
+ dev_err(&ctrl_info->pci_dev->dev,
5601
+ "timed out waiting for pending IO\n");
5602
+ return -ETIMEDOUT;
5603
+ }
5604
+ }
5605
+ usleep_range(1000, 2000);
5606
+ }
5607
+
5608
+ return 0;
5609
+}
5610
+
5611
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5612
+{
5613
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5614
+ pqi_check_ctrl_health(ctrl_info);
5615
+ if (pqi_ctrl_offline(ctrl_info))
5616
+ return -ENXIO;
51685617 usleep_range(1000, 2000);
51695618 }
51705619
....@@ -5179,7 +5628,8 @@
51795628 complete(waiting);
51805629 }
51815630
5182
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
5631
+#define PQI_LUN_RESET_TIMEOUT_SECS 30
5632
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
51835633
51845634 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
51855635 struct pqi_scsi_dev *device, struct completion *wait)
....@@ -5188,7 +5638,7 @@
51885638
51895639 while (1) {
51905640 if (wait_for_completion_io_timeout(wait,
5191
- PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5641
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
51925642 rc = 0;
51935643 break;
51945644 }
....@@ -5225,6 +5675,9 @@
52255675 memcpy(request->lun_number, device->scsi3addr,
52265676 sizeof(request->lun_number));
52275677 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5678
+ if (ctrl_info->tmf_iu_timeout_supported)
5679
+ put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
5680
+ &request->timeout);
52285681
52295682 pqi_start_io(ctrl_info,
52305683 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
....@@ -5241,16 +5694,55 @@
52415694
52425695 /* Performs a reset at the LUN level. */
52435696
5697
+#define PQI_LUN_RESET_RETRIES 3
5698
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5699
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
5700
+
5701
+static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5702
+ struct pqi_scsi_dev *device)
5703
+{
5704
+ int rc;
5705
+ unsigned int retries;
5706
+ unsigned long timeout_secs;
5707
+
5708
+ for (retries = 0;;) {
5709
+ rc = pqi_lun_reset(ctrl_info, device);
5710
+ if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
5711
+ break;
5712
+ msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5713
+ }
5714
+
5715
+ timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5716
+
5717
+ rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5718
+
5719
+ return rc == 0 ? SUCCESS : FAILED;
5720
+}
5721
+
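
_pqi_device_reset() above bounds what used to be a single attempt: up to PQI_LUN_RESET_RETRIES tries with a 10 second pause between them, then a pending-I/O wait that is capped at 120 seconds when the reset itself failed (note the rc |=, so a reset that eventually succeeded still reports FAILED if the drain timed out). The retry skeleton in isolation (do_reset is a placeholder):

#include <unistd.h>

#define RESET_RETRIES           3
#define RETRY_INTERVAL_SECS     10      /* driver: msleep(10000) */

static int reset_with_retries(int (*do_reset)(void))
{
        unsigned int retries;
        int rc;

        for (retries = 0;;) {
                rc = do_reset();
                /* stop on success, or once the retry budget is spent */
                if (rc == 0 || ++retries > RESET_RETRIES)
                        break;
                sleep(RETRY_INTERVAL_SECS);
        }
        return rc;
}
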
52445722 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
52455723 struct pqi_scsi_dev *device)
52465724 {
52475725 int rc;
52485726
5249
- rc = pqi_lun_reset(ctrl_info, device);
5250
- if (rc == 0)
5251
- rc = pqi_device_wait_for_pending_io(ctrl_info, device);
5727
+ mutex_lock(&ctrl_info->lun_reset_mutex);
52525728
5253
- return rc == 0 ? SUCCESS : FAILED;
5729
+ pqi_ctrl_block_requests(ctrl_info);
5730
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
5731
+ pqi_fail_io_queued_for_device(ctrl_info, device);
5732
+ rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5733
+ pqi_device_reset_start(device);
5734
+ pqi_ctrl_unblock_requests(ctrl_info);
5735
+
5736
+ if (rc)
5737
+ rc = FAILED;
5738
+ else
5739
+ rc = _pqi_device_reset(ctrl_info, device);
5740
+
5741
+ pqi_device_reset_done(device);
5742
+
5743
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
5744
+
5745
+ return rc;
52545746 }
52555747
52565748 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
....@@ -5269,28 +5761,17 @@
52695761 shost->host_no, device->bus, device->target, device->lun);
52705762
52715763 pqi_check_ctrl_health(ctrl_info);
5272
- if (pqi_ctrl_offline(ctrl_info)) {
5764
+ if (pqi_ctrl_offline(ctrl_info) ||
5765
+ pqi_device_reset_blocked(ctrl_info)) {
52735766 rc = FAILED;
52745767 goto out;
52755768 }
52765769
5277
- mutex_lock(&ctrl_info->lun_reset_mutex);
5770
+ pqi_wait_until_ofa_finished(ctrl_info);
52785771
5279
- pqi_ctrl_block_requests(ctrl_info);
5280
- pqi_ctrl_wait_until_quiesced(ctrl_info);
5281
- pqi_fail_io_queued_for_device(ctrl_info, device);
5282
- rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5283
- pqi_device_reset_start(device);
5284
- pqi_ctrl_unblock_requests(ctrl_info);
5285
-
5286
- if (rc)
5287
- rc = FAILED;
5288
- else
5289
- rc = pqi_device_reset(ctrl_info, device);
5290
-
5291
- pqi_device_reset_done(device);
5292
-
5293
- mutex_unlock(&ctrl_info->lun_reset_mutex);
5772
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
5773
+ rc = pqi_device_reset(ctrl_info, device);
5774
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
52945775
52955776 out:
52965777 dev_err(&ctrl_info->pci_dev->dev,
....@@ -5335,6 +5816,10 @@
53355816 scsi_change_queue_depth(sdev,
53365817 device->advertised_queue_depth);
53375818 }
5819
+ if (pqi_is_logical_device(device))
5820
+ pqi_disable_write_same(sdev);
5821
+ else
5822
+ sdev->allow_restart = 1;
53385823 }
53395824
53405825 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
....@@ -5346,11 +5831,46 @@
53465831 {
53475832 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
53485833
5349
- return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
5834
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5835
+ ctrl_info->pci_dev, 0);
53505836 }
53515837
5352
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5353
- void __user *arg)
5838
+static int pqi_slave_configure(struct scsi_device *sdev)
5839
+{
5840
+ struct pqi_scsi_dev *device;
5841
+
5842
+ device = sdev->hostdata;
5843
+ device->devtype = sdev->type;
5844
+
5845
+ return 0;
5846
+}
5847
+
5848
+static void pqi_slave_destroy(struct scsi_device *sdev)
5849
+{
5850
+ unsigned long flags;
5851
+ struct pqi_scsi_dev *device;
5852
+ struct pqi_ctrl_info *ctrl_info;
5853
+
5854
+ ctrl_info = shost_to_hba(sdev->host);
5855
+
5856
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5857
+
5858
+ device = sdev->hostdata;
5859
+ if (device) {
5860
+ sdev->hostdata = NULL;
5861
+ if (!list_empty(&device->scsi_device_list_entry))
5862
+ list_del(&device->scsi_device_list_entry);
5863
+ }
5864
+
5865
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5866
+
5867
+ if (device) {
5868
+ pqi_dev_info(ctrl_info, "removed", device);
5869
+ pqi_free_device(device);
5870
+ }
5871
+}
5872
+
5873
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
53545874 {
53555875 struct pci_dev *pci_dev;
53565876 u32 subsystem_vendor;
....@@ -5367,8 +5887,7 @@
53675887 pciinfo.dev_fn = pci_dev->devfn;
53685888 subsystem_vendor = pci_dev->subsystem_vendor;
53695889 subsystem_device = pci_dev->subsystem_device;
5370
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5371
- subsystem_vendor;
5890
+ pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
53725891
53735892 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
53745893 return -EFAULT;
....@@ -5545,7 +6064,7 @@
55456064
55466065 rc = pqi_map_single(ctrl_info->pci_dev,
55476066 &request.sg_descriptors[0], kernel_buffer,
5548
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6067
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
55496068 if (rc)
55506069 goto out;
55516070
....@@ -5554,12 +6073,15 @@
55546073
55556074 put_unaligned_le16(iu_length, &request.header.iu_length);
55566075
6076
+ if (ctrl_info->raid_iu_timeout_supported)
6077
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6078
+
55576079 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
55586080 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
55596081
55606082 if (iocommand.buf_size > 0)
55616083 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5562
- PCI_DMA_BIDIRECTIONAL);
6084
+ DMA_BIDIRECTIONAL);
55636085
55646086 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
55656087
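
The new RAID IU timeout is copied into the wire structure with put_unaligned_le32(), the accessor family used throughout this file: request structures are packed little-endian for the controller, and the put_unaligned_*/get_unaligned_* helpers make both the byte order and the possibly unaligned destination explicit. A portable equivalent of the two 32-bit helpers (memcpy-based, much like the kernel's generic fallback):

#include <stdint.h>
#include <string.h>

static inline void my_put_unaligned_le32(uint32_t val, void *p)
{
        uint8_t b[4] = {
                val & 0xff, (val >> 8) & 0xff,
                (val >> 16) & 0xff, (val >> 24) & 0xff,
        };

        memcpy(p, b, sizeof(b));        /* no alignment assumption */
}

static inline uint32_t my_get_unaligned_le32(const void *p)
{
        uint8_t b[4];

        memcpy(b, p, sizeof(b));
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}
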
....@@ -5599,12 +6121,16 @@
55996121 return rc;
56006122 }
56016123
5602
-static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6124
+static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6125
+ void __user *arg)
56036126 {
56046127 int rc;
56056128 struct pqi_ctrl_info *ctrl_info;
56066129
56076130 ctrl_info = shost_to_hba(sdev->host);
6131
+
6132
+ if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6133
+ return -EBUSY;
56086134
56096135 switch (cmd) {
56106136 case CCISS_DEREGDISK:
....@@ -5629,23 +6155,59 @@
56296155 return rc;
56306156 }
56316157
5632
-static ssize_t pqi_version_show(struct device *dev,
6158
+static ssize_t pqi_firmware_version_show(struct device *dev,
56336159 struct device_attribute *attr, char *buffer)
56346160 {
5635
- ssize_t count = 0;
56366161 struct Scsi_Host *shost;
56376162 struct pqi_ctrl_info *ctrl_info;
56386163
56396164 shost = class_to_shost(dev);
56406165 ctrl_info = shost_to_hba(shost);
56416166
5642
- count += snprintf(buffer + count, PAGE_SIZE - count,
5643
- " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6167
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6168
+}
56446169
5645
- count += snprintf(buffer + count, PAGE_SIZE - count,
5646
- "firmware: %s\n", ctrl_info->firmware_version);
6170
+static ssize_t pqi_driver_version_show(struct device *dev,
6171
+ struct device_attribute *attr, char *buffer)
6172
+{
6173
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
6174
+ DRIVER_VERSION BUILD_TIMESTAMP);
6175
+}
56476176
5648
- return count;
6177
+static ssize_t pqi_serial_number_show(struct device *dev,
6178
+ struct device_attribute *attr, char *buffer)
6179
+{
6180
+ struct Scsi_Host *shost;
6181
+ struct pqi_ctrl_info *ctrl_info;
6182
+
6183
+ shost = class_to_shost(dev);
6184
+ ctrl_info = shost_to_hba(shost);
6185
+
6186
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6187
+}
6188
+
6189
+static ssize_t pqi_model_show(struct device *dev,
6190
+ struct device_attribute *attr, char *buffer)
6191
+{
6192
+ struct Scsi_Host *shost;
6193
+ struct pqi_ctrl_info *ctrl_info;
6194
+
6195
+ shost = class_to_shost(dev);
6196
+ ctrl_info = shost_to_hba(shost);
6197
+
6198
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6199
+}
6200
+
6201
+static ssize_t pqi_vendor_show(struct device *dev,
6202
+ struct device_attribute *attr, char *buffer)
6203
+{
6204
+ struct Scsi_Host *shost;
6205
+ struct pqi_ctrl_info *ctrl_info;
6206
+
6207
+ shost = class_to_shost(dev);
6208
+ ctrl_info = shost_to_hba(shost);
6209
+
6210
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
56496211 }
56506212
56516213 static ssize_t pqi_host_rescan_store(struct device *dev,
....@@ -5666,14 +6228,14 @@
56666228
56676229 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
56686230 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5669
- count += snprintf(buffer + count, PAGE_SIZE - count,
6231
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
56706232 "[%s] ", pqi_lockup_actions[i].name);
56716233 else
5672
- count += snprintf(buffer + count, PAGE_SIZE - count,
6234
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
56736235 "%s ", pqi_lockup_actions[i].name);
56746236 }
56756237
5676
- count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
6238
+ count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
56776239
56786240 return count;
56796241 }
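
The snprintf() to scnprintf() change in this show routine matters whenever return values are accumulated: snprintf() reports the length the output would have had, so after truncation count can exceed PAGE_SIZE and buffer + count walks past the page, while scnprintf() reports the bytes actually stored. Userspace libc has no scnprintf(), so here is a sketch of one (the truncation clamp is the whole point):

#include <stdarg.h>
#include <stdio.h>

/*
 * Like snprintf(), but returns the bytes actually stored (not
 * counting the NUL), so "buf + ret" never runs past the buffer.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int ret;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        ret = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (ret < 0)
                return 0;               /* encoding error: store nothing */
        return (size_t)ret >= size ? (int)(size - 1) : ret;
}
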
....@@ -5698,17 +6260,174 @@
56986260 return -EINVAL;
56996261 }
57006262
5701
-static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
6263
+static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6264
+static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6265
+static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6266
+static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6267
+static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
57026268 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
57036269 static DEVICE_ATTR(lockup_action, 0644,
57046270 pqi_lockup_action_show, pqi_lockup_action_store);
57056271
57066272 static struct device_attribute *pqi_shost_attrs[] = {
5707
- &dev_attr_version,
6273
+ &dev_attr_driver_version,
6274
+ &dev_attr_firmware_version,
6275
+ &dev_attr_model,
6276
+ &dev_attr_serial_number,
6277
+ &dev_attr_vendor,
57086278 &dev_attr_rescan,
57096279 &dev_attr_lockup_action,
57106280 NULL
57116281 };
6282
+
6283
+static ssize_t pqi_unique_id_show(struct device *dev,
6284
+ struct device_attribute *attr, char *buffer)
6285
+{
6286
+ struct pqi_ctrl_info *ctrl_info;
6287
+ struct scsi_device *sdev;
6288
+ struct pqi_scsi_dev *device;
6289
+ unsigned long flags;
6290
+ u8 unique_id[16];
6291
+
6292
+ sdev = to_scsi_device(dev);
6293
+ ctrl_info = shost_to_hba(sdev->host);
6294
+
6295
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6296
+
6297
+ device = sdev->hostdata;
6298
+ if (!device) {
6299
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6300
+ return -ENODEV;
6301
+ }
6302
+
6303
+ if (device->is_physical_device) {
6304
+ memset(unique_id, 0, 8);
6305
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6306
+ } else {
6307
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6308
+ }
6309
+
6310
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6311
+
6312
+ return snprintf(buffer, PAGE_SIZE,
6313
+ "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6314
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6315
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6316
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6317
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6318
+}
6319
+
6320
+static ssize_t pqi_lunid_show(struct device *dev,
6321
+ struct device_attribute *attr, char *buffer)
6322
+{
6323
+ struct pqi_ctrl_info *ctrl_info;
6324
+ struct scsi_device *sdev;
6325
+ struct pqi_scsi_dev *device;
6326
+ unsigned long flags;
6327
+ u8 lunid[8];
6328
+
6329
+ sdev = to_scsi_device(dev);
6330
+ ctrl_info = shost_to_hba(sdev->host);
6331
+
6332
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6333
+
6334
+ device = sdev->hostdata;
6335
+ if (!device) {
6336
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6337
+ return -ENODEV;
6338
+ }
6339
+
6340
+ memcpy(lunid, device->scsi3addr, sizeof(lunid));
6341
+
6342
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6343
+
6344
+ return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6345
+}
6346
+
6347
+#define MAX_PATHS 8
6348
+
6349
+static ssize_t pqi_path_info_show(struct device *dev,
6350
+ struct device_attribute *attr, char *buf)
6351
+{
6352
+ struct pqi_ctrl_info *ctrl_info;
6353
+ struct scsi_device *sdev;
6354
+ struct pqi_scsi_dev *device;
6355
+ unsigned long flags;
6356
+ int i;
6357
+ int output_len = 0;
6358
+ u8 box;
6359
+ u8 bay;
6360
+ u8 path_map_index;
6361
+ char *active;
6362
+ u8 phys_connector[2];
6363
+
6364
+ sdev = to_scsi_device(dev);
6365
+ ctrl_info = shost_to_hba(sdev->host);
6366
+
6367
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6368
+
6369
+ device = sdev->hostdata;
6370
+ if (!device) {
6371
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6372
+ return -ENODEV;
6373
+ }
6374
+
6375
+ bay = device->bay;
6376
+ for (i = 0; i < MAX_PATHS; i++) {
6377
+ path_map_index = 1 << i;
6378
+ if (i == device->active_path_index)
6379
+ active = "Active";
6380
+ else if (device->path_map & path_map_index)
6381
+ active = "Inactive";
6382
+ else
6383
+ continue;
6384
+
6385
+ output_len += scnprintf(buf + output_len,
6386
+ PAGE_SIZE - output_len,
6387
+ "[%d:%d:%d:%d] %20.20s ",
6388
+ ctrl_info->scsi_host->host_no,
6389
+ device->bus, device->target,
6390
+ device->lun,
6391
+ scsi_device_type(device->devtype));
6392
+
6393
+ if (device->devtype == TYPE_RAID ||
6394
+ pqi_is_logical_device(device))
6395
+ goto end_buffer;
6396
+
6397
+ memcpy(&phys_connector, &device->phys_connector[i],
6398
+ sizeof(phys_connector));
6399
+ if (phys_connector[0] < '0')
6400
+ phys_connector[0] = '0';
6401
+ if (phys_connector[1] < '0')
6402
+ phys_connector[1] = '0';
6403
+
6404
+ output_len += scnprintf(buf + output_len,
6405
+ PAGE_SIZE - output_len,
6406
+ "PORT: %.2s ", phys_connector);
6407
+
6408
+ box = device->box[i];
6409
+ if (box != 0 && box != 0xFF)
6410
+ output_len += scnprintf(buf + output_len,
6411
+ PAGE_SIZE - output_len,
6412
+ "BOX: %hhu ", box);
6413
+
6414
+ if ((device->devtype == TYPE_DISK ||
6415
+ device->devtype == TYPE_ZBC) &&
6416
+ pqi_expose_device(device))
6417
+ output_len += scnprintf(buf + output_len,
6418
+ PAGE_SIZE - output_len,
6419
+ "BAY: %hhu ", bay);
6420
+
6421
+end_buffer:
6422
+ output_len += scnprintf(buf + output_len,
6423
+ PAGE_SIZE - output_len,
6424
+ "%s\n", active);
6425
+ }
6426
+
6427
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6428
+
6429
+ return output_len;
6430
+}
57126431
57136432 static ssize_t pqi_sas_address_show(struct device *dev,
57146433 struct device_attribute *attr, char *buffer)
....@@ -5725,11 +6444,11 @@
57256444 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
57266445
57276446 device = sdev->hostdata;
5728
- if (pqi_is_logical_device(device)) {
5729
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5730
- flags);
6447
+ if (!device || !pqi_is_device_with_sas_address(device)) {
6448
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
57316449 return -ENODEV;
57326450 }
6451
+
57336452 sas_address = device->sas_address;
57346453
57356454 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
....@@ -5751,6 +6470,11 @@
57516470 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
57526471
57536472 device = sdev->hostdata;
6473
+ if (!device) {
6474
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6475
+ return -ENODEV;
6476
+ }
6477
+
57546478 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
57556479 buffer[1] = '\n';
57566480 buffer[2] = '\0';
....@@ -5775,6 +6499,10 @@
57756499 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
57766500
57776501 device = sdev->hostdata;
6502
+ if (!device) {
6503
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6504
+ return -ENODEV;
6505
+ }
57786506
57796507 if (pqi_is_logical_device(device))
57806508 raid_level = pqi_raid_level_to_string(device->raid_level);
....@@ -5786,15 +6514,49 @@
57866514 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
57876515 }
57886516
6517
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
6518
+ struct device_attribute *attr, char *buffer)
6519
+{
6520
+ struct pqi_ctrl_info *ctrl_info;
6521
+ struct scsi_device *sdev;
6522
+ struct pqi_scsi_dev *device;
6523
+ unsigned long flags;
6524
+ int raid_bypass_cnt;
6525
+
6526
+ sdev = to_scsi_device(dev);
6527
+ ctrl_info = shost_to_hba(sdev->host);
6528
+
6529
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6530
+
6531
+ device = sdev->hostdata;
6532
+ if (!device) {
6533
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6534
+ return -ENODEV;
6535
+ }
6536
+
6537
+ raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
6538
+
6539
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6540
+
6541
+ return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
6542
+}
6543
+
6544
+static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6545
+static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6546
+static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
57896547 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5790
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
5791
- pqi_ssd_smart_path_enabled_show, NULL);
6548
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
57926549 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6550
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
57936551
57946552 static struct device_attribute *pqi_sdev_attrs[] = {
6553
+ &dev_attr_lunid,
6554
+ &dev_attr_unique_id,
6555
+ &dev_attr_path_info,
57956556 &dev_attr_sas_address,
57966557 &dev_attr_ssd_smart_path_enabled,
57976558 &dev_attr_raid_level,
6559
+ &dev_attr_raid_bypass_cnt,
57986560 NULL
57996561 };
58006562
....@@ -5806,10 +6568,11 @@
58066568 .scan_start = pqi_scan_start,
58076569 .scan_finished = pqi_scan_finished,
58086570 .this_id = -1,
5809
- .use_clustering = ENABLE_CLUSTERING,
58106571 .eh_device_reset_handler = pqi_eh_device_reset_handler,
58116572 .ioctl = pqi_ioctl,
58126573 .slave_alloc = pqi_slave_alloc,
6574
+ .slave_configure = pqi_slave_configure,
6575
+ .slave_destroy = pqi_slave_destroy,
58136576 .map_queues = pqi_map_queues,
58146577 .sdev_attrs = pqi_sdev_attrs,
58156578 .shost_attrs = pqi_shost_attrs,
....@@ -5843,6 +6606,7 @@
58436606 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
58446607 shost->unique_id = shost->irq;
58456608 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6609
+ shost->host_tagset = 1;
58466610 shost->hostdata[0] = (unsigned long)ctrl_info;
58476611
58486612 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
....@@ -5947,7 +6711,30 @@
59476711 return rc;
59486712 }
59496713
5950
-static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
6714
+static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6715
+{
6716
+ int rc;
6717
+ struct bmic_sense_subsystem_info *sense_info;
6718
+
6719
+ sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6720
+ if (!sense_info)
6721
+ return -ENOMEM;
6722
+
6723
+ rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6724
+ if (rc)
6725
+ goto out;
6726
+
6727
+ memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6728
+ sizeof(sense_info->ctrl_serial_number));
6729
+ ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6730
+
6731
+out:
6732
+ kfree(sense_info);
6733
+
6734
+ return rc;
6735
+}
6736
+
6737
+static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
59516738 {
59526739 int rc;
59536740 struct bmic_identify_controller *identify;
....@@ -5968,10 +6755,282 @@
59686755 sizeof(ctrl_info->firmware_version),
59696756 "-%u", get_unaligned_le16(&identify->firmware_build_number));
59706757
6758
+ memcpy(ctrl_info->model, identify->product_id,
6759
+ sizeof(identify->product_id));
6760
+ ctrl_info->model[sizeof(identify->product_id)] = '\0';
6761
+
6762
+ memcpy(ctrl_info->vendor, identify->vendor_id,
6763
+ sizeof(identify->vendor_id));
6764
+ ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6765
+
59716766 out:
59726767 kfree(identify);
59736768
59746769 return rc;
6770
+}
6771
+
6772
+struct pqi_config_table_section_info {
6773
+ struct pqi_ctrl_info *ctrl_info;
6774
+ void *section;
6775
+ u32 section_offset;
6776
+ void __iomem *section_iomem_addr;
6777
+};
6778
+
6779
+static inline bool pqi_is_firmware_feature_supported(
6780
+ struct pqi_config_table_firmware_features *firmware_features,
6781
+ unsigned int bit_position)
6782
+{
6783
+ unsigned int byte_index;
6784
+
6785
+ byte_index = bit_position / BITS_PER_BYTE;
6786
+
6787
+ if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6788
+ return false;
6789
+
6790
+ return firmware_features->features_supported[byte_index] &
6791
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6792
+}
6793
+
6794
+static inline bool pqi_is_firmware_feature_enabled(
6795
+ struct pqi_config_table_firmware_features *firmware_features,
6796
+ void __iomem *firmware_features_iomem_addr,
6797
+ unsigned int bit_position)
6798
+{
6799
+ unsigned int byte_index;
6800
+ u8 __iomem *features_enabled_iomem_addr;
6801
+
6802
+ byte_index = (bit_position / BITS_PER_BYTE) +
6803
+ (le16_to_cpu(firmware_features->num_elements) * 2);
6804
+
6805
+ features_enabled_iomem_addr = firmware_features_iomem_addr +
6806
+ offsetof(struct pqi_config_table_firmware_features,
6807
+ features_supported) + byte_index;
6808
+
6809
+ return *((__force u8 *)features_enabled_iomem_addr) &
6810
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6811
+}
6812
+
6813
+static inline void pqi_request_firmware_feature(
6814
+ struct pqi_config_table_firmware_features *firmware_features,
6815
+ unsigned int bit_position)
6816
+{
6817
+ unsigned int byte_index;
6818
+
6819
+ byte_index = (bit_position / BITS_PER_BYTE) +
6820
+ le16_to_cpu(firmware_features->num_elements);
6821
+
6822
+ firmware_features->features_supported[byte_index] |=
6823
+ (1 << (bit_position % BITS_PER_BYTE));
6824
+}
6825
+
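
The helpers above all address one variable-length section: after num_elements comes a firmware-written "supported" byte array, then a host-written "requested" array, then a firmware-written "enabled" array, each num_elements bytes long. That is why "requested" lives at byte_index + num_elements and "enabled" at byte_index + 2 * num_elements, with a feature bit mapped as byte = bit / 8, mask = 1 << (bit % 8). The addressing reduced to a standalone model (the struct is my paraphrase, not the driver's exact layout):

#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* One logical table: [supported][requested][enabled], each n bytes. */
struct features {
        uint16_t num_elements;          /* n; example below assumes 8 */
        uint8_t  bytes[3 * 8];
};

static bool feature_supported(const struct features *f, unsigned int bit)
{
        unsigned int byte = bit / BITS_PER_BYTE;

        if (byte >= f->num_elements)
                return false;
        return f->bytes[byte] & (1u << (bit % BITS_PER_BYTE));
}

static void request_feature(struct features *f, unsigned int bit)
{
        unsigned int byte = bit / BITS_PER_BYTE + f->num_elements;

        f->bytes[byte] |= 1u << (bit % BITS_PER_BYTE);
}

static bool feature_enabled(const struct features *f, unsigned int bit)
{
        unsigned int byte = bit / BITS_PER_BYTE + 2 * f->num_elements;

        return f->bytes[byte] & (1u << (bit % BITS_PER_BYTE));
}
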
6826
+static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6827
+ u16 first_section, u16 last_section)
6828
+{
6829
+ struct pqi_vendor_general_request request;
6830
+
6831
+ memset(&request, 0, sizeof(request));
6832
+
6833
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6834
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6835
+ &request.header.iu_length);
6836
+ put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6837
+ &request.function_code);
6838
+ put_unaligned_le16(first_section,
6839
+ &request.data.config_table_update.first_section);
6840
+ put_unaligned_le16(last_section,
6841
+ &request.data.config_table_update.last_section);
6842
+
6843
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6844
+ 0, NULL, NO_TIMEOUT);
6845
+}
6846
+
6847
+static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6848
+ struct pqi_config_table_firmware_features *firmware_features,
6849
+ void __iomem *firmware_features_iomem_addr)
6850
+{
6851
+ void *features_requested;
6852
+ void __iomem *features_requested_iomem_addr;
6853
+
6854
+ features_requested = firmware_features->features_supported +
6855
+ le16_to_cpu(firmware_features->num_elements);
6856
+
6857
+ features_requested_iomem_addr = firmware_features_iomem_addr +
6858
+ (features_requested - (void *)firmware_features);
6859
+
6860
+ memcpy_toio(features_requested_iomem_addr, features_requested,
6861
+ le16_to_cpu(firmware_features->num_elements));
6862
+
6863
+ return pqi_config_table_update(ctrl_info,
6864
+ PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6865
+ PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6866
+}
6867
+
6868
+struct pqi_firmware_feature {
6869
+ char *feature_name;
6870
+ unsigned int feature_bit;
6871
+ bool supported;
6872
+ bool enabled;
6873
+ void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6874
+ struct pqi_firmware_feature *firmware_feature);
6875
+};
6876
+
6877
+static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6878
+ struct pqi_firmware_feature *firmware_feature)
6879
+{
6880
+ if (!firmware_feature->supported) {
6881
+ dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6882
+ firmware_feature->feature_name);
6883
+ return;
6884
+ }
6885
+
6886
+ if (firmware_feature->enabled) {
6887
+ dev_info(&ctrl_info->pci_dev->dev,
6888
+ "%s enabled\n", firmware_feature->feature_name);
6889
+ return;
6890
+ }
6891
+
6892
+ dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6893
+ firmware_feature->feature_name);
6894
+}
6895
+
6896
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6897
+ struct pqi_firmware_feature *firmware_feature)
6898
+{
6899
+ switch (firmware_feature->feature_bit) {
6900
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
6901
+ ctrl_info->soft_reset_handshake_supported =
6902
+ firmware_feature->enabled;
6903
+ break;
6904
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
6905
+ ctrl_info->raid_iu_timeout_supported =
6906
+ firmware_feature->enabled;
6907
+ break;
6908
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
6909
+ ctrl_info->tmf_iu_timeout_supported =
6910
+ firmware_feature->enabled;
6911
+ break;
6912
+ }
6913
+
6914
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
6915
+}
6916
+
6917
+static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6918
+ struct pqi_firmware_feature *firmware_feature)
6919
+{
6920
+ if (firmware_feature->feature_status)
6921
+ firmware_feature->feature_status(ctrl_info, firmware_feature);
6922
+}
6923
+
6924
+static DEFINE_MUTEX(pqi_firmware_features_mutex);
6925
+
6926
+static struct pqi_firmware_feature pqi_firmware_features[] = {
6927
+ {
6928
+ .feature_name = "Online Firmware Activation",
6929
+ .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6930
+ .feature_status = pqi_firmware_feature_status,
6931
+ },
6932
+ {
6933
+ .feature_name = "Serial Management Protocol",
6934
+ .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6935
+ .feature_status = pqi_firmware_feature_status,
6936
+ },
6937
+ {
6938
+ .feature_name = "New Soft Reset Handshake",
6939
+ .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6940
+ .feature_status = pqi_ctrl_update_feature_flags,
6941
+ },
6942
+ {
6943
+ .feature_name = "RAID IU Timeout",
6944
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
6945
+ .feature_status = pqi_ctrl_update_feature_flags,
6946
+ },
6947
+ {
6948
+ .feature_name = "TMF IU Timeout",
6949
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
6950
+ .feature_status = pqi_ctrl_update_feature_flags,
6951
+ },
6952
+};
6953
+
6954
+static void pqi_process_firmware_features(
6955
+ struct pqi_config_table_section_info *section_info)
6956
+{
6957
+ int rc;
6958
+ struct pqi_ctrl_info *ctrl_info;
6959
+ struct pqi_config_table_firmware_features *firmware_features;
6960
+ void __iomem *firmware_features_iomem_addr;
6961
+ unsigned int i;
6962
+ unsigned int num_features_supported;
6963
+
6964
+ ctrl_info = section_info->ctrl_info;
6965
+ firmware_features = section_info->section;
6966
+ firmware_features_iomem_addr = section_info->section_iomem_addr;
6967
+
6968
+ for (i = 0, num_features_supported = 0;
6969
+ i < ARRAY_SIZE(pqi_firmware_features); i++) {
6970
+ if (pqi_is_firmware_feature_supported(firmware_features,
6971
+ pqi_firmware_features[i].feature_bit)) {
6972
+ pqi_firmware_features[i].supported = true;
6973
+ num_features_supported++;
6974
+ } else {
6975
+ pqi_firmware_feature_update(ctrl_info,
6976
+ &pqi_firmware_features[i]);
6977
+ }
6978
+ }
6979
+
6980
+ if (num_features_supported == 0)
6981
+ return;
6982
+
6983
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6984
+ if (!pqi_firmware_features[i].supported)
6985
+ continue;
6986
+ pqi_request_firmware_feature(firmware_features,
6987
+ pqi_firmware_features[i].feature_bit);
6988
+ }
6989
+
6990
+ rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6991
+ firmware_features_iomem_addr);
6992
+ if (rc) {
6993
+ dev_err(&ctrl_info->pci_dev->dev,
6994
+ "failed to enable firmware features in PQI configuration table\n");
6995
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6996
+ if (!pqi_firmware_features[i].supported)
6997
+ continue;
6998
+ pqi_firmware_feature_update(ctrl_info,
6999
+ &pqi_firmware_features[i]);
7000
+ }
7001
+ return;
7002
+ }
7003
+
7004
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7005
+ if (!pqi_firmware_features[i].supported)
7006
+ continue;
7007
+ if (pqi_is_firmware_feature_enabled(firmware_features,
7008
+ firmware_features_iomem_addr,
7009
+ pqi_firmware_features[i].feature_bit)) {
7010
+ pqi_firmware_features[i].enabled = true;
7011
+ }
7012
+ pqi_firmware_feature_update(ctrl_info,
7013
+ &pqi_firmware_features[i]);
7014
+ }
7015
+}
7016
+
7017
+static void pqi_init_firmware_features(void)
7018
+{
7019
+ unsigned int i;
7020
+
7021
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7022
+ pqi_firmware_features[i].supported = false;
7023
+ pqi_firmware_features[i].enabled = false;
7024
+ }
7025
+}
7026
+
7027
+static void pqi_process_firmware_features_section(
7028
+ struct pqi_config_table_section_info *section_info)
7029
+{
7030
+ mutex_lock(&pqi_firmware_features_mutex);
7031
+ pqi_init_firmware_features();
7032
+ pqi_process_firmware_features(section_info);
7033
+ mutex_unlock(&pqi_firmware_features_mutex);
59757034 }
59767035
59777036 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
....@@ -5981,8 +7040,11 @@
59817040 void __iomem *table_iomem_addr;
59827041 struct pqi_config_table *config_table;
59837042 struct pqi_config_table_section_header *section;
7043
+ struct pqi_config_table_section_info section_info;
59847044
59857045 table_length = ctrl_info->config_table_length;
7046
+ if (table_length == 0)
7047
+ return 0;
59867048
59877049 config_table = kmalloc(table_length, GFP_KERNEL);
59887050 if (!config_table) {
....@@ -5999,13 +7061,22 @@
59997061 ctrl_info->config_table_offset;
60007062 memcpy_fromio(config_table, table_iomem_addr, table_length);
60017063
7064
+ section_info.ctrl_info = ctrl_info;
60027065 section_offset =
60037066 get_unaligned_le32(&config_table->first_section_offset);
60047067
60057068 while (section_offset) {
60067069 section = (void *)config_table + section_offset;
60077070
7071
+ section_info.section = section;
7072
+ section_info.section_offset = section_offset;
7073
+ section_info.section_iomem_addr =
7074
+ table_iomem_addr + section_offset;
7075
+
60087076 switch (get_unaligned_le16(&section->section_id)) {
7077
+ case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7078
+ pqi_process_firmware_features_section(&section_info);
7079
+ break;
60097080 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
60107081 if (pqi_disable_heartbeat)
60117082 dev_warn(&ctrl_info->pci_dev->dev,
....@@ -6017,6 +7088,13 @@
60177088 offsetof(
60187089 struct pqi_config_table_heartbeat,
60197090 heartbeat_counter);
7091
+ break;
7092
+ case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7093
+ ctrl_info->soft_reset_status =
7094
+ table_iomem_addr +
7095
+ section_offset +
7096
+ offsetof(struct pqi_config_table_soft_reset,
7097
+ soft_reset_status);
60207098 break;
60217099 }
60227100
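
pqi_process_config_table() snapshots the whole table out of BAR memory with memcpy_fromio() and then follows a chain of sections, each header carrying a section_id plus the offset of the next section, with offset 0 terminating the walk; the patch additionally records each section's iomem address so the firmware-features array and the soft-reset status byte can be written back in place. The traversal pattern over a plain byte buffer (the header layout is a simplified stand-in, and the bounds check is my addition):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct section_header {
        uint16_t section_id;
        uint16_t next_section_offset;   /* 0 == end of chain */
};

/* Walk offset-chained sections inside a snapshotted config table. */
static void walk_sections(const uint8_t *table, size_t table_len,
                          uint16_t first_offset)
{
        uint16_t off = first_offset;

        while (off != 0) {
                struct section_header hdr;

                if ((size_t)off + sizeof(hdr) > table_len)
                        break;          /* malformed chain: stop */
                memcpy(&hdr, table + off, sizeof(hdr));
                printf("section id %u at offset %u\n",
                       hdr.section_id, off);
                off = hdr.next_section_offset;
        }
}
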
....@@ -6071,13 +7149,20 @@
60717149 return pqi_revert_to_sis_mode(ctrl_info);
60727150 }
60737151
7152
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
7153
+
60747154 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
60757155 {
60767156 int rc;
60777157
6078
- rc = pqi_force_sis_mode(ctrl_info);
6079
- if (rc)
6080
- return rc;
7158
+ if (reset_devices) {
7159
+ sis_soft_reset(ctrl_info);
7160
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7161
+ } else {
7162
+ rc = pqi_force_sis_mode(ctrl_info);
7163
+ if (rc)
7164
+ return rc;
7165
+ }
60817166
60827167 /*
60837168 * Wait until the controller is ready to start accepting SIS
....@@ -6150,10 +7235,6 @@
61507235 ctrl_info->pqi_mode_enabled = true;
61517236 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
61527237
6153
- rc = pqi_process_config_table(ctrl_info);
6154
- if (rc)
6155
- return rc;
6156
-
61577238 rc = pqi_alloc_admin_queues(ctrl_info);
61587239 if (rc) {
61597240 dev_err(&ctrl_info->pci_dev->dev,
....@@ -6215,6 +7296,11 @@
62157296 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
62167297
62177298 ctrl_info->controller_online = true;
7299
+
7300
+ rc = pqi_process_config_table(ctrl_info);
7301
+ if (rc)
7302
+ return rc;
7303
+
62187304 pqi_start_heartbeat_timer(ctrl_info);
62197305
62207306 rc = pqi_enable_events(ctrl_info);
....@@ -6229,10 +7315,24 @@
62297315 if (rc)
62307316 return rc;
62317317
6232
- rc = pqi_get_ctrl_firmware_version(ctrl_info);
7318
+ rc = pqi_get_ctrl_product_details(ctrl_info);
62337319 if (rc) {
62347320 dev_err(&ctrl_info->pci_dev->dev,
6235
- "error obtaining firmware version\n");
7321
+ "error obtaining product details\n");
7322
+ return rc;
7323
+ }
7324
+
7325
+ rc = pqi_get_ctrl_serial_number(ctrl_info);
7326
+ if (rc) {
7327
+ dev_err(&ctrl_info->pci_dev->dev,
7328
+ "error obtaining ctrl serial number\n");
7329
+ return rc;
7330
+ }
7331
+
7332
+ rc = pqi_set_diag_rescan(ctrl_info);
7333
+ if (rc) {
7334
+ dev_err(&ctrl_info->pci_dev->dev,
7335
+ "error enabling multi-lun rescan\n");
62367336 return rc;
62377337 }
62387338
....@@ -6293,6 +7393,24 @@
62937393 return rc;
62947394
62957395 /*
7396
+ * Get the controller properties. This allows us to determine
7397
+ * whether or not it supports PQI mode.
7398
+ */
7399
+ rc = sis_get_ctrl_properties(ctrl_info);
7400
+ if (rc) {
7401
+ dev_err(&ctrl_info->pci_dev->dev,
7402
+ "error obtaining controller properties\n");
7403
+ return rc;
7404
+ }
7405
+
7406
+ rc = sis_get_pqi_capabilities(ctrl_info);
7407
+ if (rc) {
7408
+ dev_err(&ctrl_info->pci_dev->dev,
7409
+ "error obtaining controller capabilities\n");
7410
+ return rc;
7411
+ }
7412
+
7413
+ /*
62967414 * If the function we are about to call succeeds, the
62977415 * controller will transition from legacy SIS mode
62987416 * into PQI mode.
....@@ -6332,13 +7450,32 @@
63327450 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
63337451
63347452 ctrl_info->controller_online = true;
6335
- pqi_start_heartbeat_timer(ctrl_info);
63367453 pqi_ctrl_unblock_requests(ctrl_info);
7454
+
7455
+ rc = pqi_process_config_table(ctrl_info);
7456
+ if (rc)
7457
+ return rc;
7458
+
7459
+ pqi_start_heartbeat_timer(ctrl_info);
63377460
63387461 rc = pqi_enable_events(ctrl_info);
63397462 if (rc) {
63407463 dev_err(&ctrl_info->pci_dev->dev,
63417464 "error enabling events\n");
7465
+ return rc;
7466
+ }
7467
+
7468
+ rc = pqi_get_ctrl_product_details(ctrl_info);
7469
+ if (rc) {
7470
+ dev_err(&ctrl_info->pci_dev->dev,
7471
+ "error obtaining product details\n");
7472
+ return rc;
7473
+ }
7474
+
7475
+ rc = pqi_set_diag_rescan(ctrl_info);
7476
+ if (rc) {
7477
+ dev_err(&ctrl_info->pci_dev->dev,
7478
+ "error enabling multi-lun rescan\n");
63427479 return rc;
63437480 }
63447481
....@@ -6359,8 +7496,12 @@
63597496 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
63607497 u16 timeout)
63617498 {
6362
- return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7499
+ int rc;
7500
+
7501
+ rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
63637502 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7503
+
7504
+ return pcibios_err_to_errno(rc);
63647505 }
63657506
63667507 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
....@@ -6393,7 +7534,7 @@
63937534 goto disable_device;
63947535 }
63957536
6396
- ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
7537
+ ctrl_info->iomem_base = ioremap(pci_resource_start(
63977538 ctrl_info->pci_dev, 0),
63987539 sizeof(struct pqi_ctrl_registers));
63997540 if (!ctrl_info->iomem_base) {
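
ioremap_nocache() was removed in v5.6; ioremap() has long been the uncached MMIO mapping on the architectures this driver runs on, so this hunk is a mechanical rename with no behavioral change. The usual pattern, as an illustrative fragment (register offset and length are made up):

        /* illustrative only: map BAR 0, read one register, unmap */
        static int demo_map_bar0(struct pci_dev *pdev)
        {
                void __iomem *regs;
                u32 val;

                regs = ioremap(pci_resource_start(pdev, 0), 0x100);
                if (!regs)
                        return -ENOMEM;
                val = readl(regs);      /* uncached MMIO read */
                iounmap(regs);
                return val ? 0 : -ENODEV;
        }
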
....@@ -6452,12 +7593,14 @@
64527593
64537594 mutex_init(&ctrl_info->scan_mutex);
64547595 mutex_init(&ctrl_info->lun_reset_mutex);
7596
+ mutex_init(&ctrl_info->ofa_mutex);
64557597
64567598 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
64577599 spin_lock_init(&ctrl_info->scsi_device_list_lock);
64587600
64597601 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
64607602 atomic_set(&ctrl_info->num_interrupts, 0);
7603
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
64617604
64627605 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
64637606 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
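
Two pieces of new per-controller state: ofa_mutex serializes an Online Firmware Activation against scans, resets, and shutdown, and sync_cmds_outstanding counts in-flight synchronous (driver-initiated) requests so the shutdown path can drain them before resetting the controller (see pqi_ctrl_wait_for_pending_sync_cmds() in the shutdown hunk below). A plausible shape for that drain helper, under assumed names; only the counter's role is the point:

        static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
        {
                while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
                        if (pqi_ctrl_offline(ctrl_info))        /* assumed bail-out hook */
                                return -ENXIO;
                        usleep_range(1000, 2000);
                }
                return 0;
        }
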
....@@ -6521,11 +7664,221 @@
65217664 {
65227665 pqi_cancel_rescan_worker(ctrl_info);
65237666 pqi_cancel_update_time_worker(ctrl_info);
6524
- pqi_remove_all_scsi_devices(ctrl_info);
65257667 pqi_unregister_scsi(ctrl_info);
65267668 if (ctrl_info->pqi_mode_enabled)
65277669 pqi_revert_to_sis_mode(ctrl_info);
65287670 pqi_free_ctrl_resources(ctrl_info);
7671
+}
7672
+
7673
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7674
+{
7675
+ pqi_cancel_update_time_worker(ctrl_info);
7676
+ pqi_cancel_rescan_worker(ctrl_info);
7677
+ pqi_wait_until_lun_reset_finished(ctrl_info);
7678
+ pqi_wait_until_scan_finished(ctrl_info);
7679
+ pqi_ctrl_ofa_start(ctrl_info);
7680
+ pqi_ctrl_block_requests(ctrl_info);
7681
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
7682
+ pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7683
+ pqi_fail_io_queued_for_all_devices(ctrl_info);
7684
+ pqi_wait_until_inbound_queues_empty(ctrl_info);
7685
+ pqi_stop_heartbeat_timer(ctrl_info);
7686
+ ctrl_info->pqi_mode_enabled = false;
7687
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7688
+}
7689
+
7690
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7691
+{
7692
+ pqi_ofa_free_host_buffer(ctrl_info);
7693
+ ctrl_info->pqi_mode_enabled = true;
7694
+ pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7695
+ ctrl_info->controller_online = true;
7696
+ pqi_ctrl_unblock_requests(ctrl_info);
7697
+ pqi_start_heartbeat_timer(ctrl_info);
7698
+ pqi_schedule_update_time_worker(ctrl_info);
7699
+ pqi_clear_soft_reset_status(ctrl_info,
7700
+ PQI_SOFT_RESET_ABORT);
7701
+ pqi_scan_scsi_devices(ctrl_info);
7702
+}
7703
+
7704
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7705
+ u32 total_size, u32 chunk_size)
7706
+{
7707
+ u32 sg_count;
7708
+ u32 size;
7709
+ int i;
7710
+ struct pqi_sg_descriptor *mem_descriptor = NULL;
7711
+ struct device *dev;
7712
+ struct pqi_ofa_memory *ofap;
7713
+
7714
+ dev = &ctrl_info->pci_dev->dev;
7715
+
7716
+ sg_count = (total_size + chunk_size - 1);
7717
+ sg_count /= chunk_size;
7718
+
7719
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7720
+
7721
+ if (sg_count * chunk_size < total_size)
7722
+ goto out;
7723
+
7724
+ ctrl_info->pqi_ofa_chunk_virt_addr =
7725
+ kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7726
+ if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7727
+ goto out;
7728
+
7729
+ for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7730
+ dma_addr_t dma_handle;
7731
+
7732
+ ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7733
+ dma_alloc_coherent(dev, chunk_size, &dma_handle,
7734
+ GFP_KERNEL);
7735
+
7736
+ if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7737
+ break;
7738
+
7739
+ mem_descriptor = &ofap->sg_descriptor[i];
7740
+ put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7741
+ put_unaligned_le32(chunk_size, &mem_descriptor->length);
7742
+ }
7743
+
7744
+ if (!size || size < total_size)
7745
+ goto out_free_chunks;
7746
+
7747
+ put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7748
+ put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7749
+ put_unaligned_le32(size, &ofap->bytes_allocated);
7750
+
7751
+ return 0;
7752
+
7753
+out_free_chunks:
7754
+ while (--i >= 0) {
7755
+ mem_descriptor = &ofap->sg_descriptor[i];
7756
+ dma_free_coherent(dev, chunk_size,
7757
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
7758
+ get_unaligned_le64(&mem_descriptor->address));
7759
+ }
7760
+ kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7761
+
7762
+out:
7763
+ put_unaligned_le32 (0, &ofap->bytes_allocated);
7764
+ return -ENOMEM;
7765
+}
7766
+
7767
+static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7768
+{
7769
+ u32 total_size;
7770
+ u32 min_chunk_size;
7771
+ u32 chunk_sz;
7772
+
7773
+ total_size = le32_to_cpu(
7774
+ ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7775
+ min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7776
+
7777
+ for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7778
+ if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7779
+ return 0;
7780
+
7781
+ return -ENOMEM;
7782
+}
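
pqi_ofa_alloc_host_buffer() above starts with one chunk covering the whole request and halves the chunk size until pqi_ofa_alloc_mem() succeeds, bounded below by total_size / PQI_OFA_MAX_SG_DESCRIPTORS so the result always fits the SG descriptor budget. A toy, DMA-free model of the strategy (try_alloc() is a stand-in for the real allocator):

        #include <errno.h>
        #include <stddef.h>

        /* stand-in for pqi_ofa_alloc_mem(): pretend only small chunks succeed */
        static int try_alloc(size_t chunk)
        {
                return chunk > 4096 ? -ENOMEM : 0;
        }

        static int alloc_chunked(size_t total, size_t max_chunks)
        {
                size_t min_chunk = total / max_chunks ? total / max_chunks : 1;
                size_t chunk;

                for (chunk = total; chunk >= min_chunk; chunk /= 2)
                        if (try_alloc(chunk) == 0)
                                return 0;
                return -ENOMEM;
        }
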
7783
+
7784
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7785
+ u32 bytes_requested)
7786
+{
7787
+ struct pqi_ofa_memory *pqi_ofa_memory;
7788
+ struct device *dev;
7789
+
7790
+ dev = &ctrl_info->pci_dev->dev;
7791
+ pqi_ofa_memory = dma_alloc_coherent(dev,
7792
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7793
+ &ctrl_info->pqi_ofa_mem_dma_handle,
7794
+ GFP_KERNEL);
7795
+
7796
+ if (!pqi_ofa_memory)
7797
+ return;
7798
+
7799
+ put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7800
+ memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7801
+ sizeof(pqi_ofa_memory->signature));
7802
+ pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7803
+
7804
+ ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7805
+
7806
+ if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7807
+ dev_err(dev, "Failed to allocate host buffer of size = %u",
7808
+ bytes_requested);
7809
+ }
7810
+
7811
+ return;
7812
+}
7813
+
7814
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7815
+{
7816
+ int i;
7817
+ struct pqi_sg_descriptor *mem_descriptor;
7818
+ struct pqi_ofa_memory *ofap;
7819
+
7820
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7821
+
7822
+ if (!ofap)
7823
+ return;
7824
+
7825
+ if (!ofap->bytes_allocated)
7826
+ goto out;
7827
+
7828
+ mem_descriptor = ofap->sg_descriptor;
7829
+
7830
+ for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7831
+ i++) {
7832
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
7833
+ get_unaligned_le32(&mem_descriptor[i].length),
7834
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
7835
+ get_unaligned_le64(&mem_descriptor[i].address));
7836
+ }
7837
+ kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7838
+
7839
+out:
7840
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
7841
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7842
+ ctrl_info->pqi_ofa_mem_dma_handle);
7843
+ ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7844
+}
7845
+
7846
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7847
+{
7848
+ struct pqi_vendor_general_request request;
7849
+ size_t size;
7850
+ struct pqi_ofa_memory *ofap;
7851
+
7852
+ memset(&request, 0, sizeof(request));
7853
+
7854
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7855
+
7856
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7857
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7858
+ &request.header.iu_length);
7859
+ put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7860
+ &request.function_code);
7861
+
7862
+ if (ofap) {
7863
+ size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7864
+ get_unaligned_le16(&ofap->num_memory_descriptors) *
7865
+ sizeof(struct pqi_sg_descriptor);
7866
+
7867
+ put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7868
+ &request.data.ofa_memory_allocation.buffer_address);
7869
+ put_unaligned_le32(size,
7870
+ &request.data.ofa_memory_allocation.buffer_length);
7871
+
7872
+ }
7873
+
7874
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7875
+ 0, NULL, NO_TIMEOUT);
7876
+}
7877
+
7878
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7879
+{
7880
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7881
+ return pqi_ctrl_init_resume(ctrl_info);
65297882 }
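
Taken together, the OFA pieces added in this hunk slot into the event flow roughly as follows (condensed from the handlers above and the event-worker changes elsewhere in this patch):

        /*
         * firmware raises PQI_EVENT_TYPE_OFA, then:
         *   memory request  -> pqi_ofa_setup_host_buffer()
         *                      pqi_ofa_host_memory_update()
         *   quiesce request -> pqi_ofa_ctrl_quiesce()
         *   firmware activates; after the soft reset:
         *                      pqi_ofa_ctrl_restart()
         *                      pqi_ofa_ctrl_unquiesce()  (frees the buffer)
         */
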
65307883
65317884 static void pqi_perform_lockup_action(void)
....@@ -6626,7 +7979,7 @@
66267979 const struct pci_device_id *id)
66277980 {
66287981 int rc;
6629
- int node;
7982
+ int node, cp_node;
66307983 struct pqi_ctrl_info *ctrl_info;
66317984
66327985 pqi_print_ctrl_info(pci_dev, id);
....@@ -6644,8 +7997,12 @@
66447997 "controller device ID matched using wildcards\n");
66457998
66467999 node = dev_to_node(&pci_dev->dev);
6647
- if (node == NUMA_NO_NODE)
6648
- set_dev_node(&pci_dev->dev, 0);
8000
+ if (node == NUMA_NO_NODE) {
8001
+ cp_node = cpu_to_node(0);
8002
+ if (cp_node == NUMA_NO_NODE)
8003
+ cp_node = 0;
8004
+ set_dev_node(&pci_dev->dev, cp_node);
8005
+ }
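
When the platform reports no NUMA affinity for the device, the probe path now pins it to the node of CPU 0 (falling back to node 0) instead of unconditionally to node 0. Note that node itself still holds NUMA_NO_NODE when passed to pqi_alloc_ctrl_info() below; that is harmless because kzalloc_node() treats NUMA_NO_NODE as "no preference":

        /* sketch of the allocation contract, assuming pqi_alloc_ctrl_info()
         * is kzalloc_node()-backed as in the upstream driver: */
        ctrl_info = kzalloc_node(sizeof(*ctrl_info), GFP_KERNEL, NUMA_NO_NODE);
        if (!ctrl_info)
                return NULL;
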
66498006
66508007 ctrl_info = pqi_alloc_ctrl_info(node);
66518008 if (!ctrl_info) {
....@@ -6680,7 +8037,25 @@
66808037 if (!ctrl_info)
66818038 return;
66828039
8040
+ ctrl_info->in_shutdown = true;
8041
+
66838042 pqi_remove_ctrl(ctrl_info);
8043
+}
8044
+
8045
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8046
+{
8047
+ unsigned int i;
8048
+ struct pqi_io_request *io_request;
8049
+ struct scsi_cmnd *scmd;
8050
+
8051
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
8052
+ io_request = &ctrl_info->io_request_pool[i];
8053
+ if (atomic_read(&io_request->refcount) == 0)
8054
+ continue;
8055
+ scmd = io_request->scmd;
8056
+ WARN_ON(scmd != NULL); /* I/O command from SML */
8057
+ WARN_ON(scmd == NULL); /* non-I/O cmd or driver-initiated */
8058
+ }
66848059 }
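
pqi_crash_if_pending_command() sweeps the whole I/O request pool at shutdown; any slot whose refcount is still raised is a leaked request, and exactly one of the two complementary WARN_ON()s fires for it, identifying whether it came from the SCSI midlayer or from the driver itself. (Despite its name, the function warns rather than panics.) One way to spell the classification out, with the same effect:

        if (scmd)
                WARN(1, "I/O command from SML still pending\n");
        else
                WARN(1, "non-I/O or driver-initiated request still pending\n");
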
66858060
66868061 static void pqi_shutdown(struct pci_dev *pci_dev)
....@@ -6689,22 +8064,51 @@
66898064 struct pqi_ctrl_info *ctrl_info;
66908065
66918066 ctrl_info = pci_get_drvdata(pci_dev);
6692
- if (!ctrl_info)
6693
- goto error;
8067
+ if (!ctrl_info) {
8068
+ dev_err(&pci_dev->dev,
8069
+ "cache could not be flushed\n");
8070
+ return;
8071
+ }
8072
+
8073
+ pqi_disable_events(ctrl_info);
8074
+ pqi_wait_until_ofa_finished(ctrl_info);
8075
+ pqi_cancel_update_time_worker(ctrl_info);
8076
+ pqi_cancel_rescan_worker(ctrl_info);
8077
+ pqi_cancel_event_worker(ctrl_info);
8078
+
8079
+ pqi_ctrl_shutdown_start(ctrl_info);
8080
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
8081
+
8082
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8083
+ if (rc) {
8084
+ dev_err(&pci_dev->dev,
8085
+ "wait for pending I/O failed\n");
8086
+ return;
8087
+ }
8088
+
8089
+ pqi_ctrl_block_device_reset(ctrl_info);
8090
+ pqi_wait_until_lun_reset_finished(ctrl_info);
66948091
66958092 /*
66968093 * Write all data in the controller's battery-backed cache to
66978094 * storage.
66988095 */
66998096 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
6700
- pqi_free_interrupts(ctrl_info);
6701
- pqi_reset(ctrl_info);
6702
- if (rc == 0)
6703
- return;
8097
+ if (rc)
8098
+ dev_err(&pci_dev->dev,
8099
+ "unable to flush controller cache\n");
67048100
6705
-error:
6706
- dev_warn(&pci_dev->dev,
6707
- "unable to flush controller cache\n");
8101
+ pqi_ctrl_block_requests(ctrl_info);
8102
+
8103
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8104
+ if (rc) {
8105
+ dev_err(&pci_dev->dev,
8106
+ "wait for pending sync cmds failed\n");
8107
+ return;
8108
+ }
8109
+
8110
+ pqi_crash_if_pending_command(ctrl_info);
8111
+ pqi_reset(ctrl_info);
67088112 }
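
The rewritten shutdown path is now strictly ordered: stop new work (events, OFA, workers), quiesce, drain device I/O, block resets, flush the write cache, block new requests, drain synchronous commands, check for leaked requests, and only then reset. The cache flush is a BMIC request whose payload carries the reason; paraphrased from the shutdown-event enum in smartpqi.h:

        enum bmic_flush_cache_shutdown_event {
                NONE_CACHE_FLUSH_ONLY = 0,
                SHUTDOWN = 1,
                HIBERNATE = 2,
                SUSPEND = 3,
                RESTART = 4,
        };
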
67098113
67108114 static void pqi_process_lockup_action_param(void)
....@@ -6742,11 +8146,12 @@
67428146 pqi_cancel_rescan_worker(ctrl_info);
67438147 pqi_wait_until_scan_finished(ctrl_info);
67448148 pqi_wait_until_lun_reset_finished(ctrl_info);
8149
+ pqi_wait_until_ofa_finished(ctrl_info);
67458150 pqi_flush_cache(ctrl_info, SUSPEND);
67468151 pqi_ctrl_block_requests(ctrl_info);
67478152 pqi_ctrl_wait_until_quiesced(ctrl_info);
67488153 pqi_wait_until_inbound_queues_empty(ctrl_info);
6749
- pqi_ctrl_wait_for_pending_io(ctrl_info);
8154
+ pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
67508155 pqi_stop_heartbeat_timer(ctrl_info);
67518156
67528157 if (state.event == PM_EVENT_FREEZE)
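
The next several hunks only widen the PCI ID table: new subsystem IDs for several OEM subsystem vendors (0x193d, 0x1bd4, 0x19e5, 0x1590, 0x1d8d, Gigabyte, HP) plus a large batch of Adaptec 0x14xx boards. Every entry matches the same vendor/device pair (ADAPTEC2/0x028f) and differs only in the subsystem IDs; PCI_DEVICE_SUB is the helper that fills all four match fields (from include/linux/pci.h):

        #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
                .vendor = (vend), .device = (dev), \
                .subvendor = (subvend), .subdevice = (subdev)
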
....@@ -6828,7 +8233,35 @@
68288233 },
68298234 {
68308235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8236
+ 0x193d, 0x1104)
8237
+ },
8238
+ {
8239
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8240
+ 0x193d, 0x1105)
8241
+ },
8242
+ {
8243
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8244
+ 0x193d, 0x1106)
8245
+ },
8246
+ {
8247
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8248
+ 0x193d, 0x1107)
8249
+ },
8250
+ {
8251
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8252
+ 0x193d, 0x8460)
8253
+ },
8254
+ {
8255
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
68318256 0x193d, 0x8461)
8257
+ },
8258
+ {
8259
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8260
+ 0x193d, 0xc460)
8261
+ },
8262
+ {
8263
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8264
+ 0x193d, 0xc461)
68328265 },
68338266 {
68348267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
....@@ -6865,6 +8298,50 @@
68658298 {
68668299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
68678300 0x1bd4, 0x004c)
8301
+ },
8302
+ {
8303
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8304
+ 0x1bd4, 0x004f)
8305
+ },
8306
+ {
8307
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8308
+ 0x1bd4, 0x0051)
8309
+ },
8310
+ {
8311
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8312
+ 0x1bd4, 0x0052)
8313
+ },
8314
+ {
8315
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8316
+ 0x1bd4, 0x0053)
8317
+ },
8318
+ {
8319
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8320
+ 0x1bd4, 0x0054)
8321
+ },
8322
+ {
8323
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8324
+ 0x19e5, 0xd227)
8325
+ },
8326
+ {
8327
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8328
+ 0x19e5, 0xd228)
8329
+ },
8330
+ {
8331
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8332
+ 0x19e5, 0xd229)
8333
+ },
8334
+ {
8335
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8336
+ 0x19e5, 0xd22a)
8337
+ },
8338
+ {
8339
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8340
+ 0x19e5, 0xd22b)
8341
+ },
8342
+ {
8343
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8344
+ 0x19e5, 0xd22c)
68688345 },
68698346 {
68708347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
....@@ -6905,6 +8382,18 @@
69058382 {
69068383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
69078384 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8385
+ },
8386
+ {
8387
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8388
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8389
+ },
8390
+ {
8391
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8392
+ PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8393
+ },
8394
+ {
8395
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8396
+ PCI_VENDOR_ID_ADAPTEC2, 0x080a)
69088397 },
69098398 {
69108399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
....@@ -6992,6 +8481,122 @@
69928481 },
69938482 {
69948483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8484
+ PCI_VENDOR_ID_ADAPTEC2, 0x1400)
8485
+ },
8486
+ {
8487
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8488
+ PCI_VENDOR_ID_ADAPTEC2, 0x1402)
8489
+ },
8490
+ {
8491
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8492
+ PCI_VENDOR_ID_ADAPTEC2, 0x1410)
8493
+ },
8494
+ {
8495
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8496
+ PCI_VENDOR_ID_ADAPTEC2, 0x1411)
8497
+ },
8498
+ {
8499
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8500
+ PCI_VENDOR_ID_ADAPTEC2, 0x1412)
8501
+ },
8502
+ {
8503
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8504
+ PCI_VENDOR_ID_ADAPTEC2, 0x1420)
8505
+ },
8506
+ {
8507
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8508
+ PCI_VENDOR_ID_ADAPTEC2, 0x1430)
8509
+ },
8510
+ {
8511
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8512
+ PCI_VENDOR_ID_ADAPTEC2, 0x1440)
8513
+ },
8514
+ {
8515
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8516
+ PCI_VENDOR_ID_ADAPTEC2, 0x1441)
8517
+ },
8518
+ {
8519
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8520
+ PCI_VENDOR_ID_ADAPTEC2, 0x1450)
8521
+ },
8522
+ {
8523
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8524
+ PCI_VENDOR_ID_ADAPTEC2, 0x1452)
8525
+ },
8526
+ {
8527
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8528
+ PCI_VENDOR_ID_ADAPTEC2, 0x1460)
8529
+ },
8530
+ {
8531
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8532
+ PCI_VENDOR_ID_ADAPTEC2, 0x1461)
8533
+ },
8534
+ {
8535
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8536
+ PCI_VENDOR_ID_ADAPTEC2, 0x1462)
8537
+ },
8538
+ {
8539
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8540
+ PCI_VENDOR_ID_ADAPTEC2, 0x1470)
8541
+ },
8542
+ {
8543
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8544
+ PCI_VENDOR_ID_ADAPTEC2, 0x1471)
8545
+ },
8546
+ {
8547
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8548
+ PCI_VENDOR_ID_ADAPTEC2, 0x1472)
8549
+ },
8550
+ {
8551
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8552
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
8553
+ },
8554
+ {
8555
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8556
+ PCI_VENDOR_ID_ADAPTEC2, 0x1490)
8557
+ },
8558
+ {
8559
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8560
+ PCI_VENDOR_ID_ADAPTEC2, 0x1491)
8561
+ },
8562
+ {
8563
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8564
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
8565
+ },
8566
+ {
8567
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8568
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
8569
+ },
8570
+ {
8571
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8572
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
8573
+ },
8574
+ {
8575
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8576
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
8577
+ },
8578
+ {
8579
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8580
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
8581
+ },
8582
+ {
8583
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8584
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
8585
+ },
8586
+ {
8587
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8588
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
8589
+ },
8590
+ {
8591
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8592
+ PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
8593
+ },
8594
+ {
8595
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8596
+ PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
8597
+ },
8598
+ {
8599
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
69958600 PCI_VENDOR_ID_ADVANTECH, 0x8312)
69968601 },
69978602 {
....@@ -7056,11 +8661,51 @@
70568661 },
70578662 {
70588663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8664
+ PCI_VENDOR_ID_HP, 0x1002)
8665
+ },
8666
+ {
8667
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
70598668 PCI_VENDOR_ID_HP, 0x1100)
70608669 },
70618670 {
70628671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
70638672 PCI_VENDOR_ID_HP, 0x1101)
8673
+ },
8674
+ {
8675
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8676
+ 0x1590, 0x0294)
8677
+ },
8678
+ {
8679
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8680
+ 0x1590, 0x02db)
8681
+ },
8682
+ {
8683
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8684
+ 0x1590, 0x02dc)
8685
+ },
8686
+ {
8687
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8688
+ 0x1590, 0x032e)
8689
+ },
8690
+ {
8691
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8692
+ 0x1d8d, 0x0800)
8693
+ },
8694
+ {
8695
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8696
+ 0x1d8d, 0x0908)
8697
+ },
8698
+ {
8699
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8700
+ 0x1d8d, 0x0806)
8701
+ },
8702
+ {
8703
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8704
+ 0x1d8d, 0x0916)
8705
+ },
8706
+ {
8707
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8708
+ PCI_VENDOR_ID_GIGABYTE, 0x1000)
70648709 },
70658710 {
70668711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
....@@ -7089,8 +8734,7 @@
70898734
70908735 pr_info(DRIVER_NAME "\n");
70918736
7092
- pqi_sas_transport_template =
7093
- sas_attach_transport(&pqi_sas_transport_functions);
8737
+ pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
70948738 if (!pqi_sas_transport_template)
70958739 return -ENODEV;
70968740
....@@ -7280,11 +8924,11 @@
72808924 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
72818925 data.delete_operational_queue.queue_id) != 12);
72828926 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7283
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8927
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
72848928 data.create_operational_iq) != 64 - 11);
7285
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8929
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
72868930 data.create_operational_oq) != 64 - 11);
7287
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8931
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
72888932 data.delete_operational_queue) != 64 - 11);
72898933
72908934 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
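
FIELD_SIZEOF() was retired in favor of the identically-behaved sizeof_field() (treewide conversion, around v5.5), hence the mechanical rename above. The macro, from include/linux/stddef.h:

        #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
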
....@@ -7331,6 +8975,8 @@
73318975 error_index) != 27);
73328976 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
73338977 cdb) != 32);
8978
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8979
+ timeout) != 60);
73348980 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
73358981 sg_descriptors) != 64);
73368982 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
....@@ -7486,6 +9132,8 @@
74869132 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
74879133 nexus_id) != 10);
74889134 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9135
+ timeout) != 14);
9136
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
74899137 lun_number) != 16);
74909138 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
74919139 protocol_specific) != 24);
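
The new BUILD_BUG_ON() lines pin the byte offsets of the timeout fields just added to the RAID-path and task-management IUs (60 and 14 respectively), so any accidental reshuffling of these firmware-visible structures fails at compile time rather than on the wire. A minimal illustration with a hypothetical struct:

        struct demo_iu {
                u8 hdr[14];
                __le16 timeout; /* firmware expects this at byte 14 */
        };

        static void demo_layout_checks(void)
        {
                /* breaks the build, not the boot, if the layout drifts */
                BUILD_BUG_ON(offsetof(struct demo_iu, timeout) != 14);
        }
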