2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/scsi/arcmsr/arcmsr_hba.c
....@@ -41,7 +41,7 @@
4141 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4242 *******************************************************************************
4343 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
44
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
4545 *******************************************************************************
4646 */
4747 #include <linux/module.h>
....@@ -91,6 +91,10 @@
9191 module_param(cmd_per_lun, int, S_IRUGO);
9292 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
9393
94
+static int dma_mask_64 = 0;
95
+module_param(dma_mask_64, int, S_IRUGO);
96
+MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
97
+
9498 static int set_date_time = 0;
9599 module_param(set_date_time, int, S_IRUGO);
96100 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
....@@ -129,6 +133,7 @@
129133 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
130134 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
131135 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
136
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
132137 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
133138 static const char *arcmsr_info(struct Scsi_Host *);
134139 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
....@@ -156,7 +161,6 @@
156161 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
157162 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
158163 .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
159
- .use_clustering = ENABLE_CLUSTERING,
160164 .shost_attrs = arcmsr_host_attrs,
161165 .no_write_same = 1,
162166 };
....@@ -206,6 +210,8 @@
206210 .driver_data = ACB_ADAPTER_TYPE_C},
207211 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
208212 .driver_data = ACB_ADAPTER_TYPE_E},
213
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
214
+ .driver_data = ACB_ADAPTER_TYPE_F},
209215 {0, 0}, /* Terminating entry */
210216 };
211217 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
....@@ -224,16 +230,16 @@
224230 ****************************************************************************
225231 */
226232
227
-static void arcmsr_free_mu(struct AdapterControlBlock *acb)
233
+static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
228234 {
229235 switch (acb->adapter_type) {
230236 case ACB_ADAPTER_TYPE_B:
231237 case ACB_ADAPTER_TYPE_D:
232
- case ACB_ADAPTER_TYPE_E: {
233
- dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
238
+ case ACB_ADAPTER_TYPE_E:
239
+ case ACB_ADAPTER_TYPE_F:
240
+ dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
234241 acb->dma_coherent2, acb->dma_coherent_handle2);
235242 break;
236
- }
237243 }
238244 }
239245
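
The rename from arcmsr_free_mu() to arcmsr_free_io_queue() goes with the switch from roundup_ccbsize to a dedicated acb->ioqueue_size: the coherent queue buffer must be released with the same size and DMA handle it was allocated with. A minimal sketch of that pairing follows; the wrapper names are illustrative, only dma_alloc_coherent()/dma_free_coherent() are real kernel APIs.

#include <linux/dma-mapping.h>

/* Sketch: allocate and free the I/O queue buffer with matching size and
 * DMA handle, which is what acb->ioqueue_size now records for B/D/E/F.
 */
static void *ioqueue_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void ioqueue_free(struct device *dev, size_t size,
			 void *cpu_addr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}
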
....@@ -267,7 +273,7 @@
267273 break;
268274 }
269275 case ACB_ADAPTER_TYPE_C:{
270
- acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
276
+ acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
271277 if (!acb->pmuC) {
272278 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
273279 return false;
....@@ -280,11 +286,10 @@
280286 }
281287 case ACB_ADAPTER_TYPE_D: {
282288 void __iomem *mem_base0;
283
- unsigned long addr, range, flags;
289
+ unsigned long addr, range;
284290
285291 addr = (unsigned long)pci_resource_start(pdev, 0);
286292 range = pci_resource_len(pdev, 0);
287
- flags = pci_resource_flags(pdev, 0);
288293 mem_base0 = ioremap(addr, range);
289294 if (!mem_base0) {
290295 pr_notice("arcmsr%d: memory mapping region fail\n",
....@@ -308,6 +313,19 @@
308313 acb->out_doorbell = 0;
309314 break;
310315 }
316
+ case ACB_ADAPTER_TYPE_F: {
317
+ acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
318
+ if (!acb->pmuF) {
319
+ pr_notice("arcmsr%d: memory mapping region fail\n",
320
+ acb->host->host_no);
321
+ return false;
322
+ }
323
+ writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
324
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
325
+ acb->in_doorbell = 0;
326
+ acb->out_doorbell = 0;
327
+ break;
328
+ }
311329 }
312330 return true;
313331 }
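
The new ACB_ADAPTER_TYPE_F case maps BAR0, clears any stale interrupt status, and rings the sync doorbell before the in/out doorbell shadows are zeroed. A hedged sketch of that bring-up pattern follows; the register offsets and the sync value are placeholders, not the real MessageUnit_F layout.

#include <linux/pci.h>
#include <linux/io.h>

#define DEMO_HOST_INT_STATUS	0x30	/* illustrative offset only */
#define DEMO_IOBOUND_DOORBELL	0x20	/* illustrative offset only */
#define DEMO_DOORBELL_SYNC	0x100	/* illustrative value only */

/* Sketch of the type F BAR0 bring-up: map the region, clear interrupt
 * status, then write the sync doorbell so driver and IOP start in step.
 */
static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *base;

	base = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!base)
		return NULL;
	writel(0, base + DEMO_HOST_INT_STATUS);
	writel(DEMO_DOORBELL_SYNC, base + DEMO_IOBOUND_DOORBELL);
	return base;
}
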
....@@ -315,25 +333,24 @@
315333 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
316334 {
317335 switch (acb->adapter_type) {
318
- case ACB_ADAPTER_TYPE_A:{
336
+ case ACB_ADAPTER_TYPE_A:
319337 iounmap(acb->pmuA);
320
- }
321
- break;
322
- case ACB_ADAPTER_TYPE_B:{
338
+ break;
339
+ case ACB_ADAPTER_TYPE_B:
323340 iounmap(acb->mem_base0);
324341 iounmap(acb->mem_base1);
325
- }
326
-
327
- break;
328
- case ACB_ADAPTER_TYPE_C:{
342
+ break;
343
+ case ACB_ADAPTER_TYPE_C:
329344 iounmap(acb->pmuC);
330
- }
331
- break;
345
+ break;
332346 case ACB_ADAPTER_TYPE_D:
333347 iounmap(acb->mem_base0);
334348 break;
335349 case ACB_ADAPTER_TYPE_E:
336350 iounmap(acb->pmuE);
351
+ break;
352
+ case ACB_ADAPTER_TYPE_F:
353
+ iounmap(acb->pmuF);
337354 break;
338355 }
339356 }
....@@ -350,16 +367,11 @@
350367 static int arcmsr_bios_param(struct scsi_device *sdev,
351368 struct block_device *bdev, sector_t capacity, int *geom)
352369 {
353
- int ret, heads, sectors, cylinders, total_capacity;
354
- unsigned char *buffer;/* return copy of block device's partition table */
370
+ int heads, sectors, cylinders, total_capacity;
355371
356
- buffer = scsi_bios_ptable(bdev);
357
- if (buffer) {
358
- ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
359
- kfree(buffer);
360
- if (ret != -1)
361
- return ret;
362
- }
372
+ if (scsi_partsize(bdev, capacity, geom))
373
+ return 0;
374
+
363375 total_capacity = capacity;
364376 heads = 64;
365377 sectors = 32;
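
With scsi_partsize() now taking the block device directly, the fallback path only runs when no partition table is found. The hunk is cut off after "sectors = 32;", so the sketch below shows the conventional continuation (assumed to match the usual SCSI-driver pattern): derive a legacy C/H/S geometry from the capacity, switching to 255/63 when 64/32 would overflow 1024 cylinders.

/* Sketch of the fallback geometry computation (assumed continuation). */
static void demo_fallback_geometry(unsigned long long capacity, int *geom)
{
	int heads = 64, sectors = 32;
	int cylinders = capacity / (heads * sectors);

	if (cylinders > 1024) {		/* too large for 64/32 geometry */
		heads = 255;
		sectors = 63;
		cylinders = capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
}
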
....@@ -555,26 +567,96 @@
555567 {
556568 switch (acb->adapter_type) {
557569
558
- case ACB_ADAPTER_TYPE_A: {
570
+ case ACB_ADAPTER_TYPE_A:
559571 arcmsr_hbaA_flush_cache(acb);
560
- }
561572 break;
562
-
563
- case ACB_ADAPTER_TYPE_B: {
573
+ case ACB_ADAPTER_TYPE_B:
564574 arcmsr_hbaB_flush_cache(acb);
565
- }
566575 break;
567
- case ACB_ADAPTER_TYPE_C: {
576
+ case ACB_ADAPTER_TYPE_C:
568577 arcmsr_hbaC_flush_cache(acb);
569
- }
570578 break;
571579 case ACB_ADAPTER_TYPE_D:
572580 arcmsr_hbaD_flush_cache(acb);
573581 break;
574582 case ACB_ADAPTER_TYPE_E:
583
+ case ACB_ADAPTER_TYPE_F:
575584 arcmsr_hbaE_flush_cache(acb);
576585 break;
577586 }
587
+}
588
+
589
+static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
590
+{
591
+ struct MessageUnit_B *reg = acb->pmuB;
592
+
593
+ if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
594
+ reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
595
+ reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
596
+ reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
597
+ reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
598
+ } else {
599
+ reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
600
+ reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
601
+ reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
602
+ reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
603
+ }
604
+ reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
605
+ reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
606
+ reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
607
+}
608
+
609
+static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
610
+{
611
+ struct MessageUnit_D *reg = acb->pmuD;
612
+
613
+ reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
614
+ reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
615
+ reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
616
+ reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
617
+ reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
618
+ reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
619
+ reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
620
+ reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
621
+ reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
622
+ reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
623
+ reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
624
+ reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
625
+ reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
626
+ reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
627
+ reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
628
+ reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
629
+ reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
630
+ reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
631
+ reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
632
+ reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
633
+ reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
634
+ reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
635
+ reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
636
+ reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
637
+ reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
638
+ reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
639
+}
640
+
641
+static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
642
+{
643
+ dma_addr_t host_buffer_dma;
644
+ struct MessageUnit_F __iomem *pmuF;
645
+
646
+ memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
647
+ acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
648
+ acb->completeQ_size, 4);
649
+ acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
650
+ acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
651
+ memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
652
+ host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
653
+ pmuF = acb->pmuF;
654
+ /* host buffer low address, bit0:1 all buffer active */
655
+ writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
656
+ /* host buffer high address */
657
+ writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
658
+ /* set host buffer physical address */
659
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
578660 }
579661
580662 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
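
For type F the message buffers live in host memory: arcmsr_hbaF_assign_regAddr() carves them out of the same coherent allocation that holds the completion queue, advancing the CPU pointer and the bus address by the same rounded offset, and hands the bus address to the adapter with bit 0 set to mark the buffers active. A sketch of that carve-out follows; msgaddr_lo/msgaddr_hi stand in for the real inbound_msgaddr0/1 registers.

#include <linux/kernel.h>
#include <linux/io.h>

/* Sketch: completion queue first, then the 4-byte aligned message
 * buffers (write buffer, read buffer at +0x100, msgcode buffer at +0x200).
 */
static void *demo_assign_host_buffers(void *cpu_base, dma_addr_t bus_base,
				      u32 completeQ_size,
				      void __iomem *msgaddr_lo,
				      void __iomem *msgaddr_hi)
{
	void *wbuffer = (void *)round_up((unsigned long)cpu_base + completeQ_size, 4);
	dma_addr_t bus = round_up(bus_base + completeQ_size, 4);

	writel(lower_32_bits(bus | 1), msgaddr_lo);	/* bit 0: buffers active */
	writel(upper_32_bits(bus), msgaddr_hi);
	return wbuffer;	/* rbuffer = wbuffer + 0x100, rwbuffer = wbuffer + 0x200 */
}
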
....@@ -586,9 +668,8 @@
586668
587669 switch (acb->adapter_type) {
588670 case ACB_ADAPTER_TYPE_B: {
589
- struct MessageUnit_B *reg;
590
- acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
591
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
671
+ acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
672
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
592673 &dma_coherent_handle, GFP_KERNEL);
593674 if (!dma_coherent) {
594675 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
....@@ -596,29 +677,13 @@
596677 }
597678 acb->dma_coherent_handle2 = dma_coherent_handle;
598679 acb->dma_coherent2 = dma_coherent;
599
- reg = (struct MessageUnit_B *)dma_coherent;
600
- acb->pmuB = reg;
601
- if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
602
- reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
603
- reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
604
- reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
605
- reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
606
- } else {
607
- reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
608
- reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
609
- reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
610
- reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
611
- }
612
- reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
613
- reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
614
- reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
680
+ acb->pmuB = (struct MessageUnit_B *)dma_coherent;
681
+ arcmsr_hbaB_assign_regAddr(acb);
615682 }
616683 break;
617684 case ACB_ADAPTER_TYPE_D: {
618
- struct MessageUnit_D *reg;
619
-
620
- acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
621
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
685
+ acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
686
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
622687 &dma_coherent_handle, GFP_KERNEL);
623688 if (!dma_coherent) {
624689 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
....@@ -626,41 +691,15 @@
626691 }
627692 acb->dma_coherent_handle2 = dma_coherent_handle;
628693 acb->dma_coherent2 = dma_coherent;
629
- reg = (struct MessageUnit_D *)dma_coherent;
630
- acb->pmuD = reg;
631
- reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
632
- reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
633
- reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
634
- reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
635
- reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
636
- reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
637
- reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
638
- reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
639
- reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
640
- reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
641
- reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
642
- reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
643
- reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
644
- reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
645
- reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
646
- reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
647
- reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
648
- reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
649
- reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
650
- reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
651
- reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
652
- reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
653
- reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
654
- reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
655
- reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
656
- reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
694
+ acb->pmuD = (struct MessageUnit_D *)dma_coherent;
695
+ arcmsr_hbaD_assign_regAddr(acb);
657696 }
658697 break;
659698 case ACB_ADAPTER_TYPE_E: {
660699 uint32_t completeQ_size;
661700 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
662
- acb->roundup_ccbsize = roundup(completeQ_size, 32);
663
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
701
+ acb->ioqueue_size = roundup(completeQ_size, 32);
702
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
664703 &dma_coherent_handle, GFP_KERNEL);
665704 if (!dma_coherent){
666705 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
....@@ -669,8 +708,30 @@
669708 acb->dma_coherent_handle2 = dma_coherent_handle;
670709 acb->dma_coherent2 = dma_coherent;
671710 acb->pCompletionQ = dma_coherent;
672
- acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
711
+ acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
673712 acb->doneq_index = 0;
713
+ }
714
+ break;
715
+ case ACB_ADAPTER_TYPE_F: {
716
+ uint32_t QueueDepth;
717
+ uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
718
+
719
+ arcmsr_wait_firmware_ready(acb);
720
+ QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
721
+ acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
722
+ acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
723
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
724
+ &dma_coherent_handle, GFP_KERNEL);
725
+ if (!dma_coherent) {
726
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
727
+ return false;
728
+ }
729
+ acb->dma_coherent_handle2 = dma_coherent_handle;
730
+ acb->dma_coherent2 = dma_coherent;
731
+ acb->pCompletionQ = dma_coherent;
732
+ acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
733
+ acb->doneq_index = 0;
734
+ arcmsr_hbaF_assign_regAddr(acb);
674735 }
675736 break;
676737 default:
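
The type F branch sizes its queue from a 3-bit depth code the firmware reports in outbound_msgaddr1: the code indexes depthTbl, the completion queue gets one deliver_completeQ entry per slot plus 128 bytes of slack, and MESG_RW_BUFFER_SIZE is added for the host-resident message buffers before rounding to 32. A worked example of that arithmetic follows; the entry size and MESG_RW_BUFFER_SIZE value are assumptions for illustration, only the computation mirrors the hunk.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
	uint32_t entry_size = 8;		/* assumed sizeof(struct deliver_completeQ) */
	uint32_t mesg_rw_buffer = 0x800;	/* assumed MESG_RW_BUFFER_SIZE */
	uint32_t code = 2;			/* low 3 bits of outbound_msgaddr1; 0..5 index the table */

	uint32_t depth = depthTbl[code & 7];
	uint32_t completeQ_size = entry_size * depth + 128;
	uint32_t ioqueue_size = ((completeQ_size + mesg_rw_buffer) + 31) & ~31u;	/* roundup(x, 32) */

	printf("depth=%u completeQ_size=%u ioqueue_size=%u\n",
	       depth, completeQ_size, ioqueue_size);
	return 0;
}
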
....@@ -686,11 +747,11 @@
686747 dma_addr_t dma_coherent_handle;
687748 struct CommandControlBlock *ccb_tmp;
688749 int i = 0, j = 0;
689
- dma_addr_t cdb_phyaddr;
750
+ unsigned long cdb_phyaddr, next_ccb_phy;
690751 unsigned long roundup_ccbsize;
691752 unsigned long max_xfer_len;
692753 unsigned long max_sg_entrys;
693
- uint32_t firm_config_version;
754
+ uint32_t firm_config_version, curr_phy_upper32;
694755
695756 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
696757 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
....@@ -707,6 +768,8 @@
707768 acb->host->sg_tablesize = max_sg_entrys;
708769 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
709770 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
771
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
772
+ acb->uncache_size += acb->ioqueue_size;
710773 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
711774 if(!dma_coherent){
712775 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
....@@ -717,9 +780,10 @@
717780 memset(dma_coherent, 0, acb->uncache_size);
718781 acb->ccbsize = roundup_ccbsize;
719782 ccb_tmp = dma_coherent;
783
+ curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
720784 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
721785 for(i = 0; i < acb->maxFreeCCB; i++){
722
- cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
786
+ cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
723787 switch (acb->adapter_type) {
724788 case ACB_ADAPTER_TYPE_A:
725789 case ACB_ADAPTER_TYPE_B:
....@@ -728,6 +792,7 @@
728792 case ACB_ADAPTER_TYPE_C:
729793 case ACB_ADAPTER_TYPE_D:
730794 case ACB_ADAPTER_TYPE_E:
795
+ case ACB_ADAPTER_TYPE_F:
731796 ccb_tmp->cdb_phyaddr = cdb_phyaddr;
732797 break;
733798 }
....@@ -735,10 +800,36 @@
735800 ccb_tmp->acb = acb;
736801 ccb_tmp->smid = (u32)i << 16;
737802 INIT_LIST_HEAD(&ccb_tmp->list);
738
- list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
803
+ next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
804
+ if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
805
+ acb->maxFreeCCB = i;
806
+ acb->host->can_queue = i;
807
+ break;
808
+ }
809
+ else
810
+ list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
739811 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
740
- dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
812
+ dma_coherent_handle = next_ccb_phy;
741813 }
814
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
815
+ acb->dma_coherent_handle2 = dma_coherent_handle;
816
+ acb->dma_coherent2 = ccb_tmp;
817
+ }
818
+ switch (acb->adapter_type) {
819
+ case ACB_ADAPTER_TYPE_B:
820
+ acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
821
+ arcmsr_hbaB_assign_regAddr(acb);
822
+ break;
823
+ case ACB_ADAPTER_TYPE_D:
824
+ acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
825
+ arcmsr_hbaD_assign_regAddr(acb);
826
+ break;
827
+ case ACB_ADAPTER_TYPE_E:
828
+ acb->pCompletionQ = acb->dma_coherent2;
829
+ acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
830
+ acb->doneq_index = 0;
831
+ break;
832
+ }
742833 return 0;
743834 }
744835
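
The new next_ccb_phy/curr_phy_upper32 check exists because most adapter families only receive the low 32 bits of a CCB address per posted command; the high half is programmed once (cdb_phyaddr_hi32 / cdb_phyadd_hipart), so every CCB in the pool must share one upper-32-bit value. When the next CCB would cross a 4 GiB boundary the loop stops and shrinks maxFreeCCB and can_queue. A sketch of that check, under simplified types:

#include <linux/types.h>
#include <linux/kernel.h>

/* Sketch: count how many fixed-size CCBs fit before the bus address
 * leaves the 4 GiB window of the first CCB.
 */
static int demo_count_usable_ccbs(dma_addr_t pool_start, size_t ccb_size,
				  int max_ccbs)
{
	u32 hi32 = upper_32_bits(pool_start);
	dma_addr_t cur = pool_start;
	int i;

	for (i = 0; i < max_ccbs; i++) {
		if (upper_32_bits(cur + ccb_size) != hi32)
			break;		/* next CCB would cross the 4 GiB boundary */
		cur += ccb_size;
	}
	return i;			/* becomes maxFreeCCB / host->can_queue */
}
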
....@@ -753,7 +844,6 @@
753844 struct scsi_device *psdev;
754845 char diff, temp;
755846
756
- acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
757847 switch (acb->adapter_type) {
758848 case ACB_ADAPTER_TYPE_A: {
759849 struct MessageUnit_A __iomem *reg = acb->pmuA;
....@@ -790,8 +880,12 @@
790880 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
791881 break;
792882 }
883
+ case ACB_ADAPTER_TYPE_F: {
884
+ signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
885
+ devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
886
+ break;
887
+ }
793888 }
794
- atomic_inc(&acb->rq_map_token);
795889 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
796890 return;
797891 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
....@@ -822,6 +916,7 @@
822916 devicemap++;
823917 acb_dev_map++;
824918 }
919
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
825920 }
826921
827922 static int
....@@ -874,8 +969,6 @@
874969 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
875970 {
876971 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
877
- atomic_set(&pacb->rq_map_token, 16);
878
- atomic_set(&pacb->ante_token_value, 16);
879972 pacb->fw_flag = FW_NORMAL;
880973 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
881974 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
....@@ -887,6 +980,31 @@
887980 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
888981 pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
889982 add_timer(&pacb->refresh_timer);
983
+}
984
+
985
+static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
986
+{
987
+ struct pci_dev *pcidev = acb->pdev;
988
+
989
+ if (IS_DMA64) {
990
+ if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
991
+ dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
992
+ goto dma32;
993
+ if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
994
+ dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
995
+ printk("arcmsr: set DMA 64 mask failed\n");
996
+ return -ENXIO;
997
+ }
998
+ } else {
999
+dma32:
1000
+ if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1001
+ dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1002
+ dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1003
+ printk("arcmsr: set DMA 32-bit mask failed\n");
1004
+ return -ENXIO;
1005
+ }
1006
+ }
1007
+ return 0;
8901008 }
8911009
8921010 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
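
arcmsr_set_dma_mask() is why probe now records acb->adapter_type before touching the masks: type A stays at 32-bit DMA unless the dma_mask_64 module parameter is set, while the other families try 64-bit and fall back. A simplified sketch of that decision follows; the real function sets the streaming and coherent masks in separate steps and uses the driver's own IS_DMA64 macro rather than IS_ENABLED(CONFIG_64BIT).

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Simplified sketch of the mask selection, not the driver's exact flow. */
static int demo_set_dma_mask(struct pci_dev *pdev, bool is_type_A, int dma_mask_64)
{
	if (IS_ENABLED(CONFIG_64BIT) && !(is_type_A && !dma_mask_64) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;			/* 64-bit DMA accepted */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;			/* fall back to 32-bit DMA */
	return -ENXIO;
}
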
....@@ -903,22 +1021,15 @@
9031021 if(!host){
9041022 goto pci_disable_dev;
9051023 }
906
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
907
- if(error){
908
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
909
- if(error){
910
- printk(KERN_WARNING
911
- "scsi%d: No suitable DMA mask available\n",
912
- host->host_no);
913
- goto scsi_host_release;
914
- }
915
- }
9161024 init_waitqueue_head(&wait_q);
9171025 bus = pdev->bus->number;
9181026 dev_fun = pdev->devfn;
9191027 acb = (struct AdapterControlBlock *) host->hostdata;
9201028 memset(acb,0,sizeof(struct AdapterControlBlock));
9211029 acb->pdev = pdev;
1030
+ acb->adapter_type = id->driver_data;
1031
+ if (arcmsr_set_dma_mask(acb))
1032
+ goto scsi_host_release;
9221033 acb->host = host;
9231034 host->max_lun = ARCMSR_MAX_TARGETLUN;
9241035 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
....@@ -948,7 +1059,6 @@
9481059 ACB_F_MESSAGE_WQBUFFER_READED);
9491060 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
9501061 INIT_LIST_HEAD(&acb->ccb_free_list);
951
- acb->adapter_type = id->driver_data;
9521062 error = arcmsr_remap_pciregion(acb);
9531063 if(!error){
9541064 goto pci_release_regs;
....@@ -960,9 +1070,11 @@
9601070 if(!error){
9611071 goto free_hbb_mu;
9621072 }
1073
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
1074
+ arcmsr_free_io_queue(acb);
9631075 error = arcmsr_alloc_ccb_pool(acb);
9641076 if(error){
965
- goto free_hbb_mu;
1077
+ goto unmap_pci_region;
9661078 }
9671079 error = scsi_add_host(host, &pdev->dev);
9681080 if(error){
....@@ -990,8 +1102,9 @@
9901102 scsi_remove_host(host);
9911103 free_ccb_pool:
9921104 arcmsr_free_ccb_pool(acb);
1105
+ goto unmap_pci_region;
9931106 free_hbb_mu:
994
- arcmsr_free_mu(acb);
1107
+ arcmsr_free_io_queue(acb);
9951108 unmap_pci_region:
9961109 arcmsr_unmap_pciregion(acb);
9971110 pci_release_regs:
....@@ -1015,12 +1128,11 @@
10151128
10161129 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
10171130 {
1018
- uint32_t intmask_org;
10191131 struct Scsi_Host *host = pci_get_drvdata(pdev);
10201132 struct AdapterControlBlock *acb =
10211133 (struct AdapterControlBlock *)host->hostdata;
10221134
1023
- intmask_org = arcmsr_disable_outbound_ints(acb);
1135
+ arcmsr_disable_outbound_ints(acb);
10241136 arcmsr_free_irq(pdev, acb);
10251137 del_timer_sync(&acb->eternal_timer);
10261138 if (set_date_time)
....@@ -1037,7 +1149,6 @@
10371149
10381150 static int arcmsr_resume(struct pci_dev *pdev)
10391151 {
1040
- int error;
10411152 struct Scsi_Host *host = pci_get_drvdata(pdev);
10421153 struct AdapterControlBlock *acb =
10431154 (struct AdapterControlBlock *)host->hostdata;
....@@ -1049,24 +1160,38 @@
10491160 pr_warn("%s: pci_enable_device error\n", __func__);
10501161 return -ENODEV;
10511162 }
1052
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1053
- if (error) {
1054
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1055
- if (error) {
1056
- pr_warn("scsi%d: No suitable DMA mask available\n",
1057
- host->host_no);
1058
- goto controller_unregister;
1059
- }
1060
- }
1163
+ if (arcmsr_set_dma_mask(acb))
1164
+ goto controller_unregister;
10611165 pci_set_master(pdev);
10621166 if (arcmsr_request_irq(pdev, acb) == FAILED)
10631167 goto controller_stop;
1064
- if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
1168
+ switch (acb->adapter_type) {
1169
+ case ACB_ADAPTER_TYPE_B: {
1170
+ struct MessageUnit_B *reg = acb->pmuB;
1171
+ uint32_t i;
1172
+ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1173
+ reg->post_qbuffer[i] = 0;
1174
+ reg->done_qbuffer[i] = 0;
1175
+ }
1176
+ reg->postq_index = 0;
1177
+ reg->doneq_index = 0;
1178
+ break;
1179
+ }
1180
+ case ACB_ADAPTER_TYPE_E:
10651181 writel(0, &acb->pmuE->host_int_status);
10661182 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
10671183 acb->in_doorbell = 0;
10681184 acb->out_doorbell = 0;
10691185 acb->doneq_index = 0;
1186
+ break;
1187
+ case ACB_ADAPTER_TYPE_F:
1188
+ writel(0, &acb->pmuF->host_int_status);
1189
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
1190
+ acb->in_doorbell = 0;
1191
+ acb->out_doorbell = 0;
1192
+ acb->doneq_index = 0;
1193
+ arcmsr_hbaF_assign_regAddr(acb);
1194
+ break;
10701195 }
10711196 arcmsr_iop_init(acb);
10721197 arcmsr_init_get_devmap_timer(acb);
....@@ -1079,6 +1204,8 @@
10791204 controller_unregister:
10801205 scsi_remove_host(host);
10811206 arcmsr_free_ccb_pool(acb);
1207
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1208
+ arcmsr_free_io_queue(acb);
10821209 arcmsr_unmap_pciregion(acb);
10831210 pci_release_regions(pdev);
10841211 scsi_host_put(host);
....@@ -1158,25 +1285,20 @@
11581285 {
11591286 uint8_t rtnval = 0;
11601287 switch (acb->adapter_type) {
1161
- case ACB_ADAPTER_TYPE_A: {
1288
+ case ACB_ADAPTER_TYPE_A:
11621289 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1163
- }
11641290 break;
1165
-
1166
- case ACB_ADAPTER_TYPE_B: {
1291
+ case ACB_ADAPTER_TYPE_B:
11671292 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1168
- }
11691293 break;
1170
-
1171
- case ACB_ADAPTER_TYPE_C: {
1294
+ case ACB_ADAPTER_TYPE_C:
11721295 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1173
- }
11741296 break;
1175
-
11761297 case ACB_ADAPTER_TYPE_D:
11771298 rtnval = arcmsr_hbaD_abort_allcmd(acb);
11781299 break;
11791300 case ACB_ADAPTER_TYPE_E:
1301
+ case ACB_ADAPTER_TYPE_F:
11801302 rtnval = arcmsr_hbaE_abort_allcmd(acb);
11811303 break;
11821304 }
....@@ -1252,7 +1374,8 @@
12521374 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
12531375 }
12541376 break;
1255
- case ACB_ADAPTER_TYPE_E: {
1377
+ case ACB_ADAPTER_TYPE_E:
1378
+ case ACB_ADAPTER_TYPE_F: {
12561379 struct MessageUnit_E __iomem *reg = acb->pmuE;
12571380 orig_mask = readl(&reg->host_int_mask);
12581381 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
....@@ -1317,13 +1440,10 @@
13171440
13181441 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
13191442 {
1320
- int id, lun;
13211443 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
13221444 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
13231445 struct scsi_cmnd *abortcmd = pCCB->pcmd;
13241446 if (abortcmd) {
1325
- id = abortcmd->device->id;
1326
- lun = abortcmd->device->lun;
13271447 abortcmd->result |= DID_ABORT << 16;
13281448 arcmsr_ccb_complete(pCCB);
13291449 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
....@@ -1341,7 +1461,7 @@
13411461 , pCCB->acb
13421462 , pCCB->startdone
13431463 , atomic_read(&acb->ccboutstandingcount));
1344
- return;
1464
+ return;
13451465 }
13461466 arcmsr_report_ccb_state(acb, pCCB, error);
13471467 }
....@@ -1349,10 +1469,12 @@
13491469 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
13501470 {
13511471 int i = 0;
1352
- uint32_t flag_ccb, ccb_cdb_phy;
1472
+ uint32_t flag_ccb;
13531473 struct ARCMSR_CDB *pARCMSR_CDB;
13541474 bool error;
13551475 struct CommandControlBlock *pCCB;
1476
+ unsigned long ccb_cdb_phy;
1477
+
13561478 switch (acb->adapter_type) {
13571479
13581480 case ACB_ADAPTER_TYPE_A: {
....@@ -1364,7 +1486,10 @@
13641486 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
13651487 while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
13661488 && (i++ < acb->maxOutstanding)) {
1367
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1489
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1490
+ if (acb->cdb_phyadd_hipart)
1491
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1492
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
13681493 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
13691494 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
13701495 arcmsr_drain_donequeue(acb, pCCB, error);
....@@ -1380,7 +1505,10 @@
13801505 flag_ccb = reg->done_qbuffer[i];
13811506 if (flag_ccb != 0) {
13821507 reg->done_qbuffer[i] = 0;
1383
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1508
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1509
+ if (acb->cdb_phyadd_hipart)
1510
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1511
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
13841512 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
13851513 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
13861514 arcmsr_drain_donequeue(acb, pCCB, error);
....@@ -1397,7 +1525,9 @@
13971525 /*need to do*/
13981526 flag_ccb = readl(&reg->outbound_queueport_low);
13991527 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1400
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
1528
+ if (acb->cdb_phyadd_hipart)
1529
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1530
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
14011531 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
14021532 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
14031533 arcmsr_drain_donequeue(acb, pCCB, error);
....@@ -1428,6 +1558,8 @@
14281558 addressLow = pmu->done_qbuffer[doneq_index &
14291559 0xFFF].addressLow;
14301560 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1561
+ if (acb->cdb_phyadd_hipart)
1562
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
14311563 pARCMSR_CDB = (struct ARCMSR_CDB *)
14321564 (acb->vir2phy_offset + ccb_cdb_phy);
14331565 pCCB = container_of(pARCMSR_CDB,
....@@ -1449,6 +1581,9 @@
14491581 break;
14501582 case ACB_ADAPTER_TYPE_E:
14511583 arcmsr_hbaE_postqueue_isr(acb);
1584
+ break;
1585
+ case ACB_ADAPTER_TYPE_F:
1586
+ arcmsr_hbaF_postqueue_isr(acb);
14521587 break;
14531588 }
14541589 }
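
The cdb_phyadd_hipart additions in these hunks all implement the same step: the reply queue returns only the low 32 bits of a CCB frame address (a shifted frame index for types A/B, a masked address for C/D), so the saved high half is OR'd back in before vir2phy_offset converts the bus address to the CPU address of the ARCMSR_CDB. A sketch of the type A/B form (the << 5 variant) follows.

#include <linux/types.h>
#include <linux/kernel.h>

/* Sketch: rebuild the full CCB frame address from a 32-bit flag_ccb. */
static void *demo_flagccb_to_cdb(u32 flag_ccb, unsigned long cdb_phyadd_hipart,
				 unsigned long vir2phy_offset)
{
	unsigned long cdb_phy = ((unsigned long)flag_ccb << 5) & 0xffffffff;

	if (cdb_phyadd_hipart)
		cdb_phy |= cdb_phyadd_hipart;		/* restore bits 32..63 */
	return (void *)(vir2phy_offset + cdb_phy);	/* bus -> virtual */
}
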
....@@ -1504,7 +1639,8 @@
15041639 pdev = acb->pdev;
15051640 arcmsr_free_irq(pdev, acb);
15061641 arcmsr_free_ccb_pool(acb);
1507
- arcmsr_free_mu(acb);
1642
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1643
+ arcmsr_free_io_queue(acb);
15081644 arcmsr_unmap_pciregion(acb);
15091645 pci_release_regions(pdev);
15101646 scsi_host_put(host);
....@@ -1562,7 +1698,8 @@
15621698 }
15631699 arcmsr_free_irq(pdev, acb);
15641700 arcmsr_free_ccb_pool(acb);
1565
- arcmsr_free_mu(acb);
1701
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1702
+ arcmsr_free_io_queue(acb);
15661703 arcmsr_unmap_pciregion(acb);
15671704 pci_release_regions(pdev);
15681705 scsi_host_put(host);
....@@ -1640,7 +1777,8 @@
16401777 writel(intmask_org | mask, reg->pcief0_int_enable);
16411778 break;
16421779 }
1643
- case ACB_ADAPTER_TYPE_E: {
1780
+ case ACB_ADAPTER_TYPE_E:
1781
+ case ACB_ADAPTER_TYPE_F: {
16441782 struct MessageUnit_E __iomem *reg = acb->pmuE;
16451783
16461784 mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
....@@ -1747,12 +1885,8 @@
17471885
17481886 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
17491887 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1750
- if (acb->cdb_phyaddr_hi32) {
1751
- writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
1752
- writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1753
- } else {
1754
- writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1755
- }
1888
+ writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
1889
+ writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
17561890 }
17571891 break;
17581892 case ACB_ADAPTER_TYPE_D: {
....@@ -1765,8 +1899,8 @@
17651899 spin_lock_irqsave(&acb->postq_lock, flags);
17661900 postq_index = pmu->postq_index;
17671901 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1768
- pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
1769
- pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
1902
+ pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
1903
+ pinbound_srb->addressLow = cdb_phyaddr;
17701904 pinbound_srb->length = ccb->arc_cdb_size >> 2;
17711905 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
17721906 toggle = postq_index & 0x4000;
....@@ -1788,6 +1922,23 @@
17881922 writel(ccb_post_stamp, &pmu->inbound_queueport_low);
17891923 break;
17901924 }
1925
+ case ACB_ADAPTER_TYPE_F: {
1926
+ struct MessageUnit_F __iomem *pmu = acb->pmuF;
1927
+ u32 ccb_post_stamp, arc_cdb_size;
1928
+
1929
+ if (ccb->arc_cdb_size <= 0x300)
1930
+ arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
1931
+ else {
1932
+ arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
1933
+ if (arc_cdb_size > 0xF)
1934
+ arc_cdb_size = 0xF;
1935
+ arc_cdb_size = (arc_cdb_size << 1) | 1;
1936
+ }
1937
+ ccb_post_stamp = (ccb->smid | arc_cdb_size);
1938
+ writel(0, &pmu->inbound_queueport_high);
1939
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1940
+ break;
1941
+ }
17911942 }
17921943 }
17931944
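
The type F post path encodes the CDB size into the low bits of the value written to inbound_queueport_low: frames up to 0x300 bytes use 64-byte granules as ((size - 1) >> 6) | 1, larger frames switch to 256-byte granules with a cap of 0xF before being shifted and tagged. A worked example of that encoding follows, mirroring the arithmetic in the hunk.

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_post_stamp(uint32_t smid, uint32_t arc_cdb_size)
{
	uint32_t code;

	if (arc_cdb_size <= 0x300) {
		code = ((arc_cdb_size - 1) >> 6) | 1;		/* 64-byte granules */
	} else {
		code = ((arc_cdb_size + 0xff) >> 8) + 2;	/* 256-byte granules */
		if (code > 0xF)
			code = 0xF;
		code = (code << 1) | 1;
	}
	return smid | code;
}

int main(void)
{
	/* a 0x200-byte frame posted for SMID 3 << 16 */
	printf("0x%x\n", demo_post_stamp(3u << 16, 0x200));	/* prints 0x30007 */
	return 0;
}
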
....@@ -1798,7 +1949,7 @@
17981949 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
17991950 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
18001951 printk(KERN_NOTICE
1801
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1952
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
18021953 , acb->host->host_no);
18031954 }
18041955 }
....@@ -1811,7 +1962,7 @@
18111962
18121963 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
18131964 printk(KERN_NOTICE
1814
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1965
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
18151966 , acb->host->host_no);
18161967 }
18171968 }
....@@ -1824,7 +1975,7 @@
18241975 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
18251976 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
18261977 printk(KERN_NOTICE
1827
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1978
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
18281979 , pACB->host->host_no);
18291980 }
18301981 return;
....@@ -1837,7 +1988,7 @@
18371988 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
18381989 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
18391990 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1840
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
1991
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
18411992 "timeout\n", pACB->host->host_no);
18421993 }
18431994
....@@ -1850,7 +2001,7 @@
18502001 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
18512002 writel(pACB->out_doorbell, &reg->iobound_doorbell);
18522003 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1853
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
2004
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
18542005 "timeout\n", pACB->host->host_no);
18552006 }
18562007 }
....@@ -1858,23 +2009,20 @@
18582009 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
18592010 {
18602011 switch (acb->adapter_type) {
1861
- case ACB_ADAPTER_TYPE_A: {
2012
+ case ACB_ADAPTER_TYPE_A:
18622013 arcmsr_hbaA_stop_bgrb(acb);
1863
- }
18642014 break;
1865
-
1866
- case ACB_ADAPTER_TYPE_B: {
2015
+ case ACB_ADAPTER_TYPE_B:
18672016 arcmsr_hbaB_stop_bgrb(acb);
1868
- }
18692017 break;
1870
- case ACB_ADAPTER_TYPE_C: {
2018
+ case ACB_ADAPTER_TYPE_C:
18712019 arcmsr_hbaC_stop_bgrb(acb);
1872
- }
18732020 break;
18742021 case ACB_ADAPTER_TYPE_D:
18752022 arcmsr_hbaD_stop_bgrb(acb);
18762023 break;
18772024 case ACB_ADAPTER_TYPE_E:
2025
+ case ACB_ADAPTER_TYPE_F:
18782026 arcmsr_hbaE_stop_bgrb(acb);
18792027 break;
18802028 }
....@@ -1893,7 +2041,6 @@
18932041 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
18942042 }
18952043 break;
1896
-
18972044 case ACB_ADAPTER_TYPE_B: {
18982045 struct MessageUnit_B *reg = acb->pmuB;
18992046 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
....@@ -1911,7 +2058,8 @@
19112058 reg->inbound_doorbell);
19122059 }
19132060 break;
1914
- case ACB_ADAPTER_TYPE_E: {
2061
+ case ACB_ADAPTER_TYPE_E:
2062
+ case ACB_ADAPTER_TYPE_F: {
19152063 struct MessageUnit_E __iomem *reg = acb->pmuE;
19162064 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
19172065 writel(acb->out_doorbell, &reg->iobound_doorbell);
....@@ -1957,7 +2105,8 @@
19572105 reg->inbound_doorbell);
19582106 }
19592107 break;
1960
- case ACB_ADAPTER_TYPE_E: {
2108
+ case ACB_ADAPTER_TYPE_E:
2109
+ case ACB_ADAPTER_TYPE_F: {
19612110 struct MessageUnit_E __iomem *reg = acb->pmuE;
19622111 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
19632112 writel(acb->out_doorbell, &reg->iobound_doorbell);
....@@ -1976,7 +2125,6 @@
19762125 qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
19772126 }
19782127 break;
1979
-
19802128 case ACB_ADAPTER_TYPE_B: {
19812129 struct MessageUnit_B *reg = acb->pmuB;
19822130 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
....@@ -1997,6 +2145,10 @@
19972145 qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
19982146 }
19992147 break;
2148
+ case ACB_ADAPTER_TYPE_F: {
2149
+ qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2150
+ }
2151
+ break;
20002152 }
20012153 return qbuffer;
20022154 }
....@@ -2011,7 +2163,6 @@
20112163 pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
20122164 }
20132165 break;
2014
-
20152166 case ACB_ADAPTER_TYPE_B: {
20162167 struct MessageUnit_B *reg = acb->pmuB;
20172168 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
....@@ -2031,6 +2182,9 @@
20312182 struct MessageUnit_E __iomem *reg = acb->pmuE;
20322183 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
20332184 }
2185
+ break;
2186
+ case ACB_ADAPTER_TYPE_F:
2187
+ pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
20342188 break;
20352189 }
20362190 return pqbuffer;
....@@ -2270,10 +2424,17 @@
22702424
22712425 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
22722426 {
2273
- uint32_t outbound_doorbell, in_doorbell, tmp;
2427
+ uint32_t outbound_doorbell, in_doorbell, tmp, i;
22742428 struct MessageUnit_E __iomem *reg = pACB->pmuE;
22752429
2276
- in_doorbell = readl(&reg->iobound_doorbell);
2430
+ if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
2431
+ for (i = 0; i < 5; i++) {
2432
+ in_doorbell = readl(&reg->iobound_doorbell);
2433
+ if (in_doorbell != 0)
2434
+ break;
2435
+ }
2436
+ } else
2437
+ in_doorbell = readl(&reg->iobound_doorbell);
22772438 outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
22782439 do {
22792440 writel(0, &reg->host_int_status); /* clear interrupt */
....@@ -2302,8 +2463,13 @@
23022463 struct ARCMSR_CDB *pARCMSR_CDB;
23032464 struct CommandControlBlock *pCCB;
23042465 bool error;
2466
+ unsigned long cdb_phy_addr;
2467
+
23052468 while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
2306
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
2469
+ cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2470
+ if (acb->cdb_phyadd_hipart)
2471
+ cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2472
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
23072473 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
23082474 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
23092475 arcmsr_drain_donequeue(acb, pCCB, error);
....@@ -2317,13 +2483,18 @@
23172483 struct ARCMSR_CDB *pARCMSR_CDB;
23182484 struct CommandControlBlock *pCCB;
23192485 bool error;
2486
+ unsigned long cdb_phy_addr;
2487
+
23202488 index = reg->doneq_index;
23212489 while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
2322
- reg->done_qbuffer[index] = 0;
2323
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
2490
+ cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2491
+ if (acb->cdb_phyadd_hipart)
2492
+ cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2493
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
23242494 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
23252495 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
23262496 arcmsr_drain_donequeue(acb, pCCB, error);
2497
+ reg->done_qbuffer[index] = 0;
23272498 index++;
23282499 index %= ARCMSR_MAX_HBB_POSTQUEUE;
23292500 reg->doneq_index = index;
....@@ -2335,7 +2506,8 @@
23352506 struct MessageUnit_C __iomem *phbcmu;
23362507 struct ARCMSR_CDB *arcmsr_cdb;
23372508 struct CommandControlBlock *ccb;
2338
- uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
2509
+ uint32_t flag_ccb, throttling = 0;
2510
+ unsigned long ccb_cdb_phy;
23392511 int error;
23402512
23412513 phbcmu = acb->pmuC;
....@@ -2345,6 +2517,8 @@
23452517 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
23462518 0xFFFFFFFF) {
23472519 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2520
+ if (acb->cdb_phyadd_hipart)
2521
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
23482522 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
23492523 + ccb_cdb_phy);
23502524 ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
....@@ -2365,12 +2539,12 @@
23652539 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
23662540 {
23672541 u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
2368
- uint32_t addressLow, ccb_cdb_phy;
2542
+ uint32_t addressLow;
23692543 int error;
23702544 struct MessageUnit_D *pmu;
23712545 struct ARCMSR_CDB *arcmsr_cdb;
23722546 struct CommandControlBlock *ccb;
2373
- unsigned long flags;
2547
+ unsigned long flags, ccb_cdb_phy;
23742548
23752549 spin_lock_irqsave(&acb->doneq_lock, flags);
23762550 pmu = acb->pmuD;
....@@ -2387,6 +2561,8 @@
23872561 addressLow = pmu->done_qbuffer[doneq_index &
23882562 0xFFF].addressLow;
23892563 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
2564
+ if (acb->cdb_phyadd_hipart)
2565
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
23902566 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
23912567 + ccb_cdb_phy);
23922568 ccb = container_of(arcmsr_cdb,
....@@ -2428,6 +2604,36 @@
24282604 }
24292605 acb->doneq_index = doneq_index;
24302606 writel(doneq_index, &pmu->reply_post_consumer_index);
2607
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
2608
+}
2609
+
2610
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
2611
+{
2612
+ uint32_t doneq_index;
2613
+ uint16_t cmdSMID;
2614
+ int error;
2615
+ struct MessageUnit_F __iomem *phbcmu;
2616
+ struct CommandControlBlock *ccb;
2617
+ unsigned long flags;
2618
+
2619
+ spin_lock_irqsave(&acb->doneq_lock, flags);
2620
+ doneq_index = acb->doneq_index;
2621
+ phbcmu = acb->pmuF;
2622
+ while (1) {
2623
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2624
+ if (cmdSMID == 0xffff)
2625
+ break;
2626
+ ccb = acb->pccb_pool[cmdSMID];
2627
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
2628
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2629
+ arcmsr_drain_donequeue(acb, ccb, error);
2630
+ acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
2631
+ doneq_index++;
2632
+ if (doneq_index >= acb->completionQ_entry)
2633
+ doneq_index = 0;
2634
+ }
2635
+ acb->doneq_index = doneq_index;
2636
+ writel(doneq_index, &phbcmu->reply_post_consumer_index);
24312637 spin_unlock_irqrestore(&acb->doneq_lock, flags);
24322638 }
24332639
....@@ -2621,21 +2827,46 @@
26212827 return IRQ_HANDLED;
26222828 }
26232829
2830
+static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
2831
+{
2832
+ uint32_t host_interrupt_status;
2833
+ struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;
2834
+
2835
+ host_interrupt_status = readl(&phbcmu->host_int_status) &
2836
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2837
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2838
+ if (!host_interrupt_status)
2839
+ return IRQ_NONE;
2840
+ do {
2841
+ /* MU post queue interrupts*/
2842
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
2843
+ arcmsr_hbaF_postqueue_isr(pACB);
2844
+
2845
+ /* MU ioctl transfer doorbell interrupts*/
2846
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
2847
+ arcmsr_hbaE_doorbell_isr(pACB);
2848
+
2849
+ host_interrupt_status = readl(&phbcmu->host_int_status);
2850
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2851
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2852
+ return IRQ_HANDLED;
2853
+}
2854
+
26242855 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
26252856 {
26262857 switch (acb->adapter_type) {
26272858 case ACB_ADAPTER_TYPE_A:
26282859 return arcmsr_hbaA_handle_isr(acb);
2629
- break;
26302860 case ACB_ADAPTER_TYPE_B:
26312861 return arcmsr_hbaB_handle_isr(acb);
2632
- break;
26332862 case ACB_ADAPTER_TYPE_C:
26342863 return arcmsr_hbaC_handle_isr(acb);
26352864 case ACB_ADAPTER_TYPE_D:
26362865 return arcmsr_hbaD_handle_isr(acb);
26372866 case ACB_ADAPTER_TYPE_E:
26382867 return arcmsr_hbaE_handle_isr(acb);
2868
+ case ACB_ADAPTER_TYPE_F:
2869
+ return arcmsr_hbaF_handle_isr(acb);
26392870 default:
26402871 return IRQ_NONE;
26412872 }
....@@ -3184,6 +3415,31 @@
31843415 return true;
31853416 }
31863417
3418
+static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
3419
+{
3420
+ struct MessageUnit_F __iomem *reg = pACB->pmuF;
3421
+ uint32_t intmask_org;
3422
+
3423
+ /* disable all outbound interrupt */
3424
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
3425
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
3426
+ /* wait firmware ready */
3427
+ arcmsr_wait_firmware_ready(pACB);
3428
+ /* post "get config" instruction */
3429
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3430
+
3431
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3432
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
3433
+ /* wait message ready */
3434
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3435
+ pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
3436
+ pACB->host->host_no);
3437
+ return false;
3438
+ }
3439
+ arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
3440
+ return true;
3441
+}
3442
+
31873443 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
31883444 {
31893445 bool rtn = false;
....@@ -3203,6 +3459,9 @@
32033459 break;
32043460 case ACB_ADAPTER_TYPE_E:
32053461 rtn = arcmsr_hbaE_get_config(acb);
3462
+ break;
3463
+ case ACB_ADAPTER_TYPE_F:
3464
+ rtn = arcmsr_hbaF_get_config(acb);
32063465 break;
32073466 default:
32083467 break;
....@@ -3227,7 +3486,9 @@
32273486 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
32283487 int rtn;
32293488 bool error;
3230
- polling_hba_ccb_retry:
3489
+ unsigned long ccb_cdb_phy;
3490
+
3491
+polling_hba_ccb_retry:
32313492 poll_count++;
32323493 outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
32333494 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
....@@ -3245,7 +3506,10 @@
32453506 goto polling_hba_ccb_retry;
32463507 }
32473508 }
3248
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
3509
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3510
+ if (acb->cdb_phyadd_hipart)
3511
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3512
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
32493513 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
32503514 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
32513515 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
....@@ -3283,8 +3547,9 @@
32833547 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
32843548 int index, rtn;
32853549 bool error;
3286
- polling_hbb_ccb_retry:
3550
+ unsigned long ccb_cdb_phy;
32873551
3552
+polling_hbb_ccb_retry:
32883553 poll_count++;
32893554 /* clear doorbell interrupt */
32903555 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
....@@ -3310,7 +3575,10 @@
33103575 index %= ARCMSR_MAX_HBB_POSTQUEUE;
33113576 reg->doneq_index = index;
33123577 /* check if command done with no error*/
3313
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
3578
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3579
+ if (acb->cdb_phyadd_hipart)
3580
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3581
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
33143582 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
33153583 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
33163584 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
....@@ -3343,12 +3611,14 @@
33433611 struct CommandControlBlock *poll_ccb)
33443612 {
33453613 struct MessageUnit_C __iomem *reg = acb->pmuC;
3346
- uint32_t flag_ccb, ccb_cdb_phy;
3614
+ uint32_t flag_ccb;
33473615 struct ARCMSR_CDB *arcmsr_cdb;
33483616 bool error;
33493617 struct CommandControlBlock *pCCB;
33503618 uint32_t poll_ccb_done = 0, poll_count = 0;
33513619 int rtn;
3620
+ unsigned long ccb_cdb_phy;
3621
+
33523622 polling_hbc_ccb_retry:
33533623 poll_count++;
33543624 while (1) {
....@@ -3367,7 +3637,9 @@
33673637 }
33683638 flag_ccb = readl(&reg->outbound_queueport_low);
33693639 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3370
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
3640
+ if (acb->cdb_phyadd_hipart)
3641
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3642
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
33713643 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
33723644 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
33733645 /* check ifcommand done with no error*/
....@@ -3379,8 +3651,8 @@
33793651 , pCCB->pcmd->device->id
33803652 , (u32)pCCB->pcmd->device->lun
33813653 , pCCB);
3382
- pCCB->pcmd->result = DID_ABORT << 16;
3383
- arcmsr_ccb_complete(pCCB);
3654
+ pCCB->pcmd->result = DID_ABORT << 16;
3655
+ arcmsr_ccb_complete(pCCB);
33843656 continue;
33853657 }
33863658 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
....@@ -3401,9 +3673,9 @@
34013673 struct CommandControlBlock *poll_ccb)
34023674 {
34033675 bool error;
3404
- uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
3676
+ uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
34053677 int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
3406
- unsigned long flags;
3678
+ unsigned long flags, ccb_cdb_phy;
34073679 struct ARCMSR_CDB *arcmsr_cdb;
34083680 struct CommandControlBlock *pCCB;
34093681 struct MessageUnit_D *pmu = acb->pmuD;
....@@ -3437,6 +3709,8 @@
34373709 spin_unlock_irqrestore(&acb->doneq_lock, flags);
34383710 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
34393711 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3712
+ if (acb->cdb_phyadd_hipart)
3713
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
34403714 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
34413715 ccb_cdb_phy);
34423716 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
....@@ -3546,23 +3820,20 @@
35463820 int rtn = 0;
35473821 switch (acb->adapter_type) {
35483822
3549
- case ACB_ADAPTER_TYPE_A: {
3823
+ case ACB_ADAPTER_TYPE_A:
35503824 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3551
- }
35523825 break;
3553
-
3554
- case ACB_ADAPTER_TYPE_B: {
3826
+ case ACB_ADAPTER_TYPE_B:
35553827 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3556
- }
35573828 break;
3558
- case ACB_ADAPTER_TYPE_C: {
3829
+ case ACB_ADAPTER_TYPE_C:
35593830 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3560
- }
35613831 break;
35623832 case ACB_ADAPTER_TYPE_D:
35633833 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
35643834 break;
35653835 case ACB_ADAPTER_TYPE_E:
3836
+ case ACB_ADAPTER_TYPE_F:
35663837 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
35673838 break;
35683839 }
....@@ -3643,6 +3914,16 @@
36433914 writel(pacb->out_doorbell, &reg->iobound_doorbell);
36443915 break;
36453916 }
3917
+ case ACB_ADAPTER_TYPE_F: {
3918
+ struct MessageUnit_F __iomem *reg = pacb->pmuF;
3919
+
3920
+ pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
3921
+ pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
3922
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
3923
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3924
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
3925
+ break;
3926
+ }
36463927 }
36473928 if (sys_tz.tz_minuteswest)
36483929 next_time = ARCMSR_HOURS;
....@@ -3668,6 +3949,7 @@
36683949 dma_coherent_handle = acb->dma_coherent_handle2;
36693950 break;
36703951 case ACB_ADAPTER_TYPE_E:
3952
+ case ACB_ADAPTER_TYPE_F:
36713953 dma_coherent_handle = acb->dma_coherent_handle +
36723954 offsetof(struct CommandControlBlock, arcmsr_cdb);
36733955 break;
....@@ -3678,6 +3960,7 @@
36783960 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
36793961 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
36803962 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3963
+ acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
36813964 /*
36823965 ***********************************************************************
36833966 ** if adapter type B, set window of "post command Q"
@@ -3742,7 +4025,6 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		if (cdb_phyaddr_hi32 != 0) {
 		struct MessageUnit_C __iomem *reg = acb->pmuC;
 
 		printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
@@ -3756,7 +4038,6 @@
 			timeout \n", acb->host->host_no);
 			return 1;
 		}
-		}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
@@ -3786,17 +4067,35 @@
 		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
 		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
 		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
-		dma_coherent_handle = acb->dma_coherent_handle2;
-		cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
-		cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
-		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
-		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
-		writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+		writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
+		writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
+		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
 		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
 		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
 		writel(acb->out_doorbell, &reg->iobound_doorbell);
 		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
 			pr_notice("arcmsr%d: 'set command Q window' timeout \n",
+				acb->host->host_no);
+			return 1;
+		}
+		}
+		break;
+	case ACB_ADAPTER_TYPE_F: {
+		struct MessageUnit_F __iomem *reg = acb->pmuF;
+
+		acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
+		acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
+		acb->msgcode_rwbuffer[2] = cdb_phyaddr;
+		acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
+		acb->msgcode_rwbuffer[4] = acb->ccbsize;
+		acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
+		acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
+		acb->msgcode_rwbuffer[7] = acb->completeQ_size;
+		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+		writel(acb->out_doorbell, &reg->iobound_doorbell);
+		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
 				acb->host->host_no);
 			return 1;
 		}
@@ -3850,7 +4149,8 @@
 			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
 		}
 		break;
-	case ACB_ADAPTER_TYPE_E: {
+	case ACB_ADAPTER_TYPE_E:
+	case ACB_ADAPTER_TYPE_F: {
 		struct MessageUnit_E __iomem *reg = acb->pmuE;
 		do {
 			if (!(acb->acb_flags & ACB_F_IOP_INITED))
38654165 static void arcmsr_request_device_map(struct timer_list *t)
38664166 {
38674167 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
3868
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3869
- (acb->acb_flags & ACB_F_BUS_RESET) ||
3870
- (acb->acb_flags & ACB_F_ABORT)) {
3871
- mod_timer(&acb->eternal_timer,
3872
- jiffies + msecs_to_jiffies(6 * HZ));
4168
+ if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
4169
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
38734170 } else {
38744171 acb->fw_flag = FW_NORMAL;
3875
- if (atomic_read(&acb->ante_token_value) ==
3876
- atomic_read(&acb->rq_map_token)) {
3877
- atomic_set(&acb->rq_map_token, 16);
3878
- }
3879
- atomic_set(&acb->ante_token_value,
3880
- atomic_read(&acb->rq_map_token));
3881
- if (atomic_dec_and_test(&acb->rq_map_token)) {
3882
- mod_timer(&acb->eternal_timer, jiffies +
3883
- msecs_to_jiffies(6 * HZ));
3884
- return;
3885
- }
38864172 switch (acb->adapter_type) {
38874173 case ACB_ADAPTER_TYPE_A: {
38884174 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -3912,10 +4198,23 @@
 			writel(acb->out_doorbell, &reg->iobound_doorbell);
 			break;
 		}
+		case ACB_ADAPTER_TYPE_F: {
+			struct MessageUnit_F __iomem *reg = acb->pmuF;
+			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);
+
+			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
+				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
+				goto nxt6s;
+			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+			writel(acb->out_doorbell, &reg->iobound_doorbell);
+			break;
+		}
 		default:
 			return;
 		}
 		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+nxt6s:
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
 }
@@ -3927,7 +4226,7 @@
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
 	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n", acb->host->host_no);
+				rebuild' timeout \n", acb->host->host_no);
 	}
 }
 
@@ -3938,7 +4237,7 @@
 	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n",acb->host->host_no);
+				rebuild' timeout \n",acb->host->host_no);
 	}
 }
 
@@ -3950,7 +4249,7 @@
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n", pACB->host->host_no);
+				rebuild' timeout \n", pACB->host->host_no);
 	}
 	return;
 }
@@ -3963,7 +4262,7 @@
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
 	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
 		pr_notice("arcmsr%d: wait 'start adapter "
-			"background rebulid' timeout\n", pACB->host->host_no);
+			"background rebuild' timeout\n", pACB->host->host_no);
 	}
 }
 
@@ -3977,7 +4276,7 @@
 	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
 	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
 		pr_notice("arcmsr%d: wait 'start adapter "
-			"background rebulid' timeout \n", pACB->host->host_no);
+			"background rebuild' timeout \n", pACB->host->host_no);
 	}
 }
 
@@ -3997,6 +4296,7 @@
 		arcmsr_hbaD_start_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_E:
+	case ACB_ADAPTER_TYPE_F:
 		arcmsr_hbaE_start_bgrb(acb);
 		break;
 	}
@@ -4076,7 +4376,8 @@
 			}
 		}
 		break;
-	case ACB_ADAPTER_TYPE_E: {
+	case ACB_ADAPTER_TYPE_E:
+	case ACB_ADAPTER_TYPE_F: {
 		struct MessageUnit_E __iomem *reg = acb->pmuE;
 		uint32_t i, tmp;
 
@@ -4203,7 +4504,8 @@
 				true : false;
 		}
 		break;
-	case ACB_ADAPTER_TYPE_E:{
+	case ACB_ADAPTER_TYPE_E:
+	case ACB_ADAPTER_TYPE_F:{
 		struct MessageUnit_E __iomem *reg = acb->pmuE;
 		rtn = (readl(&reg->host_diagnostic_3xxx) &
 			ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
@@ -4302,8 +4604,6 @@
 				goto wait_reset_done;
 			}
 			arcmsr_iop_init(acb);
-			atomic_set(&acb->rq_map_token, 16);
-			atomic_set(&acb->ante_token_value, 16);
 			acb->fw_flag = FW_NORMAL;
 			mod_timer(&acb->eternal_timer, jiffies +
 				msecs_to_jiffies(6 * HZ));
@@ -4312,8 +4612,6 @@
 		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
 	} else {
 		acb->acb_flags &= ~ACB_F_BUS_RESET;
-		atomic_set(&acb->rq_map_token, 16);
-		atomic_set(&acb->ante_token_value, 16);
 		acb->fw_flag = FW_NORMAL;
 		mod_timer(&acb->eternal_timer, jiffies +
 			msecs_to_jiffies(6 * HZ));
@@ -4383,7 +4681,7 @@
 	case PCI_DEVICE_ID_ARECA_1202:
 	case PCI_DEVICE_ID_ARECA_1210:
 		raid6 = 0;
-		/*FALLTHRU*/
+		fallthrough;
 	case PCI_DEVICE_ID_ARECA_1120:
 	case PCI_DEVICE_ID_ARECA_1130:
 	case PCI_DEVICE_ID_ARECA_1160:
@@ -4406,6 +4704,9 @@
 	case PCI_DEVICE_ID_ARECA_1884:
 		type = "SAS/SATA";
 		break;
+	case PCI_DEVICE_ID_ARECA_1886:
+		type = "NVMe/SAS/SATA";
+		break;
 	default:
 		type = "unknown";
 		raid6 = 0;