...
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
...
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
 #include <linux/bitops.h>
+#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
...
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/fc/fc_fs.h>
-
-#include <linux/nvme-fc-driver.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
...
 #include "lpfc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 #include "lpfc_ids.h"
 
-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
+static enum cpuhp_state lpfc_cpuhp_state;
 /* Used when mapping IRQ vectors in a driver centric manner */
-uint16_t *lpfc_used_cpu;
-uint32_t lpfc_present_cpu;
+static uint32_t lpfc_present_cpu;
 
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
...
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
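The hunk above wires the driver into the CPU hotplug state machine: a file-scope `lpfc_cpuhp_state` plus forward declarations for `lpfc_cpuhp_add()`/`lpfc_cpuhp_remove()`. A minimal sketch of the multi-instance registration pattern this implies follows; the callback bodies and the state name are illustrative assumptions, not lpfc's actual code.

```c
#include <linux/cpuhotplug.h>

static enum cpuhp_state example_cpuhp_state;

/* Hypothetical per-HBA callbacks; in a driver like lpfc these rebalance
 * IRQ/EQ work when a CPU comes or goes. @node is embedded in the
 * per-device structure and registered per instance. */
static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* resume work affined to @cpu */
}

static int example_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* migrate work away from @cpu */
}

static int example_cpuhp_register(void)
{
	/* One dynamic state registered at module init; each HBA later
	 * attaches its node with cpuhp_state_add_instance_nocalls(). */
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "example/sli4:online",
					  example_cpu_online,
					  example_cpu_offline);
	if (ret < 0)
		return ret;
	example_cpuhp_state = ret;	/* consumed by the add/remove helpers */
	return 0;
}
```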
...
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0324 Config Port initialization "
 				"error, mbxCmd x%x READ_NVPARM, "
 				"mbxStatus x%x\n",
...
 	lpfc_read_rev(phba, pmb);
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0439 Adapter failed to init, mbxCmd x%x "
 				"READ_REV, mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
...
 	 */
 	if (mb->un.varRdRev.rr == 0) {
 		vp->rev.rBit = 0;
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0440 Adapter failed to init, READ_REV has "
 				"missing revision information.\n");
 		mempool_free(pmb, phba->mbox_mem_pool);
...
 		 */
 		if (mb->un.varDmp.word_cnt == 0)
 			break;
-		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
-			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+
+		i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
+		if (offset + i > DMP_VPD_SIZE)
+			i = DMP_VPD_SIZE - offset;
 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
-				      lpfc_vpd_data + offset,
-				      mb->un.varDmp.word_cnt);
-		offset += mb->un.varDmp.word_cnt;
-	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+				      lpfc_vpd_data + offset, i);
+		offset += i;
+	} while (offset < DMP_VPD_SIZE);
+
 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
 
 	kfree(lpfc_vpd_data);
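The VPD hunk fixes a unit bug: `word_cnt` counts 32-bit words, so it is converted to bytes before being clamped against `DMP_VPD_SIZE` and used as the copy length. A standalone sketch of that clamp-then-copy loop, assuming a hypothetical `get_chunk()` producer that returns a chunk length in words (0 when exhausted):

```c
#include <stdint.h>
#include <string.h>

/* Hypothetical producer: fills buf, returns the length in 32-bit words. */
extern size_t get_chunk(uint8_t *buf, size_t max_words);

static size_t copy_vpd(uint8_t *dst, size_t dst_size)
{
	uint8_t buf[256];
	size_t offset = 0;

	do {
		size_t words = get_chunk(buf, sizeof(buf) / sizeof(uint32_t));
		size_t bytes;

		if (words == 0)
			break;
		bytes = words * sizeof(uint32_t);	/* words -> bytes first */
		if (offset + bytes > dst_size)		/* then clamp to the buffer */
			bytes = dst_size - offset;
		memcpy(dst + offset, buf, bytes);
		offset += bytes;
	} while (offset < dst_size);

	return offset;	/* number of bytes actually captured */
}
```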
...
 
 	pmb->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0448 Adapter failed init, mbxCmd x%x "
 				"READ_SPARM mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
-		mp = (struct lpfc_dmabuf *) pmb->context1;
+		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
 	}
 
-	mp = (struct lpfc_dmabuf *) pmb->context1;
+	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 
 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
-	pmb->context1 = NULL;
+	pmb->ctx_buf = NULL;
 	lpfc_update_vport_wwn(vport);
 
 	/* Update the fc_host data structures with new wwn. */
...
 	lpfc_read_config(phba, pmb);
 	pmb->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0453 Adapter failed to init, mbxCmd x%x "
 				"READ_CONFIG, mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
...
 	lpfc_sli_read_link_ste(phba);
 
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
-	i = (mb->un.varRdConfig.max_xri + 1);
-	if (phba->cfg_hba_queue_depth > i) {
+	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"3359 HBA queue depth changed from %d to %d\n",
-				phba->cfg_hba_queue_depth, i);
-		phba->cfg_hba_queue_depth = i;
-	}
-
-	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
-	i = (mb->un.varRdConfig.max_xri >> 3);
-	if (phba->pport->cfg_lun_queue_depth > i) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"3360 LUN queue depth changed from %d to %d\n",
-				phba->pport->cfg_lun_queue_depth, i);
-		phba->pport->cfg_lun_queue_depth = i;
+				phba->cfg_hba_queue_depth,
+				mb->un.varRdConfig.max_xri);
+		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
 	}
 
 	phba->lmt = mb->un.varRdConfig.lmt;
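The queue-depth hunk drops the old `max_xri + 1` off-by-one and the derived LUN depth, leaving a plain clamp of the configured value to the adapter-reported `max_xri`. Reduced to a generic sketch (names are not lpfc's):

```c
#include <linux/printk.h>

/* Clamp a user-configured depth to a hardware-reported ceiling. */
static unsigned int clamp_queue_depth(unsigned int cfg_depth,
				      unsigned int max_xri)
{
	if (cfg_depth > max_xri) {
		pr_warn("queue depth changed from %u to %u\n",
			cfg_depth, max_xri);
		return max_xri;
	}
	return cfg_depth;
}
```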
...
 	}
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0352 Config MSI mailbox command "
 				"failed, mbxCmd x%x, mbxStatus x%x\n",
 				pmb->u.mb.mbxCommand,
...
 		jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
 
 	if (phba->hba_flag & LINK_DISABLED) {
-		lpfc_printf_log(phba,
-				KERN_ERR, LOG_INIT,
-				"2598 Adapter Link is disabled.\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"2598 Adapter Link is disabled.\n");
 		lpfc_down_link(phba, pmb);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
-			lpfc_printf_log(phba,
-					KERN_ERR, LOG_INIT,
-					"2599 Adapter failed to issue DOWN_LINK"
-					" mbox command rc 0x%x\n", rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"2599 Adapter failed to issue DOWN_LINK"
+					" mbox command rc 0x%x\n", rc);
 
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
...
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
-		lpfc_printf_log(phba,
-				KERN_ERR,
-				LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0456 Adapter failed to issue "
 				"ASYNCEVT_ENABLE mbox status x%x\n",
 				rc);
...
 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"0435 Adapter failed "
 				"to get Option ROM version status x%x\n", rc);
 		mempool_free(pmb, phba->mbox_mem_pool);
 	}
...
 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
 	     !(phba->lmt & LMT_64Gb))) {
 		/* Reset link speed to auto */
-		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-				"1302 Invalid speed for this board:%d "
-				"Reset link speed to auto.\n",
-				phba->cfg_link_speed);
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"1302 Invalid speed for this board:%d "
+				"Reset link speed to auto.\n",
+				phba->cfg_link_speed);
 		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 	}
 	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
...
 	lpfc_set_loopback_flag(phba);
 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0498 Adapter failed to init, mbxCmd x%x "
-			"INIT_LINK, mbxStatus x%x\n",
-			mb->mbxCommand, mb->mbxStatus);
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"0498 Adapter failed to init, mbxCmd x%x "
+				"INIT_LINK, mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
 		if (phba->sli_rev <= LPFC_SLI_REV3) {
 			/* Clear all interrupt enable conditions */
 			writel(0, phba->HCregaddr);
...
 		return -ENOMEM;
 	}
 
-	lpfc_printf_log(phba,
-			KERN_ERR, LOG_INIT,
-			"0491 Adapter Link is disabled.\n");
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"0491 Adapter Link is disabled.\n");
 	lpfc_down_link(phba, pmb);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
-		lpfc_printf_log(phba,
-				KERN_ERR, LOG_INIT,
-				"2522 Adapter failed to issue DOWN_LINK"
-				" mbox command rc 0x%x\n", rc);
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"2522 Adapter failed to issue DOWN_LINK"
+				" mbox command rc 0x%x\n", rc);
 
 		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
...
 
 /**
  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
  * This routine will do uninitialization after the HBA is reset when bring
...
 static int
 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *psb, *psb_next;
-	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+	struct lpfc_io_buf *psb, *psb_next;
+	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
+	struct lpfc_sli4_hdw_queue *qp;
 	LIST_HEAD(aborts);
 	LIST_HEAD(nvme_aborts);
 	LIST_HEAD(nvmet_aborts);
-	unsigned long iflag = 0;
 	struct lpfc_sglq *sglq_entry = NULL;
-	int cnt;
+	int cnt, idx;
 
 
 	lpfc_sli_hbqbuf_free_all(phba);
...
 
 
 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	/* abts_scsi_buf_list_lock required because worker thread uses this
+
+	/* abts_xxxx_buf_list_lock required because worker thread uses this
 	 * list.
 	 */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+	cnt = 0;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+
+		spin_lock(&qp->abts_io_buf_list_lock);
+		list_splice_init(&qp->lpfc_abts_io_buf_list,
 				 &aborts);
-		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-	}
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
-				 &nvme_aborts);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
-				 &nvmet_aborts);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-	}
-
-	spin_unlock_irq(&phba->hbalock);
-
-	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
-		psb->pCmd = NULL;
-		psb->status = IOSTAT_SUCCESS;
-	}
-	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
-	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		cnt = 0;
-		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
 			psb->pCmd = NULL;
 			psb->status = IOSTAT_SUCCESS;
 			cnt++;
 		}
-		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
-		phba->put_nvme_bufs += cnt;
-		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
-		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+		spin_lock(&qp->io_buf_list_put_lock);
+		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs += qp->abts_scsi_io_bufs;
+		qp->put_io_bufs += qp->abts_nvme_io_bufs;
+		qp->abts_scsi_io_bufs = 0;
+		qp->abts_nvme_io_bufs = 0;
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock(&qp->abts_io_buf_list_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
 
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				 &nvmet_aborts);
+		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
-			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 		}
 	}
 
 	lpfc_sli4_free_sp_events(phba);
-	return 0;
+	return cnt;
 }
 
 /**
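`lpfc_hba_down_post_s4()` now walks every hardware queue, splicing each queue's aborted-I/O list back onto that queue's put list under the queue's own locks, and returns the count. A generic sketch of the move-under-nested-locks pattern (the types and field names here are illustrative stand-ins, not lpfc's):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct io_buf { struct list_head list; };

struct hdw_queue {
	spinlock_t abts_lock, put_lock;
	struct list_head abts_list, put_list;
	unsigned int put_bufs;
};

/* Drain one queue's aborted buffers back to its free (put) list.
 * Locks nest abts_lock -> put_lock, matching the hunk's ordering. */
static unsigned int drain_aborts(struct hdw_queue *qp)
{
	struct io_buf *buf, *next;
	unsigned int cnt = 0;
	LIST_HEAD(aborts);

	spin_lock(&qp->abts_lock);
	list_splice_init(&qp->abts_list, &aborts);
	list_for_each_entry_safe(buf, next, &aborts, list)
		cnt++;			/* per-buffer cleanup would go here */
	spin_lock(&qp->put_lock);
	list_splice_init(&aborts, &qp->put_list);
	qp->put_bufs += cnt;
	spin_unlock(&qp->put_lock);
	spin_unlock(&qp->abts_lock);
	return cnt;
}
```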
...
 
 /**
  * lpfc_hb_timeout - The HBA-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
  *
  * This is the HBA-timer timeout handler registered to the lpfc driver. When
  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
...
 
 /**
  * lpfc_rrq_timeout - The RRQ-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
  *
  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
  * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
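These kernel-doc fixes reflect the converted timer API: handlers take a `struct timer_list *` and recover their containing object with `from_timer()`. A minimal sketch of the pattern, with a hypothetical `example_hba` in place of the driver's structure:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_hba {
	struct timer_list hb_tmofunc;
};

/* Post-conversion handler signature: the argument is the timer itself;
 * from_timer() maps it back to the structure that embeds it. */
static void example_hb_timeout(struct timer_list *t)
{
	struct example_hba *hba = from_timer(hba, t, hb_tmofunc);

	(void)hba;	/* post a heartbeat-timeout event for this HBA */
}

static void example_hba_init(struct example_hba *hba)
{
	timer_setup(&hba->hb_tmofunc, example_hb_timeout, 0);
	mod_timer(&hba->hb_tmofunc, jiffies + msecs_to_jiffies(1000));
}
```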
...
 		return;
 	}
 
+/*
+ * lpfc_idle_stat_delay_work - idle_stat tracking
+ *
+ * This routine tracks per-cq idle_stat and determines polling decisions.
+ *
+ * Return codes:
+ *   None
+ **/
+static void
+lpfc_idle_stat_delay_work(struct work_struct *work)
+{
+	struct lpfc_hba *phba = container_of(to_delayed_work(work),
+					     struct lpfc_hba,
+					     idle_stat_delay_work);
+	struct lpfc_queue *cq;
+	struct lpfc_sli4_hdw_queue *hdwq;
+	struct lpfc_idle_stat *idle_stat;
+	u32 i, idle_percent;
+	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
+
+	if (phba->pport->load_flag & FC_UNLOADING)
+		return;
+
+	if (phba->link_state == LPFC_HBA_ERROR ||
+	    phba->pport->fc_flag & FC_OFFLINE_MODE)
+		goto requeue;
+
+	for_each_present_cpu(i) {
+		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
+		cq = hdwq->io_cq;
+
+		/* Skip if we've already handled this cq's primary CPU */
+		if (cq->chann != i)
+			continue;
+
+		idle_stat = &phba->sli4_hba.idle_stat[i];
+
+		/* get_cpu_idle_time returns values as running counters. Thus,
+		 * to know the amount for this period, the prior counter values
+		 * need to be subtracted from the current counter values.
+		 * From there, the idle time stat can be calculated as a
+		 * percentage of 100 - the sum of the other consumption times.
+		 */
+		wall_idle = get_cpu_idle_time(i, &wall, 1);
+		diff_idle = wall_idle - idle_stat->prev_idle;
+		diff_wall = wall - idle_stat->prev_wall;
+
+		if (diff_wall <= diff_idle)
+			busy_time = 0;
+		else
+			busy_time = diff_wall - diff_idle;
+
+		idle_percent = div64_u64(100 * busy_time, diff_wall);
+		idle_percent = 100 - idle_percent;
+
+		if (idle_percent < 15)
+			cq->poll_mode = LPFC_QUEUE_WORK;
+		else
+			cq->poll_mode = LPFC_IRQ_POLL;
+
+		idle_stat->prev_idle = wall_idle;
+		idle_stat->prev_wall = wall;
+	}
+
+requeue:
+	schedule_delayed_work(&phba->idle_stat_delay_work,
+			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
+}
+
+static void
+lpfc_hb_eq_delay_work(struct work_struct *work)
+{
+	struct lpfc_hba *phba = container_of(to_delayed_work(work),
+					     struct lpfc_hba, eq_delay_work);
+	struct lpfc_eq_intr_info *eqi, *eqi_new;
+	struct lpfc_queue *eq, *eq_next;
+	unsigned char *ena_delay = NULL;
+	uint32_t usdelay;
+	int i;
+
+	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+		return;
+
+	if (phba->link_state == LPFC_HBA_ERROR ||
+	    phba->pport->fc_flag & FC_OFFLINE_MODE)
+		goto requeue;
+
+	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+			    GFP_KERNEL);
+	if (!ena_delay)
+		goto requeue;
+
+	for (i = 0; i < phba->cfg_irq_chann; i++) {
+		/* Get the EQ corresponding to the IRQ vector */
+		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+		if (!eq)
+			continue;
+		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+			ena_delay[eq->last_cpu] = 1;
+		}
+	}
+
+	for_each_present_cpu(i) {
+		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+		if (ena_delay[i]) {
+			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+		} else {
+			usdelay = 0;
+		}
+
+		eqi->icnt = 0;
+
+		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
+			if (unlikely(eq->last_cpu != i)) {
+				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
+						      eq->last_cpu);
+				list_move_tail(&eq->cpu_list, &eqi_new->list);
+				continue;
+			}
+			if (usdelay != eq->q_mode)
+				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
+							 usdelay);
+		}
+	}
+
+	kfree(ena_delay);
+
+requeue:
+	queue_delayed_work(phba->wq, &phba->eq_delay_work,
+			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+}
+
+/**
+ * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * For each heartbeat, this routine does some heuristic methods to adjust
+ * XRI distribution. The goal is to fully utilize free XRIs.
+ **/
+static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
+{
+	u32 i;
+	u32 hwq_count;
+
+	hwq_count = phba->cfg_hdw_queue;
+	for (i = 0; i < hwq_count; i++) {
+		/* Adjust XRIs in private pool */
+		lpfc_adjust_pvt_pool_count(phba, i);
+
+		/* Adjust high watermark */
+		lpfc_adjust_high_watermark(phba, i);
+
+#ifdef LPFC_MXP_STAT
+		/* Snapshot pbl, pvt and busy count */
+		lpfc_snapshot_mxp(phba, i);
+#endif
+	}
+}
+
 /**
  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
  * @phba: pointer to lpfc hba data structure.
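As the in-code comment in `lpfc_idle_stat_delay_work()` notes, `get_cpu_idle_time()` returns running counters, so each pass subtracts the previous sample to get per-interval idle and wall time, then derives the percentage that flips the CQ between `LPFC_QUEUE_WORK` and `LPFC_IRQ_POLL`. The arithmetic, reduced to a sketch (struct and function names here are illustrative):

```c
#include <linux/math64.h>
#include <linux/types.h>

struct idle_sample {
	u64 prev_idle;
	u64 prev_wall;
};

/* Busy percentage over the interval since the previous call; @wall and
 * @idle are running counters, as get_cpu_idle_time() returns them. */
static u32 interval_busy_percent(struct idle_sample *s, u64 wall, u64 idle)
{
	u64 diff_wall = wall - s->prev_wall;
	u64 diff_idle = idle - s->prev_idle;
	u64 busy = (diff_wall > diff_idle) ? diff_wall - diff_idle : 0;
	u32 busy_pct = diff_wall ? div64_u64(100 * busy, diff_wall) : 0;

	s->prev_wall = wall;
	s->prev_idle = idle;
	/* The hunk computes idle% = 100 - busy% and picks LPFC_QUEUE_WORK
	 * when idle% < 15, i.e. when the CPU is nearly saturated. */
	return busy_pct;
}
```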
...
 	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
-	struct lpfc_queue *qp;
-	unsigned long time_elapsed;
-	uint32_t tick_cqe, max_cqe, val;
-	uint64_t tot, data1, data2, data3;
-	struct lpfc_nvmet_tgtport *tgtp;
-	struct lpfc_register reg_data;
-	struct nvme_fc_local_port *localport;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
-	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
+
+	if (phba->cfg_xri_rebalancing) {
+		/* Multi-XRI pools handler */
+		lpfc_hb_mxp_handler(phba);
+	}
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			lpfc_rcv_seq_check_edtov(vports[i]);
-			lpfc_fdmi_num_disc_check(vports[i]);
+			lpfc_fdmi_change_check(vports[i]);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
...
 	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return;
 
-	if (phba->cfg_auto_imax) {
-		if (!phba->last_eqdelay_time) {
-			phba->last_eqdelay_time = jiffies;
-			goto skip_eqdelay;
-		}
-		time_elapsed = jiffies - phba->last_eqdelay_time;
-		phba->last_eqdelay_time = jiffies;
-
-		tot = 0xffff;
-		/* Check outstanding IO count */
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			if (phba->nvmet_support) {
-				tgtp = phba->targetport->private;
-				/* Calculate outstanding IOs */
-				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
-				tot += atomic_read(&tgtp->xmt_fcp_release);
-				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
-			} else {
-				localport = phba->pport->localport;
-				if (!localport || !localport->private)
-					goto skip_eqdelay;
-				lport = (struct lpfc_nvme_lport *)
-					localport->private;
-				tot = 0;
-				for (i = 0;
-				     i < phba->cfg_nvme_io_channel; i++) {
-					cstat = &lport->cstat[i];
-					data1 = atomic_read(
-						&cstat->fc4NvmeInputRequests);
-					data2 = atomic_read(
-						&cstat->fc4NvmeOutputRequests);
-					data3 = atomic_read(
-						&cstat->fc4NvmeControlRequests);
-					tot += (data1 + data2 + data3);
-					tot -= atomic_read(
-						&cstat->fc4NvmeIoCmpls);
-				}
-			}
-		}
-
-		/* Interrupts per sec per EQ */
-		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
-		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
-
-		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
-		max_cqe = time_elapsed * tick_cqe;
-
-		for (i = 0; i < phba->io_channel_irqs; i++) {
-			/* Fast-path EQ */
-			qp = phba->sli4_hba.hba_eq[i];
-			if (!qp)
-				continue;
-
-			/* Use no EQ delay if we don't have many outstanding
-			 * IOs, or if we are only processing 1 CQE/ISR or less.
-			 * Otherwise, assume we can process up to lpfc_fcp_imax
-			 * interrupts per HBA.
-			 */
-			if (tot < LPFC_NODELAY_MAX_IO ||
-			    qp->EQ_cqe_cnt <= max_cqe)
-				val = 0;
-			else
-				val = phba->cfg_fcp_imax;
-
-			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
-				/* Use EQ Delay Register method */
-
-				/* Convert for EQ Delay register */
-				if (val) {
-					/* First, interrupts per sec per EQ */
-					val = phba->cfg_fcp_imax /
-					      phba->io_channel_irqs;
-
-					/* us delay between each interrupt */
-					val = LPFC_SEC_TO_USEC / val;
-				}
-				if (val != qp->q_mode) {
-					reg_data.word0 = 0;
-					bf_set(lpfc_sliport_eqdelay_id,
-					       &reg_data, qp->queue_id);
-					bf_set(lpfc_sliport_eqdelay_delay,
-					       &reg_data, val);
-					writel(reg_data.word0, eqdreg);
-				}
-			} else {
-				/* Use mbox command method */
-				if (val != qp->q_mode)
-					lpfc_modify_hba_eq_delay(phba, i,
-								 1, val);
-			}
-
-			/*
-			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
-			 * between interrupts for EQDR.
-			 */
-			qp->q_mode = val;
-			qp->EQ_cqe_cnt = 0;
-		}
-	}
-
-skip_eqdelay:
 	spin_lock_irq(&phba->pport->work_port_lock);
 
 	if (time_after(phba->last_completion_time +
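For contrast with the removed block above, the replacement policy (in `lpfc_hb_eq_delay_work()`) derives the coalescing delay purely from the interval's per-CPU interrupt count: roughly one `LPFC_EQ_DELAY_STEP` per 1024 interrupts, capped at `LPFC_MAX_AUTO_EQ_DELAY`. Reduced to a sketch with placeholder constants, since the exact step and cap values are driver-private:

```c
/* Derive an EQ coalescing delay (usec) from an interval interrupt count.
 * @step and @max stand in for LPFC_EQ_DELAY_STEP / LPFC_MAX_AUTO_EQ_DELAY. */
static unsigned int eq_delay_usec(unsigned int icnt, unsigned int step,
				  unsigned int max)
{
	unsigned int usdelay = (icnt >> 10) * step;	/* ~per 1024 interrupts */

	return usdelay > max ? max : usdelay;
}
```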
...
 	spin_unlock_irq(&phba->hbalock);
 
 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+	lpfc_sli_flush_io_rings(phba);
 	lpfc_offline(phba);
 	lpfc_hba_down_post(phba);
 	lpfc_unblock_mgmt_io(phba);
...
 		return;
 	}
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0479 Deferred Adapter Hardware Error "
-			"Data: x%x x%x x%x\n",
-			phba->work_hs,
-			phba->work_status[0], phba->work_status[1]);
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"0479 Deferred Adapter Hardware Error "
+			"Data: x%x x%x x%x\n",
+			phba->work_hs, phba->work_status[0],
+			phba->work_status[1]);
 
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
...
 	temp_event_data.event_code = LPFC_CRIT_TEMP;
 	temp_event_data.data = (uint32_t)temperature;
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 			"0406 Adapter maximum temperature exceeded "
 			"(%ld), taking this port offline "
 			"Data: x%x x%x x%x\n",
...
 	 * failure is a value other than FFER6. Do not call the offline
 	 * twice. This is the adapter hardware error path.
 	 */
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 			"0457 Adapter Hardware Error "
 			"Data: x%x x%x x%x\n",
 			phba->work_hs,
...
  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
  * @phba: pointer to lpfc hba data structure.
  * @mbx_action: flag for mailbox shutdown action.
- *
+ * @en_rn_msg: send reset/port recovery message.
  * This routine is invoked to perform an SLI4 port PCI function reset in
  * response to port status register polling attention. It waits for port
  * status register (ERR, RDY, RN) bits before proceeding with function reset.
...
 
 	/* need reset: attempt for port recovery */
 	if (en_rn_msg)
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"2887 Reset Needed: Attempting Port "
 				"Recovery...\n");
 	lpfc_offline_prep(phba, mbx_action);
+	lpfc_sli_flush_io_rings(phba);
 	lpfc_offline(phba);
 	/* release interrupt for possible resource change */
 	lpfc_sli4_disable_intr(phba);
 	rc = lpfc_sli_brdrestart(phba);
 	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"6309 Failed to restart board\n");
 		return rc;
 	}
 	/* request and enable interrupt */
 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"3175 Failed to enable interrupt\n");
 		return -EIO;
 	}
...
 	/* If the pci channel is offline, ignore possible errors, since
 	 * we cannot communicate with the pci card anyway.
 	 */
-	if (pci_channel_offline(phba->pcidev))
+	if (pci_channel_offline(phba->pcidev)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"3166 pci channel is offline\n");
+		lpfc_sli4_offline_eratt(phba);
 		return;
+	}
 
 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
...
 			lpfc_sli4_offline_eratt(phba);
 			return;
 		}
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"7623 Checking UE recoverable");
 
 		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
...
 			msleep(1000);
 		}
 
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"4827 smphr_port_status x%x : Waited %dSec",
 				smphr_port_status, i);
 
...
 						LPFC_MBX_NO_WAIT, en_rn_msg);
 					if (rc == 0)
 						return;
-					lpfc_printf_log(phba,
-							KERN_ERR, LOG_INIT,
+					lpfc_printf_log(phba, KERN_ERR,
+							LOG_TRACE_EVENT,
 							"4215 Failed to recover UE");
 					break;
 				}
 			}
 		}
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"7624 Firmware not ready: Failing UE recovery,"
 				" waited %dSec", i);
-		lpfc_sli4_offline_eratt(phba);
+		phba->link_state = LPFC_HBA_ERROR;
 		break;
 
 	case LPFC_SLI_INTF_IF_TYPE_2:
...
 			       &portstat_reg.word0);
 		/* consider PCI bus read error as pci_channel_offline */
 		if (pci_rd_rc1 == -EIO) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"3151 PCI bus read access failure: x%x\n",
 				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+			lpfc_sli4_offline_eratt(phba);
 			return;
 		}
 		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
 		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2889 Port Overtemperature event, "
-				"taking port offline Data: x%x x%x\n",
-				reg_err1, reg_err2);
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"2889 Port Overtemperature event, "
+					"taking port offline Data: x%x x%x\n",
+					reg_err1, reg_err2);
 
 			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
 			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
...
 		}
 		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"3143 Port Down: Firmware Update "
 					"Detected\n");
 			en_rn_msg = false;
 		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"3144 Port Down: Debug Dump\n");
 		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"3145 Port Down: Provisioning\n");
 
 		/* If resets are disabled then leave the HBA alone and return */
...
 			break;
 		}
 		/* fall through for not able to recover */
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"3152 Unrecoverable error, bring the port "
-				"offline\n");
-		lpfc_sli4_offline_eratt(phba);
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"3152 Unrecoverable error\n");
+		phba->link_state = LPFC_HBA_ERROR;
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
...
 	lpfc_linkdown(phba);
 	phba->link_state = LPFC_HBA_ERROR;
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-		 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
 
 	return;
 }
...
 	 */
 	while (!list_empty(&vport->fc_nodes)) {
 		if (i++ > 3000) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_TRACE_EVENT,
 					 "0233 Nodelist not empty\n");
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vport->fc_nodes, nlp_listp) {
 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
-						 LOG_NODE,
-						 "0282 did:x%x ndlp:x%p "
+						 LOG_TRACE_EVENT,
+						 "0282 did:x%x ndlp:x%px "
 						 "usgmap:x%x refcnt:%d\n",
 						 ndlp->nlp_DID, (void *)ndlp,
 						 ndlp->nlp_usg_map,
...
 void
 lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-	lpfc_stop_vport_timers(phba->pport);
+	if (phba->pport)
+		lpfc_stop_vport_timers(phba->pport);
+	cancel_delayed_work_sync(&phba->eq_delay_work);
+	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
 	del_timer_sync(&phba->sli.mbox_tmo);
 	del_timer_sync(&phba->fabric_block_timer);
 	del_timer_sync(&phba->eratt_poll);
...
 		del_timer_sync(&phba->fcp_poll_timer);
 		break;
 	case LPFC_PCI_DEV_OC:
-		/* Stop any OneConnect device sepcific driver timers */
+		/* Stop any OneConnect device specific driver timers */
 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		break;
 	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0297 Invalid device group (x%x)\n",
 				phba->pci_dev_grp);
 		break;
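`lpfc_stop_hba_timers()` now pairs the two self-requeuing delayed works added earlier with `cancel_delayed_work_sync()`, which also waits out a handler that is mid-run; a plain `cancel_delayed_work()` could race with the handler rearming itself. A generic sketch of that lifecycle, with a hypothetical `stats_fn`:

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work stats_work;

static void stats_fn(struct work_struct *work)
{
	/* ... sample statistics ... */
	schedule_delayed_work(&stats_work, msecs_to_jiffies(1000)); /* requeue */
}

static void stats_start(void)
{
	INIT_DELAYED_WORK(&stats_work, stats_fn);
	schedule_delayed_work(&stats_work, msecs_to_jiffies(1000));
}

static void stats_stop(void)
{
	/* The _sync variant waits for a running handler to finish, so the
	 * self-requeue above cannot sneak in after cancellation. */
	cancel_delayed_work_sync(&stats_work);
}
```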
.. | .. |
---|
2975 | 3020 | /** |
---|
2976 | 3021 | * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked |
---|
2977 | 3022 | * @phba: pointer to lpfc hba data structure. |
---|
| 3023 | + * @mbx_action: flag for mailbox no wait action. |
---|
2978 | 3024 | * |
---|
2979 | 3025 | * This routine marks a HBA's management interface as blocked. Once the HBA's |
---|
2980 | 3026 | * management interface is marked as blocked, all the user space access to |
---|
.. | .. |
---|
3011 | 3057 | /* Check active mailbox complete status every 2ms */ |
---|
3012 | 3058 | msleep(2); |
---|
3013 | 3059 | if (time_after(jiffies, timeout)) { |
---|
3014 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
3015 | | - "2813 Mgmt IO is Blocked %x " |
---|
3016 | | - "- mbox cmd %x still active\n", |
---|
3017 | | - phba->sli.sli_flag, actcmd); |
---|
| 3060 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 3061 | + "2813 Mgmt IO is Blocked %x " |
---|
| 3062 | + "- mbox cmd %x still active\n", |
---|
| 3063 | + phba->sli.sli_flag, actcmd); |
---|
3018 | 3064 | break; |
---|
3019 | 3065 | } |
---|
3020 | 3066 | } |
---|
.. | .. |
---|
3060 | 3106 | continue; |
---|
3061 | 3107 | } |
---|
3062 | 3108 | ndlp->nlp_rpi = rpi; |
---|
3063 | | - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, |
---|
3064 | | - "0009 rpi:%x DID:%x " |
---|
3065 | | - "flg:%x map:%x %p\n", ndlp->nlp_rpi, |
---|
3066 | | - ndlp->nlp_DID, ndlp->nlp_flag, |
---|
3067 | | - ndlp->nlp_usg_map, ndlp); |
---|
| 3109 | + lpfc_printf_vlog(ndlp->vport, KERN_INFO, |
---|
| 3110 | + LOG_NODE | LOG_DISCOVERY, |
---|
| 3111 | + "0009 Assign RPI x%x to ndlp x%px " |
---|
| 3112 | + "DID:x%06x flg:x%x map:x%x\n", |
---|
| 3113 | + ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, |
---|
| 3114 | + ndlp->nlp_flag, ndlp->nlp_usg_map); |
---|
3068 | 3115 | } |
---|
3069 | 3116 | } |
---|
3070 | 3117 | lpfc_destroy_vport_work_array(phba, vports); |
---|
| 3118 | +} |
---|
| 3119 | + |
---|
| 3120 | +/** |
---|
| 3121 | + * lpfc_create_expedite_pool - create expedite pool |
---|
| 3122 | + * @phba: pointer to lpfc hba data structure. |
---|
| 3123 | + * |
---|
| 3124 | + * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 |
---|
| 3125 | + * to expedite pool. Mark them as expedite. |
---|
| 3126 | + **/ |
---|
| 3127 | +static void lpfc_create_expedite_pool(struct lpfc_hba *phba) |
---|
| 3128 | +{ |
---|
| 3129 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3130 | + struct lpfc_io_buf *lpfc_ncmd; |
---|
| 3131 | + struct lpfc_io_buf *lpfc_ncmd_next; |
---|
| 3132 | + struct lpfc_epd_pool *epd_pool; |
---|
| 3133 | + unsigned long iflag; |
---|
| 3134 | + |
---|
| 3135 | + epd_pool = &phba->epd_pool; |
---|
| 3136 | + qp = &phba->sli4_hba.hdwq[0]; |
---|
| 3137 | + |
---|
| 3138 | + spin_lock_init(&epd_pool->lock); |
---|
| 3139 | + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); |
---|
| 3140 | + spin_lock(&epd_pool->lock); |
---|
| 3141 | + INIT_LIST_HEAD(&epd_pool->list); |
---|
| 3142 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3143 | + &qp->lpfc_io_buf_list_put, list) { |
---|
| 3144 | + list_move_tail(&lpfc_ncmd->list, &epd_pool->list); |
---|
| 3145 | + lpfc_ncmd->expedite = true; |
---|
| 3146 | + qp->put_io_bufs--; |
---|
| 3147 | + epd_pool->count++; |
---|
| 3148 | + if (epd_pool->count >= XRI_BATCH) |
---|
| 3149 | + break; |
---|
| 3150 | + } |
---|
| 3151 | + spin_unlock(&epd_pool->lock); |
---|
| 3152 | + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); |
---|
| 3153 | +} |
---|
| 3154 | + |
---|
| 3155 | +/** |
---|
| 3156 | + * lpfc_destroy_expedite_pool - destroy expedite pool |
---|
| 3157 | + * @phba: pointer to lpfc hba data structure. |
---|
| 3158 | + * |
---|
| 3159 | + * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put |
---|
| 3160 | + * of HWQ 0. Clear the mark. |
---|
| 3161 | + **/ |
---|
| 3162 | +static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) |
---|
| 3163 | +{ |
---|
| 3164 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3165 | + struct lpfc_io_buf *lpfc_ncmd; |
---|
| 3166 | + struct lpfc_io_buf *lpfc_ncmd_next; |
---|
| 3167 | + struct lpfc_epd_pool *epd_pool; |
---|
| 3168 | + unsigned long iflag; |
---|
| 3169 | + |
---|
| 3170 | + epd_pool = &phba->epd_pool; |
---|
| 3171 | + qp = &phba->sli4_hba.hdwq[0]; |
---|
| 3172 | + |
---|
| 3173 | + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); |
---|
| 3174 | + spin_lock(&epd_pool->lock); |
---|
| 3175 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3176 | + &epd_pool->list, list) { |
---|
| 3177 | + list_move_tail(&lpfc_ncmd->list, |
---|
| 3178 | + &qp->lpfc_io_buf_list_put); |
---|
| 3179 | + lpfc_ncmd->flags = false; |
---|
| 3180 | + qp->put_io_bufs++; |
---|
| 3181 | + epd_pool->count--; |
---|
| 3182 | + } |
---|
| 3183 | + spin_unlock(&epd_pool->lock); |
---|
| 3184 | + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); |
---|
| 3185 | +} |
---|
| 3186 | + |
---|
| 3187 | +/** |
---|
| 3188 | + * lpfc_create_multixri_pools - create multi-XRI pools |
---|
| 3189 | + * @phba: pointer to lpfc hba data structure. |
---|
| 3190 | + * |
---|
| 3191 | + * This routine initialize public, private per HWQ. Then, move XRIs from |
---|
| 3192 | + * lpfc_io_buf_list_put to public pool. High and low watermark are also |
---|
| 3193 | + * Initialized. |
---|
| 3194 | + **/ |
---|
| 3195 | +void lpfc_create_multixri_pools(struct lpfc_hba *phba) |
---|
| 3196 | +{ |
---|
| 3197 | + u32 i, j; |
---|
| 3198 | + u32 hwq_count; |
---|
| 3199 | + u32 count_per_hwq; |
---|
| 3200 | + struct lpfc_io_buf *lpfc_ncmd; |
---|
| 3201 | + struct lpfc_io_buf *lpfc_ncmd_next; |
---|
| 3202 | + unsigned long iflag; |
---|
| 3203 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3204 | + struct lpfc_multixri_pool *multixri_pool; |
---|
| 3205 | + struct lpfc_pbl_pool *pbl_pool; |
---|
| 3206 | + struct lpfc_pvt_pool *pvt_pool; |
---|
| 3207 | + |
---|
| 3208 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 3209 | + "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", |
---|
| 3210 | + phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, |
---|
| 3211 | + phba->sli4_hba.io_xri_cnt); |
---|
| 3212 | + |
---|
| 3213 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
| 3214 | + lpfc_create_expedite_pool(phba); |
---|
| 3215 | + |
---|
| 3216 | + hwq_count = phba->cfg_hdw_queue; |
---|
| 3217 | + count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; |
---|
| 3218 | + |
---|
| 3219 | + for (i = 0; i < hwq_count; i++) { |
---|
| 3220 | + multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); |
---|
| 3221 | + |
---|
| 3222 | + if (!multixri_pool) { |
---|
| 3223 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 3224 | + "1238 Failed to allocate memory for " |
---|
| 3225 | + "multixri_pool\n"); |
---|
| 3226 | + |
---|
| 3227 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
| 3228 | + lpfc_destroy_expedite_pool(phba); |
---|
| 3229 | + |
---|
| 3230 | + j = 0; |
---|
| 3231 | + while (j < i) { |
---|
| 3232 | + qp = &phba->sli4_hba.hdwq[j]; |
---|
| 3233 | + kfree(qp->p_multixri_pool); |
---|
| 3234 | + j++; |
---|
| 3235 | + } |
---|
| 3236 | + phba->cfg_xri_rebalancing = 0; |
---|
| 3237 | + return; |
---|
| 3238 | + } |
---|
| 3239 | + |
---|
| 3240 | + qp = &phba->sli4_hba.hdwq[i]; |
---|
| 3241 | + qp->p_multixri_pool = multixri_pool; |
---|
| 3242 | + |
---|
| 3243 | + multixri_pool->xri_limit = count_per_hwq; |
---|
| 3244 | + multixri_pool->rrb_next_hwqid = i; |
---|
| 3245 | + |
---|
| 3246 | + /* Deal with public free xri pool */ |
---|
| 3247 | + pbl_pool = &multixri_pool->pbl_pool; |
---|
| 3248 | + spin_lock_init(&pbl_pool->lock); |
---|
| 3249 | + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); |
---|
| 3250 | + spin_lock(&pbl_pool->lock); |
---|
| 3251 | + INIT_LIST_HEAD(&pbl_pool->list); |
---|
| 3252 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3253 | + &qp->lpfc_io_buf_list_put, list) { |
---|
| 3254 | + list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); |
---|
| 3255 | + qp->put_io_bufs--; |
---|
| 3256 | + pbl_pool->count++; |
---|
| 3257 | + } |
---|
| 3258 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 3259 | + "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", |
---|
| 3260 | + pbl_pool->count, i); |
---|
| 3261 | + spin_unlock(&pbl_pool->lock); |
---|
| 3262 | + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); |
---|
| 3263 | + |
---|
| 3264 | + /* Deal with private free xri pool */ |
---|
| 3265 | + pvt_pool = &multixri_pool->pvt_pool; |
---|
| 3266 | + pvt_pool->high_watermark = multixri_pool->xri_limit / 2; |
---|
| 3267 | + pvt_pool->low_watermark = XRI_BATCH; |
---|
| 3268 | + spin_lock_init(&pvt_pool->lock); |
---|
| 3269 | + spin_lock_irqsave(&pvt_pool->lock, iflag); |
---|
| 3270 | + INIT_LIST_HEAD(&pvt_pool->list); |
---|
| 3271 | + pvt_pool->count = 0; |
---|
| 3272 | + spin_unlock_irqrestore(&pvt_pool->lock, iflag); |
---|
| 3273 | + } |
---|
| 3274 | +} |
---|
| 3275 | + |
---|
| 3276 | +/** |
---|
| 3277 | + * lpfc_destroy_multixri_pools - destroy multi-XRI pools |
---|
| 3278 | + * @phba: pointer to lpfc hba data structure. |
---|
| 3279 | + * |
---|
| 3280 | + * This routine returns XRIs from public/private to lpfc_io_buf_list_put. |
---|
| 3281 | + **/ |
---|
| 3282 | +static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) |
---|
| 3283 | +{ |
---|
| 3284 | + u32 i; |
---|
| 3285 | + u32 hwq_count; |
---|
| 3286 | + struct lpfc_io_buf *lpfc_ncmd; |
---|
| 3287 | + struct lpfc_io_buf *lpfc_ncmd_next; |
---|
| 3288 | + unsigned long iflag; |
---|
| 3289 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3290 | + struct lpfc_multixri_pool *multixri_pool; |
---|
| 3291 | + struct lpfc_pbl_pool *pbl_pool; |
---|
| 3292 | + struct lpfc_pvt_pool *pvt_pool; |
---|
| 3293 | + |
---|
| 3294 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
| 3295 | + lpfc_destroy_expedite_pool(phba); |
---|
| 3296 | + |
---|
| 3297 | + if (!(phba->pport->load_flag & FC_UNLOADING)) |
---|
| 3298 | + lpfc_sli_flush_io_rings(phba); |
---|
| 3299 | + |
---|
| 3300 | + hwq_count = phba->cfg_hdw_queue; |
---|
| 3301 | + |
---|
| 3302 | + for (i = 0; i < hwq_count; i++) { |
---|
| 3303 | + qp = &phba->sli4_hba.hdwq[i]; |
---|
| 3304 | + multixri_pool = qp->p_multixri_pool; |
---|
| 3305 | + if (!multixri_pool) |
---|
| 3306 | + continue; |
---|
| 3307 | + |
---|
| 3308 | + qp->p_multixri_pool = NULL; |
---|
| 3309 | + |
---|
| 3310 | + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); |
---|
| 3311 | + |
---|
| 3312 | + /* Deal with public free xri pool */ |
---|
| 3313 | + pbl_pool = &multixri_pool->pbl_pool; |
---|
| 3314 | + spin_lock(&pbl_pool->lock); |
---|
| 3315 | + |
---|
| 3316 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 3317 | + "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", |
---|
| 3318 | + pbl_pool->count, i); |
---|
| 3319 | + |
---|
| 3320 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3321 | + &pbl_pool->list, list) { |
---|
| 3322 | + list_move_tail(&lpfc_ncmd->list, |
---|
| 3323 | + &qp->lpfc_io_buf_list_put); |
---|
| 3324 | + qp->put_io_bufs++; |
---|
| 3325 | + pbl_pool->count--; |
---|
| 3326 | + } |
---|
| 3327 | + |
---|
| 3328 | + INIT_LIST_HEAD(&pbl_pool->list); |
---|
| 3329 | + pbl_pool->count = 0; |
---|
| 3330 | + |
---|
| 3331 | + spin_unlock(&pbl_pool->lock); |
---|
| 3332 | + |
---|
| 3333 | + /* Deal with private free xri pool */ |
---|
| 3334 | + pvt_pool = &multixri_pool->pvt_pool; |
---|
| 3335 | + spin_lock(&pvt_pool->lock); |
---|
| 3336 | + |
---|
| 3337 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 3338 | + "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", |
---|
| 3339 | + pvt_pool->count, i); |
---|
| 3340 | + |
---|
| 3341 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3342 | + &pvt_pool->list, list) { |
---|
| 3343 | + list_move_tail(&lpfc_ncmd->list, |
---|
| 3344 | + &qp->lpfc_io_buf_list_put); |
---|
| 3345 | + qp->put_io_bufs++; |
---|
| 3346 | + pvt_pool->count--; |
---|
| 3347 | + } |
---|
| 3348 | + |
---|
| 3349 | + INIT_LIST_HEAD(&pvt_pool->list); |
---|
| 3350 | + pvt_pool->count = 0; |
---|
| 3351 | + |
---|
| 3352 | + spin_unlock(&pvt_pool->lock); |
---|
| 3353 | + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); |
---|
| 3354 | + |
---|
| 3355 | + kfree(multixri_pool); |
---|
| 3356 | + } |
---|
3071 | 3357 | } |
---|
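For reference, the drain in lpfc_destroy_multixri_pools boils down to splicing every buffer out of a pool list onto the put list and zeroing the pool's count, under the put-list lock with the pool lock nested inside. A minimal userspace sketch of just that splice step, using toy types (struct buf/struct list are illustrative, not driver structures) and no locking:

#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; };
struct list { struct buf *head; int count; };

/* Move every buffer from src onto dst and zero src, the way each
 * pbl/pvt pool is drained onto lpfc_io_buf_list_put. */
static void drain(struct list *src, struct list *dst)
{
	while (src->head) {
		struct buf *b = src->head;
		src->head = b->next;
		b->next = dst->head;
		dst->head = b;
		dst->count++;
		src->count--;
	}
}

int main(void)
{
	struct list pool = { NULL, 0 }, put = { NULL, 0 };

	for (int i = 0; i < 3; i++) {
		struct buf *b = calloc(1, sizeof(*b));
		b->next = pool.head;
		pool.head = b;
		pool.count++;
	}
	drain(&pool, &put);
	printf("pool=%d put=%d\n", pool.count, put.count); /* pool=0 put=3 */
	return 0;
}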
3072 | 3358 | |
---|
3073 | 3359 | /** |
---|
.. | .. |
---|
3119 | 3405 | !phba->nvmet_support) { |
---|
3120 | 3406 | error = lpfc_nvme_create_localport(phba->pport); |
---|
3121 | 3407 | if (error) |
---|
3122 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 3408 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3123 | 3409 | "6132 NVME restore reg failed " |
---|
3124 | 3410 | "on nvmei error x%x\n", error); |
---|
3125 | 3411 | } |
---|
.. | .. |
---|
3152 | 3438 | } |
---|
3153 | 3439 | lpfc_destroy_vport_work_array(phba, vports); |
---|
3154 | 3440 | |
---|
| 3441 | + if (phba->cfg_xri_rebalancing) |
---|
| 3442 | + lpfc_create_multixri_pools(phba); |
---|
| 3443 | + |
---|
| 3444 | + lpfc_cpuhp_add(phba); |
---|
| 3445 | + |
---|
3155 | 3446 | lpfc_unblock_mgmt_io(phba); |
---|
3156 | 3447 | return 0; |
---|
3157 | 3448 | } |
---|
.. | .. |
---|
3180 | 3471 | /** |
---|
3181 | 3472 | * lpfc_offline_prep - Prepare a HBA to be brought offline |
---|
3182 | 3473 | * @phba: pointer to lpfc hba data structure. |
---|
| 3474 | + * @mbx_action: flag for mailbox shutdown action. |
---|
3183 | 3475 | * |
---|
3184 | 3476 | * This routine is invoked to prepare a HBA to be brought offline. It performs |
---|
3185 | 3477 | * unregistration login to all the nodes on all vports and flushes the mailbox |
---|
.. | .. |
---|
3218 | 3510 | list_for_each_entry_safe(ndlp, next_ndlp, |
---|
3219 | 3511 | &vports[i]->fc_nodes, |
---|
3220 | 3512 | nlp_listp) { |
---|
3221 | | - if (!NLP_CHK_NODE_ACT(ndlp)) |
---|
| 3513 | + if ((!NLP_CHK_NODE_ACT(ndlp)) || |
---|
| 3514 | + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { |
---|
| 3515 | + /* Driver must assume RPI is invalid for |
---|
| 3516 | + * any unused or inactive node. |
---|
| 3517 | + */ |
---|
| 3518 | + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; |
---|
3222 | 3519 | continue; |
---|
3223 | | - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
---|
3224 | | - continue; |
---|
| 3520 | + } |
---|
| 3521 | + |
---|
3225 | 3522 | if (ndlp->nlp_type & NLP_FABRIC) { |
---|
3226 | 3523 | lpfc_disc_state_machine(vports[i], ndlp, |
---|
3227 | 3524 | NULL, NLP_EVT_DEVICE_RECOVERY); |
---|
.. | .. |
---|
3237 | 3534 | * comes back online. |
---|
3238 | 3535 | */ |
---|
3239 | 3536 | if (phba->sli_rev == LPFC_SLI_REV4) { |
---|
3240 | | - lpfc_printf_vlog(ndlp->vport, |
---|
3241 | | - KERN_INFO, LOG_NODE, |
---|
3242 | | - "0011 lpfc_offline: " |
---|
3243 | | - "ndlp:x%p did %x " |
---|
3244 | | - "usgmap:x%x rpi:%x\n", |
---|
3245 | | - ndlp, ndlp->nlp_DID, |
---|
3246 | | - ndlp->nlp_usg_map, |
---|
3247 | | - ndlp->nlp_rpi); |
---|
3248 | | - |
---|
| 3537 | + lpfc_printf_vlog(ndlp->vport, KERN_INFO, |
---|
| 3538 | + LOG_NODE | LOG_DISCOVERY, |
---|
| 3539 | + "0011 Free RPI x%x on " |
---|
| 3540 | + "ndlp:x%px did x%x " |
---|
| 3541 | + "usgmap:x%x\n", |
---|
| 3542 | + ndlp->nlp_rpi, ndlp, |
---|
| 3543 | + ndlp->nlp_DID, |
---|
| 3544 | + ndlp->nlp_usg_map); |
---|
3249 | 3545 | lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); |
---|
| 3546 | + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; |
---|
3250 | 3547 | } |
---|
3251 | 3548 | lpfc_unreg_rpi(vports[i], ndlp); |
---|
3252 | 3549 | } |
---|
.. | .. |
---|
3310 | 3607 | spin_unlock_irq(shost->host_lock); |
---|
3311 | 3608 | } |
---|
3312 | 3609 | lpfc_destroy_vport_work_array(phba, vports); |
---|
| 3610 | + __lpfc_cpuhp_remove(phba); |
---|
| 3611 | + |
---|
| 3612 | + if (phba->cfg_xri_rebalancing) |
---|
| 3613 | + lpfc_destroy_multixri_pools(phba); |
---|
3313 | 3614 | } |
---|
3314 | 3615 | |
---|
3315 | 3616 | /** |
---|
.. | .. |
---|
3323 | 3624 | static void |
---|
3324 | 3625 | lpfc_scsi_free(struct lpfc_hba *phba) |
---|
3325 | 3626 | { |
---|
3326 | | - struct lpfc_scsi_buf *sb, *sb_next; |
---|
| 3627 | + struct lpfc_io_buf *sb, *sb_next; |
---|
3327 | 3628 | |
---|
3328 | 3629 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
---|
3329 | 3630 | return; |
---|
.. | .. |
---|
3355 | 3656 | spin_unlock(&phba->scsi_buf_list_get_lock); |
---|
3356 | 3657 | spin_unlock_irq(&phba->hbalock); |
---|
3357 | 3658 | } |
---|
| 3659 | + |
---|
3358 | 3660 | /** |
---|
3359 | | - * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists |
---|
| 3661 | + * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists |
---|
3360 | 3662 | * @phba: pointer to lpfc hba data structure. |
---|
3361 | 3663 | * |
---|
3362 | | - * This routine is to free all the NVME buffers and IOCBs from the driver |
---|
| 3664 | + * This routine is to free all the IO buffers and IOCBs from the driver |
---|
3363 | 3665 | * list back to kernel. It is called from lpfc_pci_remove_one to free |
---|
3364 | 3666 | * the internal resources before the device is removed from the system. |
---|
3365 | 3667 | **/ |
---|
3366 | | -static void |
---|
3367 | | -lpfc_nvme_free(struct lpfc_hba *phba) |
---|
| 3668 | +void |
---|
| 3669 | +lpfc_io_free(struct lpfc_hba *phba) |
---|
3368 | 3670 | { |
---|
3369 | | - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; |
---|
| 3671 | + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; |
---|
| 3672 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3673 | + int idx; |
---|
3370 | 3674 | |
---|
3371 | | - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
---|
3372 | | - return; |
---|
| 3675 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 3676 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 3677 | + /* Release all the lpfc_nvme_bufs maintained by this host. */ |
---|
| 3678 | + spin_lock(&qp->io_buf_list_put_lock); |
---|
| 3679 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3680 | + &qp->lpfc_io_buf_list_put, |
---|
| 3681 | + list) { |
---|
| 3682 | + list_del(&lpfc_ncmd->list); |
---|
| 3683 | + qp->put_io_bufs--; |
---|
| 3684 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
| 3685 | + lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
---|
| 3686 | + if (phba->cfg_xpsgl && !phba->nvmet_support) |
---|
| 3687 | + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); |
---|
| 3688 | + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); |
---|
| 3689 | + kfree(lpfc_ncmd); |
---|
| 3690 | + qp->total_io_bufs--; |
---|
| 3691 | + } |
---|
| 3692 | + spin_unlock(&qp->io_buf_list_put_lock); |
---|
3373 | 3693 | |
---|
3374 | | - spin_lock_irq(&phba->hbalock); |
---|
3375 | | - |
---|
3376 | | - /* Release all the lpfc_nvme_bufs maintained by this host. */ |
---|
3377 | | - spin_lock(&phba->nvme_buf_list_put_lock); |
---|
3378 | | - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
3379 | | - &phba->lpfc_nvme_buf_list_put, list) { |
---|
3380 | | - list_del(&lpfc_ncmd->list); |
---|
3381 | | - phba->put_nvme_bufs--; |
---|
3382 | | - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, |
---|
3383 | | - lpfc_ncmd->dma_handle); |
---|
3384 | | - kfree(lpfc_ncmd); |
---|
3385 | | - phba->total_nvme_bufs--; |
---|
| 3694 | + spin_lock(&qp->io_buf_list_get_lock); |
---|
| 3695 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 3696 | + &qp->lpfc_io_buf_list_get, |
---|
| 3697 | + list) { |
---|
| 3698 | + list_del(&lpfc_ncmd->list); |
---|
| 3699 | + qp->get_io_bufs--; |
---|
| 3700 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
| 3701 | + lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
---|
| 3702 | + if (phba->cfg_xpsgl && !phba->nvmet_support) |
---|
| 3703 | + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); |
---|
| 3704 | + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); |
---|
| 3705 | + kfree(lpfc_ncmd); |
---|
| 3706 | + qp->total_io_bufs--; |
---|
| 3707 | + } |
---|
| 3708 | + spin_unlock(&qp->io_buf_list_get_lock); |
---|
3386 | 3709 | } |
---|
3387 | | - spin_unlock(&phba->nvme_buf_list_put_lock); |
---|
3388 | | - |
---|
3389 | | - spin_lock(&phba->nvme_buf_list_get_lock); |
---|
3390 | | - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
3391 | | - &phba->lpfc_nvme_buf_list_get, list) { |
---|
3392 | | - list_del(&lpfc_ncmd->list); |
---|
3393 | | - phba->get_nvme_bufs--; |
---|
3394 | | - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, |
---|
3395 | | - lpfc_ncmd->dma_handle); |
---|
3396 | | - kfree(lpfc_ncmd); |
---|
3397 | | - phba->total_nvme_bufs--; |
---|
3398 | | - } |
---|
3399 | | - spin_unlock(&phba->nvme_buf_list_get_lock); |
---|
3400 | | - spin_unlock_irq(&phba->hbalock); |
---|
3401 | 3710 | } |
---|
| 3711 | + |
---|
3402 | 3712 | /** |
---|
3403 | 3713 | * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping |
---|
3404 | 3714 | * @phba: pointer to lpfc hba data structure. |
---|
.. | .. |
---|
3436 | 3746 | sglq_entry = kzalloc(sizeof(struct lpfc_sglq), |
---|
3437 | 3747 | GFP_KERNEL); |
---|
3438 | 3748 | if (sglq_entry == NULL) { |
---|
3439 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3749 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3750 | + LOG_TRACE_EVENT, |
---|
3440 | 3751 | "2562 Failure to allocate an " |
---|
3441 | 3752 | "ELS sgl entry:%d\n", i); |
---|
3442 | 3753 | rc = -ENOMEM; |
---|
.. | .. |
---|
3447 | 3758 | &sglq_entry->phys); |
---|
3448 | 3759 | if (sglq_entry->virt == NULL) { |
---|
3449 | 3760 | kfree(sglq_entry); |
---|
3450 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3761 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3762 | + LOG_TRACE_EVENT, |
---|
3451 | 3763 | "2563 Failure to allocate an " |
---|
3452 | 3764 | "ELS mbuf:%d\n", i); |
---|
3453 | 3765 | rc = -ENOMEM; |
---|
.. | .. |
---|
3502 | 3814 | &phba->sli4_hba.lpfc_els_sgl_list, list) { |
---|
3503 | 3815 | lxri = lpfc_sli4_next_xritag(phba); |
---|
3504 | 3816 | if (lxri == NO_XRI) { |
---|
3505 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3817 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3818 | + LOG_TRACE_EVENT, |
---|
3506 | 3819 | "2400 Failed to allocate xri for " |
---|
3507 | 3820 | "ELS sgl\n"); |
---|
3508 | 3821 | rc = -ENOMEM; |
---|
.. | .. |
---|
3557 | 3870 | sglq_entry = kzalloc(sizeof(struct lpfc_sglq), |
---|
3558 | 3871 | GFP_KERNEL); |
---|
3559 | 3872 | if (sglq_entry == NULL) { |
---|
3560 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3873 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3874 | + LOG_TRACE_EVENT, |
---|
3561 | 3875 | "6303 Failure to allocate an " |
---|
3562 | 3876 | "NVMET sgl entry:%d\n", i); |
---|
3563 | 3877 | rc = -ENOMEM; |
---|
.. | .. |
---|
3568 | 3882 | &sglq_entry->phys); |
---|
3569 | 3883 | if (sglq_entry->virt == NULL) { |
---|
3570 | 3884 | kfree(sglq_entry); |
---|
3571 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3885 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3886 | + LOG_TRACE_EVENT, |
---|
3572 | 3887 | "6304 Failure to allocate an " |
---|
3573 | 3888 | "NVMET buf:%d\n", i); |
---|
3574 | 3889 | rc = -ENOMEM; |
---|
.. | .. |
---|
3624 | 3939 | &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { |
---|
3625 | 3940 | lxri = lpfc_sli4_next_xritag(phba); |
---|
3626 | 3941 | if (lxri == NO_XRI) { |
---|
3627 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 3942 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 3943 | + LOG_TRACE_EVENT, |
---|
3628 | 3944 | "6307 Failed to allocate xri for " |
---|
3629 | 3945 | "NVMET sgl\n"); |
---|
3630 | 3946 | rc = -ENOMEM; |
---|
.. | .. |
---|
3640 | 3956 | return rc; |
---|
3641 | 3957 | } |
---|
3642 | 3958 | |
---|
| 3959 | +int |
---|
| 3960 | +lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) |
---|
| 3961 | +{ |
---|
| 3962 | + LIST_HEAD(blist); |
---|
| 3963 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 3964 | + struct lpfc_io_buf *lpfc_cmd; |
---|
| 3965 | + struct lpfc_io_buf *iobufp, *prev_iobufp; |
---|
| 3966 | + int idx, cnt, xri, inserted; |
---|
| 3967 | + |
---|
| 3968 | + cnt = 0; |
---|
| 3969 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 3970 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 3971 | + spin_lock_irq(&qp->io_buf_list_get_lock); |
---|
| 3972 | + spin_lock(&qp->io_buf_list_put_lock); |
---|
| 3973 | + |
---|
| 3974 | + /* Take everything off the get and put lists */ |
---|
| 3975 | + list_splice_init(&qp->lpfc_io_buf_list_get, &blist); |
---|
| 3976 | + list_splice(&qp->lpfc_io_buf_list_put, &blist); |
---|
| 3977 | + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); |
---|
| 3978 | + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); |
---|
| 3979 | + cnt += qp->get_io_bufs + qp->put_io_bufs; |
---|
| 3980 | + qp->get_io_bufs = 0; |
---|
| 3981 | + qp->put_io_bufs = 0; |
---|
| 3982 | + qp->total_io_bufs = 0; |
---|
| 3983 | + spin_unlock(&qp->io_buf_list_put_lock); |
---|
| 3984 | + spin_unlock_irq(&qp->io_buf_list_get_lock); |
---|
| 3985 | + } |
---|
| 3986 | + |
---|
| 3987 | + /* |
---|
| 3988 | + * Take IO buffers off blist and put them on cbuf, sorted by XRI.
---|
| 3989 | + * This is because POST_SGL takes a sequential range of XRIs |
---|
| 3990 | + * to post to the firmware. |
---|
| 3991 | + */ |
---|
| 3992 | + for (idx = 0; idx < cnt; idx++) { |
---|
| 3993 | + list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); |
---|
| 3994 | + if (!lpfc_cmd) |
---|
| 3995 | + return cnt; |
---|
| 3996 | + if (idx == 0) { |
---|
| 3997 | + list_add_tail(&lpfc_cmd->list, cbuf); |
---|
| 3998 | + continue; |
---|
| 3999 | + } |
---|
| 4000 | + xri = lpfc_cmd->cur_iocbq.sli4_xritag; |
---|
| 4001 | + inserted = 0; |
---|
| 4002 | + prev_iobufp = NULL; |
---|
| 4003 | + list_for_each_entry(iobufp, cbuf, list) { |
---|
| 4004 | + if (xri < iobufp->cur_iocbq.sli4_xritag) { |
---|
| 4005 | + if (prev_iobufp) |
---|
| 4006 | + list_add(&lpfc_cmd->list, |
---|
| 4007 | + &prev_iobufp->list); |
---|
| 4008 | + else |
---|
| 4009 | + list_add(&lpfc_cmd->list, cbuf); |
---|
| 4010 | + inserted = 1; |
---|
| 4011 | + break; |
---|
| 4012 | + } |
---|
| 4013 | + prev_iobufp = iobufp; |
---|
| 4014 | + } |
---|
| 4015 | + if (!inserted) |
---|
| 4016 | + list_add_tail(&lpfc_cmd->list, cbuf); |
---|
| 4017 | + } |
---|
| 4018 | + return cnt; |
---|
| 4019 | +} |
---|
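lpfc_io_buf_flush keeps cbuf sorted by sli4_xritag because an SGL block post wants a sequential XRI range. The prev_iobufp/inserted walk above is an insertion sort into a linked list; a standalone sketch of the same idea (hypothetical struct node, not the driver's lpfc_io_buf):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int xri;
	struct node *next;
};

/* Insert n into *head keeping the list sorted by ascending xri,
 * mirroring the prev_iobufp/inserted walk in lpfc_io_buf_flush. */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node *cur = *head, *prev = NULL;

	while (cur && cur->xri <= n->xri) {
		prev = cur;
		cur = cur->next;
	}
	n->next = cur;
	if (prev)
		prev->next = n;
	else
		*head = n;
}

int main(void)
{
	int xris[] = { 42, 7, 99, 13 };
	struct node *head = NULL;

	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));	/* sketch: no OOM check */
		n->xri = xris[i];
		insert_sorted(&head, n);
	}
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->xri);	/* prints: 7 13 42 99 */
	printf("\n");
	return 0;
}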
| 4020 | + |
---|
| 4021 | +int |
---|
| 4022 | +lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) |
---|
| 4023 | +{ |
---|
| 4024 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 4025 | + struct lpfc_io_buf *lpfc_cmd; |
---|
| 4026 | + int idx, cnt; |
---|
| 4027 | + |
---|
| 4028 | + qp = phba->sli4_hba.hdwq; |
---|
| 4029 | + cnt = 0; |
---|
| 4030 | + while (!list_empty(cbuf)) { |
---|
| 4031 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 4032 | + list_remove_head(cbuf, lpfc_cmd, |
---|
| 4033 | + struct lpfc_io_buf, list); |
---|
| 4034 | + if (!lpfc_cmd) |
---|
| 4035 | + return cnt; |
---|
| 4036 | + cnt++; |
---|
| 4037 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 4038 | + lpfc_cmd->hdwq_no = idx; |
---|
| 4039 | + lpfc_cmd->hdwq = qp; |
---|
| 4040 | + lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; |
---|
| 4041 | + lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; |
---|
| 4042 | + spin_lock(&qp->io_buf_list_put_lock); |
---|
| 4043 | + list_add_tail(&lpfc_cmd->list, |
---|
| 4044 | + &qp->lpfc_io_buf_list_put); |
---|
| 4045 | + qp->put_io_bufs++; |
---|
| 4046 | + qp->total_io_bufs++; |
---|
| 4047 | + spin_unlock(&qp->io_buf_list_put_lock); |
---|
| 4048 | + } |
---|
| 4049 | + } |
---|
| 4050 | + return cnt; |
---|
| 4051 | +} |
---|
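lpfc_io_buf_replenish then spreads the sorted buffers back across the hardware queues, one buffer per queue per round, which is effectively a modulo distribution. A toy demonstration (NUM_HWQ and the buffer count are made up):

#include <stdio.h>

#define NUM_HWQ 4

/* Hand out 10 buffers round-robin across NUM_HWQ queues, the same
 * spreading lpfc_io_buf_replenish does across hdwq[idx]. */
int main(void)
{
	int per_queue[NUM_HWQ] = { 0 };
	int total = 10;

	for (int i = 0; i < total; i++)
		per_queue[i % NUM_HWQ]++;

	for (int q = 0; q < NUM_HWQ; q++)
		printf("hdwq[%d] gets %d buffers\n", q, per_queue[q]);
	return 0;
}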
| 4052 | + |
---|
3643 | 4053 | /** |
---|
3644 | | - * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping |
---|
| 4054 | + * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping |
---|
3645 | 4055 | * @phba: pointer to lpfc hba data structure. |
---|
3646 | 4056 | * |
---|
3647 | 4057 | * This routine first calculates the sizes of the current els and allocated |
---|
.. | .. |
---|
3653 | 4063 | * 0 - successful (for now, it always returns 0) |
---|
3654 | 4064 | **/ |
---|
3655 | 4065 | int |
---|
3656 | | -lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) |
---|
| 4066 | +lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) |
---|
3657 | 4067 | { |
---|
3658 | | - struct lpfc_scsi_buf *psb, *psb_next; |
---|
3659 | | - uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; |
---|
3660 | | - LIST_HEAD(scsi_sgl_list); |
---|
3661 | | - int rc; |
---|
| 4068 | + struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; |
---|
| 4069 | + uint16_t i, lxri, els_xri_cnt; |
---|
| 4070 | + uint16_t io_xri_cnt, io_xri_max; |
---|
| 4071 | + LIST_HEAD(io_sgl_list); |
---|
| 4072 | + int rc, cnt; |
---|
3662 | 4073 | |
---|
3663 | 4074 | /* |
---|
3664 | | - * update on pci function's els xri-sgl list |
---|
| 4075 | + * update on pci function's allocated nvme xri-sgl list |
---|
3665 | 4076 | */ |
---|
| 4077 | + |
---|
| 4078 | + /* maximum number of xris available for nvme buffers */ |
---|
3666 | 4079 | els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); |
---|
3667 | | - phba->total_scsi_bufs = 0; |
---|
3668 | | - |
---|
3669 | | - /* |
---|
3670 | | - * update on pci function's allocated scsi xri-sgl list |
---|
3671 | | - */ |
---|
3672 | | - /* maximum number of xris available for scsi buffers */ |
---|
3673 | | - phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - |
---|
3674 | | - els_xri_cnt; |
---|
3675 | | - |
---|
3676 | | - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
---|
3677 | | - return 0; |
---|
3678 | | - |
---|
3679 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
3680 | | - phba->sli4_hba.scsi_xri_max = /* Split them up */ |
---|
3681 | | - (phba->sli4_hba.scsi_xri_max * |
---|
3682 | | - phba->cfg_xri_split) / 100; |
---|
3683 | | - |
---|
3684 | | - spin_lock_irq(&phba->scsi_buf_list_get_lock); |
---|
3685 | | - spin_lock(&phba->scsi_buf_list_put_lock); |
---|
3686 | | - list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); |
---|
3687 | | - list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); |
---|
3688 | | - spin_unlock(&phba->scsi_buf_list_put_lock); |
---|
3689 | | - spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
---|
| 4080 | + io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; |
---|
| 4081 | + phba->sli4_hba.io_xri_max = io_xri_max; |
---|
3690 | 4082 | |
---|
3691 | 4083 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
3692 | | - "6060 Current allocated SCSI xri-sgl count:%d, " |
---|
3693 | | - "maximum SCSI xri count:%d (split:%d)\n", |
---|
3694 | | - phba->sli4_hba.scsi_xri_cnt, |
---|
3695 | | - phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); |
---|
| 4084 | + "6074 Current allocated XRI sgl count:%d, " |
---|
| 4085 | + "maximum XRI count:%d\n", |
---|
| 4086 | + phba->sli4_hba.io_xri_cnt, |
---|
| 4087 | + phba->sli4_hba.io_xri_max); |
---|
3696 | 4088 | |
---|
3697 | | - if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { |
---|
3698 | | - /* max scsi xri shrinked below the allocated scsi buffers */ |
---|
3699 | | - scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - |
---|
3700 | | - phba->sli4_hba.scsi_xri_max; |
---|
3701 | | - /* release the extra allocated scsi buffers */ |
---|
3702 | | - for (i = 0; i < scsi_xri_cnt; i++) { |
---|
3703 | | - list_remove_head(&scsi_sgl_list, psb, |
---|
3704 | | - struct lpfc_scsi_buf, list); |
---|
3705 | | - if (psb) { |
---|
| 4089 | + cnt = lpfc_io_buf_flush(phba, &io_sgl_list); |
---|
| 4090 | + |
---|
| 4091 | + if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { |
---|
| 4092 | + /* max nvme xri shrunk below the allocated nvme buffers */ |
---|
| 4093 | + io_xri_cnt = phba->sli4_hba.io_xri_cnt - |
---|
| 4094 | + phba->sli4_hba.io_xri_max; |
---|
| 4095 | + /* release the extra allocated nvme buffers */ |
---|
| 4096 | + for (i = 0; i < io_xri_cnt; i++) { |
---|
| 4097 | + list_remove_head(&io_sgl_list, lpfc_ncmd, |
---|
| 4098 | + struct lpfc_io_buf, list); |
---|
| 4099 | + if (lpfc_ncmd) { |
---|
3706 | 4100 | dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
3707 | | - psb->data, psb->dma_handle); |
---|
3708 | | - kfree(psb); |
---|
| 4101 | + lpfc_ncmd->data, |
---|
| 4102 | + lpfc_ncmd->dma_handle); |
---|
| 4103 | + kfree(lpfc_ncmd); |
---|
3709 | 4104 | } |
---|
3710 | 4105 | } |
---|
3711 | | - spin_lock_irq(&phba->scsi_buf_list_get_lock); |
---|
3712 | | - phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; |
---|
3713 | | - spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
---|
| 4106 | + phba->sli4_hba.io_xri_cnt -= io_xri_cnt; |
---|
3714 | 4107 | } |
---|
3715 | 4108 | |
---|
3716 | | - /* update xris associated to remaining allocated scsi buffers */ |
---|
3717 | | - psb = NULL; |
---|
3718 | | - psb_next = NULL; |
---|
3719 | | - list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { |
---|
| 4109 | + /* update xris associated to remaining allocated nvme buffers */ |
---|
| 4110 | + lpfc_ncmd = NULL; |
---|
| 4111 | + lpfc_ncmd_next = NULL; |
---|
| 4112 | + phba->sli4_hba.io_xri_cnt = cnt; |
---|
| 4113 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
| 4114 | + &io_sgl_list, list) { |
---|
3720 | 4115 | lxri = lpfc_sli4_next_xritag(phba); |
---|
3721 | 4116 | if (lxri == NO_XRI) { |
---|
3722 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
3723 | | - "2560 Failed to allocate xri for " |
---|
3724 | | - "scsi buffer\n"); |
---|
| 4117 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 4118 | + LOG_TRACE_EVENT, |
---|
| 4119 | + "6075 Failed to allocate xri for " |
---|
| 4120 | + "nvme buffer\n"); |
---|
3725 | 4121 | rc = -ENOMEM; |
---|
3726 | 4122 | goto out_free_mem; |
---|
3727 | 4123 | } |
---|
3728 | | - psb->cur_iocbq.sli4_lxritag = lxri; |
---|
3729 | | - psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
---|
| 4124 | + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; |
---|
| 4125 | + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
---|
3730 | 4126 | } |
---|
3731 | | - spin_lock_irq(&phba->scsi_buf_list_get_lock); |
---|
3732 | | - spin_lock(&phba->scsi_buf_list_put_lock); |
---|
3733 | | - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); |
---|
3734 | | - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
---|
3735 | | - spin_unlock(&phba->scsi_buf_list_put_lock); |
---|
3736 | | - spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
---|
| 4127 | + cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); |
---|
3737 | 4128 | return 0; |
---|
3738 | 4129 | |
---|
3739 | 4130 | out_free_mem: |
---|
3740 | | - lpfc_scsi_free(phba); |
---|
| 4131 | + lpfc_io_free(phba); |
---|
3741 | 4132 | return rc; |
---|
| 4133 | +} |
---|
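The XRI accounting in lpfc_sli4_io_sgl_update amounts to: reserve els_xri_cnt out of max_xri for ELS, cap IO buffers at the remainder, and free whatever excess a port reset left allocated. A worked example with illustrative numbers (real values come from READ_CONFIG, not these constants):

#include <stdio.h>

/* Illustrative numbers only; real values come from READ_CONFIG. */
int main(void)
{
	int max_xri = 2048;	/* port-wide XRI resources        */
	int els_xri_cnt = 256;	/* reserved for ELS sgls          */
	int io_xri_cnt = 1900;	/* IO buffers currently allocated */

	int io_xri_max = max_xri - els_xri_cnt;		/* 1792 */

	if (io_xri_cnt > io_xri_max) {
		int extra = io_xri_cnt - io_xri_max;	/* 108 released */
		printf("release %d extra IO buffers\n", extra);
		io_xri_cnt -= extra;
	}
	printf("io_xri_max=%d io_xri_cnt=%d\n", io_xri_max, io_xri_cnt);
	return 0;
}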
| 4134 | + |
---|
| 4135 | +/** |
---|
| 4136 | + * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec |
---|
| 4137 | + * @phba: Pointer to lpfc hba data structure. |
---|
| 4138 | + * @num_to_alloc: The requested number of buffers to allocate. |
---|
| 4139 | + * |
---|
| 4140 | + * This routine allocates NVME buffers for a device with the SLI-4 interface
---|
| 4141 | + * spec; each buffer contains all the information needed to initiate an I/O.
---|
| 4142 | + * After allocating up to @num_to_alloc IO buffers and putting them on a
---|
| 4143 | + * list, it posts them to the port using an SGL block post.
---|
| 4144 | + * |
---|
| 4145 | + * Return codes: |
---|
| 4146 | + * int - number of IO buffers that were allocated and posted. |
---|
| 4147 | + * 0 = failure; any count less than num_to_alloc indicates a partial failure.
---|
| 4148 | + **/ |
---|
| 4149 | +int |
---|
| 4150 | +lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) |
---|
| 4151 | +{ |
---|
| 4152 | + struct lpfc_io_buf *lpfc_ncmd; |
---|
| 4153 | + struct lpfc_iocbq *pwqeq; |
---|
| 4154 | + uint16_t iotag, lxri = 0; |
---|
| 4155 | + int bcnt, num_posted; |
---|
| 4156 | + LIST_HEAD(prep_nblist); |
---|
| 4157 | + LIST_HEAD(post_nblist); |
---|
| 4158 | + LIST_HEAD(nvme_nblist); |
---|
| 4159 | + |
---|
| 4160 | + phba->sli4_hba.io_xri_cnt = 0; |
---|
| 4161 | + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { |
---|
| 4162 | + lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); |
---|
| 4163 | + if (!lpfc_ncmd) |
---|
| 4164 | + break; |
---|
| 4165 | + /* |
---|
| 4166 | + * Get memory from the pci pool to map the virt space to |
---|
| 4167 | + * pci bus space for an I/O. The DMA buffer includes the |
---|
| 4168 | + * number of SGE's necessary to support the sg_tablesize. |
---|
| 4169 | + */ |
---|
| 4170 | + lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, |
---|
| 4171 | + GFP_KERNEL, |
---|
| 4172 | + &lpfc_ncmd->dma_handle); |
---|
| 4173 | + if (!lpfc_ncmd->data) { |
---|
| 4174 | + kfree(lpfc_ncmd); |
---|
| 4175 | + break; |
---|
| 4176 | + } |
---|
| 4177 | + |
---|
| 4178 | + if (phba->cfg_xpsgl && !phba->nvmet_support) { |
---|
| 4179 | + INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); |
---|
| 4180 | + } else { |
---|
| 4181 | + /* |
---|
| 4182 | + * 4K Page alignment is CRITICAL to BlockGuard, double |
---|
| 4183 | + * check to be sure. |
---|
| 4184 | + */ |
---|
| 4185 | + if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && |
---|
| 4186 | + (((unsigned long)(lpfc_ncmd->data) & |
---|
| 4187 | + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { |
---|
| 4188 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 4189 | + LOG_TRACE_EVENT, |
---|
| 4190 | + "3369 Memory alignment err: " |
---|
| 4191 | + "addr=%lx\n", |
---|
| 4192 | + (unsigned long)lpfc_ncmd->data); |
---|
| 4193 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
| 4194 | + lpfc_ncmd->data, |
---|
| 4195 | + lpfc_ncmd->dma_handle); |
---|
| 4196 | + kfree(lpfc_ncmd); |
---|
| 4197 | + break; |
---|
| 4198 | + } |
---|
| 4199 | + } |
---|
| 4200 | + |
---|
| 4201 | + INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); |
---|
| 4202 | + |
---|
| 4203 | + lxri = lpfc_sli4_next_xritag(phba); |
---|
| 4204 | + if (lxri == NO_XRI) { |
---|
| 4205 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
| 4206 | + lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
---|
| 4207 | + kfree(lpfc_ncmd); |
---|
| 4208 | + break; |
---|
| 4209 | + } |
---|
| 4210 | + pwqeq = &lpfc_ncmd->cur_iocbq; |
---|
| 4211 | + |
---|
| 4212 | + /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ |
---|
| 4213 | + iotag = lpfc_sli_next_iotag(phba, pwqeq); |
---|
| 4214 | + if (iotag == 0) { |
---|
| 4215 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
| 4216 | + lpfc_ncmd->data, lpfc_ncmd->dma_handle); |
---|
| 4217 | + kfree(lpfc_ncmd); |
---|
| 4218 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 4219 | + "6121 Failed to allocate IOTAG for" |
---|
| 4220 | + " XRI:0x%x\n", lxri); |
---|
| 4221 | + lpfc_sli4_free_xri(phba, lxri); |
---|
| 4222 | + break; |
---|
| 4223 | + } |
---|
| 4224 | + pwqeq->sli4_lxritag = lxri; |
---|
| 4225 | + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
---|
| 4226 | + pwqeq->context1 = lpfc_ncmd; |
---|
| 4227 | + |
---|
| 4228 | + /* Initialize local short-hand pointers. */ |
---|
| 4229 | + lpfc_ncmd->dma_sgl = lpfc_ncmd->data; |
---|
| 4230 | + lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; |
---|
| 4231 | + lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; |
---|
| 4232 | + spin_lock_init(&lpfc_ncmd->buf_lock); |
---|
| 4233 | + |
---|
| 4234 | + /* add the nvme buffer to a post list */ |
---|
| 4235 | + list_add_tail(&lpfc_ncmd->list, &post_nblist); |
---|
| 4236 | + phba->sli4_hba.io_xri_cnt++; |
---|
| 4237 | + } |
---|
| 4238 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, |
---|
| 4239 | + "6114 Allocate %d out of %d requested new NVME " |
---|
| 4240 | + "buffers\n", bcnt, num_to_alloc); |
---|
| 4241 | + |
---|
| 4242 | + /* post the list of nvme buffer sgls to port if available */ |
---|
| 4243 | + if (!list_empty(&post_nblist)) |
---|
| 4244 | + num_posted = lpfc_sli4_post_io_sgl_list( |
---|
| 4245 | + phba, &post_nblist, bcnt); |
---|
| 4246 | + else |
---|
| 4247 | + num_posted = 0; |
---|
| 4248 | + |
---|
| 4249 | + return num_posted; |
---|
3742 | 4250 | } |
---|
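The BlockGuard check in lpfc_new_io_buf relies on SLI4_PAGE_SIZE being a power of two, so addr & (SLI4_PAGE_SIZE - 1) is zero exactly when addr is page aligned. A standalone sketch of the test (page size hard-coded to 4096 here as an assumption):

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096UL	/* power of two, as the driver assumes */

/* Non-zero low bits mean the address is not 4K aligned. */
static int is_misaligned(unsigned long addr)
{
	return (addr & (SLI4_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
	printf("%d\n", is_misaligned(0x10000000));	/* 0: aligned    */
	printf("%d\n", is_misaligned(0x10000200));	/* 1: misaligned */
	return 0;
}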
3743 | 4251 | |
---|
3744 | 4252 | static uint64_t |
---|
.. | .. |
---|
3758 | 4266 | lpfc_read_nv(phba, mboxq); |
---|
3759 | 4267 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
---|
3760 | 4268 | if (rc != MBX_SUCCESS) { |
---|
3761 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 4269 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
3762 | 4270 | "6019 Mailbox failed , mbxCmd x%x " |
---|
3763 | 4271 | "READ_NV, mbxStatus x%x\n", |
---|
3764 | 4272 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
---|
.. | .. |
---|
3774 | 4282 | return be64_to_cpu(wwn); |
---|
3775 | 4283 | else |
---|
3776 | 4284 | return rol64(wwn, 32); |
---|
3777 | | -} |
---|
3778 | | - |
---|
3779 | | -/** |
---|
3780 | | - * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping |
---|
3781 | | - * @phba: pointer to lpfc hba data structure. |
---|
3782 | | - * |
---|
3783 | | - * This routine first calculates the sizes of the current els and allocated |
---|
3784 | | - * scsi sgl lists, and then goes through all sgls to updates the physical |
---|
3785 | | - * XRIs assigned due to port function reset. During port initialization, the |
---|
3786 | | - * current els and allocated scsi sgl lists are 0s. |
---|
3787 | | - * |
---|
3788 | | - * Return codes |
---|
3789 | | - * 0 - successful (for now, it always returns 0) |
---|
3790 | | - **/ |
---|
3791 | | -int |
---|
3792 | | -lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) |
---|
3793 | | -{ |
---|
3794 | | - struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; |
---|
3795 | | - uint16_t i, lxri, els_xri_cnt; |
---|
3796 | | - uint16_t nvme_xri_cnt, nvme_xri_max; |
---|
3797 | | - LIST_HEAD(nvme_sgl_list); |
---|
3798 | | - int rc, cnt; |
---|
3799 | | - |
---|
3800 | | - phba->total_nvme_bufs = 0; |
---|
3801 | | - phba->get_nvme_bufs = 0; |
---|
3802 | | - phba->put_nvme_bufs = 0; |
---|
3803 | | - |
---|
3804 | | - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) |
---|
3805 | | - return 0; |
---|
3806 | | - /* |
---|
3807 | | - * update on pci function's allocated nvme xri-sgl list |
---|
3808 | | - */ |
---|
3809 | | - |
---|
3810 | | - /* maximum number of xris available for nvme buffers */ |
---|
3811 | | - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); |
---|
3812 | | - nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; |
---|
3813 | | - phba->sli4_hba.nvme_xri_max = nvme_xri_max; |
---|
3814 | | - phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; |
---|
3815 | | - |
---|
3816 | | - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
3817 | | - "6074 Current allocated NVME xri-sgl count:%d, " |
---|
3818 | | - "maximum NVME xri count:%d\n", |
---|
3819 | | - phba->sli4_hba.nvme_xri_cnt, |
---|
3820 | | - phba->sli4_hba.nvme_xri_max); |
---|
3821 | | - |
---|
3822 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
---|
3823 | | - spin_lock(&phba->nvme_buf_list_put_lock); |
---|
3824 | | - list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); |
---|
3825 | | - list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); |
---|
3826 | | - cnt = phba->get_nvme_bufs + phba->put_nvme_bufs; |
---|
3827 | | - phba->get_nvme_bufs = 0; |
---|
3828 | | - phba->put_nvme_bufs = 0; |
---|
3829 | | - spin_unlock(&phba->nvme_buf_list_put_lock); |
---|
3830 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
---|
3831 | | - |
---|
3832 | | - if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { |
---|
3833 | | - /* max nvme xri shrunk below the allocated nvme buffers */ |
---|
3834 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
---|
3835 | | - nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - |
---|
3836 | | - phba->sli4_hba.nvme_xri_max; |
---|
3837 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
---|
3838 | | - /* release the extra allocated nvme buffers */ |
---|
3839 | | - for (i = 0; i < nvme_xri_cnt; i++) { |
---|
3840 | | - list_remove_head(&nvme_sgl_list, lpfc_ncmd, |
---|
3841 | | - struct lpfc_nvme_buf, list); |
---|
3842 | | - if (lpfc_ncmd) { |
---|
3843 | | - dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
---|
3844 | | - lpfc_ncmd->data, |
---|
3845 | | - lpfc_ncmd->dma_handle); |
---|
3846 | | - kfree(lpfc_ncmd); |
---|
3847 | | - } |
---|
3848 | | - } |
---|
3849 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
---|
3850 | | - phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; |
---|
3851 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
---|
3852 | | - } |
---|
3853 | | - |
---|
3854 | | - /* update xris associated to remaining allocated nvme buffers */ |
---|
3855 | | - lpfc_ncmd = NULL; |
---|
3856 | | - lpfc_ncmd_next = NULL; |
---|
3857 | | - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
---|
3858 | | - &nvme_sgl_list, list) { |
---|
3859 | | - lxri = lpfc_sli4_next_xritag(phba); |
---|
3860 | | - if (lxri == NO_XRI) { |
---|
3861 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
3862 | | - "6075 Failed to allocate xri for " |
---|
3863 | | - "nvme buffer\n"); |
---|
3864 | | - rc = -ENOMEM; |
---|
3865 | | - goto out_free_mem; |
---|
3866 | | - } |
---|
3867 | | - lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; |
---|
3868 | | - lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
---|
3869 | | - } |
---|
3870 | | - spin_lock_irq(&phba->nvme_buf_list_get_lock); |
---|
3871 | | - spin_lock(&phba->nvme_buf_list_put_lock); |
---|
3872 | | - list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); |
---|
3873 | | - phba->get_nvme_bufs = cnt; |
---|
3874 | | - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); |
---|
3875 | | - spin_unlock(&phba->nvme_buf_list_put_lock); |
---|
3876 | | - spin_unlock_irq(&phba->nvme_buf_list_get_lock); |
---|
3877 | | - return 0; |
---|
3878 | | - |
---|
3879 | | -out_free_mem: |
---|
3880 | | - lpfc_nvme_free(phba); |
---|
3881 | | - return rc; |
---|
3882 | 4285 | } |
---|
3883 | 4286 | |
---|
3884 | 4287 | /** |
---|
.. | .. |
---|
3902 | 4305 | { |
---|
3903 | 4306 | struct lpfc_vport *vport; |
---|
3904 | 4307 | struct Scsi_Host *shost = NULL; |
---|
| 4308 | + struct scsi_host_template *template; |
---|
3905 | 4309 | int error = 0; |
---|
3906 | 4310 | int i; |
---|
3907 | 4311 | uint64_t wwn; |
---|
.. | .. |
---|
3922 | 4326 | |
---|
3923 | 4327 | for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { |
---|
3924 | 4328 | if (wwn == lpfc_no_hba_reset[i]) { |
---|
3925 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 4329 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 4330 | + LOG_TRACE_EVENT, |
---|
3926 | 4331 | "6020 Setting use_no_reset port=%llx\n", |
---|
3927 | 4332 | wwn); |
---|
3928 | 4333 | use_no_reset_hba = true; |
---|
.. | .. |
---|
3930 | 4335 | } |
---|
3931 | 4336 | } |
---|
3932 | 4337 | |
---|
3933 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
---|
3934 | | - if (dev != &phba->pcidev->dev) { |
---|
3935 | | - shost = scsi_host_alloc(&lpfc_vport_template, |
---|
3936 | | - sizeof(struct lpfc_vport)); |
---|
| 4338 | + /* Seed template for SCSI host registration */ |
---|
| 4339 | + if (dev == &phba->pcidev->dev) { |
---|
| 4340 | + template = &phba->port_template; |
---|
| 4341 | + |
---|
| 4342 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
---|
| 4343 | + /* Seed physical port template */ |
---|
| 4344 | + memcpy(template, &lpfc_template, sizeof(*template)); |
---|
| 4345 | + |
---|
| 4346 | + if (use_no_reset_hba) { |
---|
| 4347 | + /* template is for a no reset SCSI Host */ |
---|
| 4348 | + template->max_sectors = 0xffff; |
---|
| 4349 | + template->eh_host_reset_handler = NULL; |
---|
| 4350 | + } |
---|
| 4351 | + |
---|
| 4352 | + /* Template for all vports this physical port creates */ |
---|
| 4353 | + memcpy(&phba->vport_template, &lpfc_template, |
---|
| 4354 | + sizeof(*template)); |
---|
| 4355 | + phba->vport_template.max_sectors = 0xffff; |
---|
| 4356 | + phba->vport_template.shost_attrs = lpfc_vport_attrs; |
---|
| 4357 | + phba->vport_template.eh_bus_reset_handler = NULL; |
---|
| 4358 | + phba->vport_template.eh_host_reset_handler = NULL; |
---|
| 4359 | + phba->vport_template.vendor_id = 0; |
---|
| 4360 | + |
---|
| 4361 | + /* Initialize the host templates with updated value */ |
---|
| 4362 | + if (phba->sli_rev == LPFC_SLI_REV4) { |
---|
| 4363 | + template->sg_tablesize = phba->cfg_scsi_seg_cnt; |
---|
| 4364 | + phba->vport_template.sg_tablesize = |
---|
| 4365 | + phba->cfg_scsi_seg_cnt; |
---|
| 4366 | + } else { |
---|
| 4367 | + template->sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
| 4368 | + phba->vport_template.sg_tablesize = |
---|
| 4369 | + phba->cfg_sg_seg_cnt; |
---|
| 4370 | + } |
---|
| 4371 | + |
---|
3937 | 4372 | } else { |
---|
3938 | | - if (!use_no_reset_hba) |
---|
3939 | | - shost = scsi_host_alloc(&lpfc_template, |
---|
3940 | | - sizeof(struct lpfc_vport)); |
---|
3941 | | - else |
---|
3942 | | - shost = scsi_host_alloc(&lpfc_template_no_hr, |
---|
3943 | | - sizeof(struct lpfc_vport)); |
---|
| 4373 | + /* NVMET is for physical port only */ |
---|
| 4374 | + memcpy(template, &lpfc_template_nvme, |
---|
| 4375 | + sizeof(*template)); |
---|
3944 | 4376 | } |
---|
3945 | | - } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
3946 | | - shost = scsi_host_alloc(&lpfc_template_nvme, |
---|
3947 | | - sizeof(struct lpfc_vport)); |
---|
| 4377 | + } else { |
---|
| 4378 | + template = &phba->vport_template; |
---|
3948 | 4379 | } |
---|
| 4380 | + |
---|
| 4381 | + shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); |
---|
3949 | 4382 | if (!shost) |
---|
3950 | 4383 | goto out; |
---|
3951 | 4384 | |
---|
.. | .. |
---|
3956 | 4389 | vport->fc_rscn_flush = 0; |
---|
3957 | 4390 | lpfc_get_vport_cfgparam(vport); |
---|
3958 | 4391 | |
---|
| 4392 | + /* Adjust value in vport */ |
---|
| 4393 | + vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; |
---|
| 4394 | + |
---|
3959 | 4395 | shost->unique_id = instance; |
---|
3960 | 4396 | shost->max_id = LPFC_MAX_TARGET; |
---|
3961 | 4397 | shost->max_lun = vport->cfg_max_luns; |
---|
3962 | 4398 | shost->this_id = -1; |
---|
3963 | 4399 | shost->max_cmd_len = 16; |
---|
3964 | | - shost->nr_hw_queues = phba->cfg_fcp_io_channel; |
---|
| 4400 | + |
---|
3965 | 4401 | if (phba->sli_rev == LPFC_SLI_REV4) { |
---|
| 4402 | + if (!phba->cfg_fcp_mq_threshold || |
---|
| 4403 | + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) |
---|
| 4404 | + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; |
---|
| 4405 | + |
---|
| 4406 | + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), |
---|
| 4407 | + phba->cfg_fcp_mq_threshold); |
---|
| 4408 | + |
---|
3966 | 4409 | shost->dma_boundary = |
---|
3967 | 4410 | phba->sli4_hba.pc_sli4_params.sge_supp_len-1; |
---|
3968 | | - shost->sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
3969 | | - } |
---|
| 4411 | + |
---|
| 4412 | + if (phba->cfg_xpsgl && !phba->nvmet_support) |
---|
| 4413 | + shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; |
---|
| 4414 | + else |
---|
| 4415 | + shost->sg_tablesize = phba->cfg_scsi_seg_cnt; |
---|
| 4416 | + } else |
---|
| 4417 | + /* SLI-3 has a limited number of hardware queues (3), |
---|
| 4418 | + * thus there is only one for FCP processing. |
---|
| 4419 | + */ |
---|
| 4420 | + shost->nr_hw_queues = 1; |
---|
3970 | 4421 | |
---|
3971 | 4422 | /* |
---|
3972 | 4423 | * Set initial can_queue value since 0 is no longer supported and |
---|
.. | .. |
---|
3982 | 4433 | vport->port_type = LPFC_PHYSICAL_PORT; |
---|
3983 | 4434 | } |
---|
3984 | 4435 | |
---|
| 4436 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, |
---|
| 4437 | + "9081 CreatePort TMPLATE type %x TBLsize %d " |
---|
| 4438 | + "SEGcnt %d/%d\n", |
---|
| 4439 | + vport->port_type, shost->sg_tablesize, |
---|
| 4440 | + phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); |
---|
| 4441 | + |
---|
3985 | 4442 | /* Initialize all internally managed lists. */ |
---|
3986 | 4443 | INIT_LIST_HEAD(&vport->fc_nodes); |
---|
3987 | 4444 | INIT_LIST_HEAD(&vport->rcv_buffer_list); |
---|
.. | .. |
---|
3993 | 4450 | |
---|
3994 | 4451 | timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); |
---|
3995 | 4452 | |
---|
| 4453 | + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) |
---|
| 4454 | + lpfc_setup_bg(phba, shost); |
---|
| 4455 | + |
---|
3996 | 4456 | error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); |
---|
3997 | 4457 | if (error) |
---|
3998 | 4458 | goto out_put_shost; |
---|
3999 | 4459 | |
---|
4000 | | - spin_lock_irq(&phba->hbalock); |
---|
| 4460 | + spin_lock_irq(&phba->port_list_lock); |
---|
4001 | 4461 | list_add_tail(&vport->listentry, &phba->port_list); |
---|
4002 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 4462 | + spin_unlock_irq(&phba->port_list_lock); |
---|
4003 | 4463 | return vport; |
---|
4004 | 4464 | |
---|
4005 | 4465 | out_put_shost: |
---|
.. | .. |
---|
4025 | 4485 | fc_remove_host(shost); |
---|
4026 | 4486 | scsi_remove_host(shost); |
---|
4027 | 4487 | |
---|
4028 | | - spin_lock_irq(&phba->hbalock); |
---|
| 4488 | + spin_lock_irq(&phba->port_list_lock); |
---|
4029 | 4489 | list_del_init(&vport->listentry); |
---|
4030 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 4490 | + spin_unlock_irq(&phba->port_list_lock); |
---|
4031 | 4491 | |
---|
4032 | 4492 | lpfc_cleanup(vport); |
---|
4033 | 4493 | return; |
---|
.. | .. |
---|
4111 | 4571 | return stat; |
---|
4112 | 4572 | } |
---|
4113 | 4573 | |
---|
| 4574 | +static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) |
---|
| 4575 | +{ |
---|
| 4576 | + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; |
---|
| 4577 | + struct lpfc_hba *phba = vport->phba; |
---|
| 4578 | + |
---|
| 4579 | + fc_host_supported_speeds(shost) = 0; |
---|
| 4580 | + /* |
---|
| 4581 | + * Avoid reporting supported link speed for FCoE as it can't be |
---|
| 4582 | + * controlled via FCoE. |
---|
| 4583 | + */ |
---|
| 4584 | + if (phba->hba_flag & HBA_FCOE_MODE) |
---|
| 4585 | + return; |
---|
| 4586 | + |
---|
| 4587 | + if (phba->lmt & LMT_128Gb) |
---|
| 4588 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; |
---|
| 4589 | + if (phba->lmt & LMT_64Gb) |
---|
| 4590 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; |
---|
| 4591 | + if (phba->lmt & LMT_32Gb) |
---|
| 4592 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; |
---|
| 4593 | + if (phba->lmt & LMT_16Gb) |
---|
| 4594 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; |
---|
| 4595 | + if (phba->lmt & LMT_10Gb) |
---|
| 4596 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; |
---|
| 4597 | + if (phba->lmt & LMT_8Gb) |
---|
| 4598 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; |
---|
| 4599 | + if (phba->lmt & LMT_4Gb) |
---|
| 4600 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; |
---|
| 4601 | + if (phba->lmt & LMT_2Gb) |
---|
| 4602 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; |
---|
| 4603 | + if (phba->lmt & LMT_1Gb) |
---|
| 4604 | + fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; |
---|
| 4605 | +} |
---|
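lpfc_host_supported_speeds_set is a straight translation of the adapter's LMT capability bits into the FC transport's FC_PORTSPEED_* mask, OR-ing one flag per capability bit and skipping FCoE entirely. A compact sketch of the pattern with hypothetical stand-in constants (not the real LMT_*/FC_PORTSPEED_* values):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's LMT_* and FC_PORTSPEED_* bits */
#define LMT_4Gb   0x040
#define LMT_8Gb   0x080
#define LMT_16Gb  0x100
#define SPEED_4G  (1 << 0)
#define SPEED_8G  (1 << 1)
#define SPEED_16G (1 << 2)

int main(void)
{
	unsigned int lmt = LMT_8Gb | LMT_16Gb;	/* adapter capabilities */
	unsigned int speeds = 0;

	/* One OR per capability bit, as in the driver routine. */
	if (lmt & LMT_4Gb)
		speeds |= SPEED_4G;
	if (lmt & LMT_8Gb)
		speeds |= SPEED_8G;
	if (lmt & LMT_16Gb)
		speeds |= SPEED_16G;

	printf("supported speed mask: 0x%x\n", speeds);	/* 0x6 */
	return 0;
}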
| 4606 | + |
---|
4114 | 4607 | /** |
---|
4115 | 4608 | * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port |
---|
4116 | 4609 | * @shost: pointer to SCSI host data structure. |
---|
.. | .. |
---|
4138 | 4631 | lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), |
---|
4139 | 4632 | sizeof fc_host_symbolic_name(shost)); |
---|
4140 | 4633 | |
---|
4141 | | - fc_host_supported_speeds(shost) = 0; |
---|
4142 | | - if (phba->lmt & LMT_64Gb) |
---|
4143 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; |
---|
4144 | | - if (phba->lmt & LMT_32Gb) |
---|
4145 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; |
---|
4146 | | - if (phba->lmt & LMT_16Gb) |
---|
4147 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; |
---|
4148 | | - if (phba->lmt & LMT_10Gb) |
---|
4149 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; |
---|
4150 | | - if (phba->lmt & LMT_8Gb) |
---|
4151 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; |
---|
4152 | | - if (phba->lmt & LMT_4Gb) |
---|
4153 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; |
---|
4154 | | - if (phba->lmt & LMT_2Gb) |
---|
4155 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; |
---|
4156 | | - if (phba->lmt & LMT_1Gb) |
---|
4157 | | - fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; |
---|
| 4634 | + lpfc_host_supported_speeds_set(shost); |
---|
4158 | 4635 | |
---|
4159 | 4636 | fc_host_maxframe_size(shost) = |
---|
4160 | 4637 | (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | |
---|
.. | .. |
---|
4210 | 4687 | { |
---|
4211 | 4688 | /* Reset some HBA SLI4 setup states */ |
---|
4212 | 4689 | lpfc_stop_hba_timers(phba); |
---|
4213 | | - phba->pport->work_port_events = 0; |
---|
| 4690 | + if (phba->pport) |
---|
| 4691 | + phba->pport->work_port_events = 0; |
---|
4214 | 4692 | phba->sli4_hba.intr_enable = 0; |
---|
4215 | 4693 | } |
---|
4216 | 4694 | |
---|
.. | .. |
---|
4253 | 4731 | |
---|
4254 | 4732 | /** |
---|
4255 | 4733 | * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout |
---|
4256 | | - * @ptr: Map to lpfc_hba data structure pointer. |
---|
| 4734 | + * @t: Timer context used to obtain the pointer to lpfc hba data structure. |
---|
4257 | 4735 | * |
---|
4258 | 4736 | * This routine is invoked when waiting for FCF table rediscover has been |
---|
4259 | 4737 | * timed out. If new FCF record(s) has (have) been discovered during the |
---|
.. | .. |
---|
4301 | 4779 | case LPFC_ASYNC_LINK_FAULT_LR_LRR: |
---|
4302 | 4780 | break; |
---|
4303 | 4781 | default: |
---|
4304 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 4782 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4305 | 4783 | "0398 Unknown link fault code: x%x\n", |
---|
4306 | 4784 | bf_get(lpfc_acqe_link_fault, acqe_link)); |
---|
4307 | 4785 | break; |
---|
.. | .. |
---|
4337 | 4815 | att_type = LPFC_ATT_LINK_UP; |
---|
4338 | 4816 | break; |
---|
4339 | 4817 | default: |
---|
4340 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 4818 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4341 | 4819 | "0399 Invalid link attention type: x%x\n", |
---|
4342 | 4820 | bf_get(lpfc_acqe_link_status, acqe_link)); |
---|
4343 | 4821 | att_type = LPFC_ATT_RESERVED; |
---|
.. | .. |
---|
4439 | 4917 | case LPFC_ASYNC_LINK_SPEED_40GBPS: |
---|
4440 | 4918 | port_speed = 40000; |
---|
4441 | 4919 | break; |
---|
| 4920 | + case LPFC_ASYNC_LINK_SPEED_100GBPS: |
---|
| 4921 | + port_speed = 100000; |
---|
| 4922 | + break; |
---|
4442 | 4923 | default: |
---|
4443 | 4924 | port_speed = 0; |
---|
4444 | 4925 | } |
---|
.. | .. |
---|
4471 | 4952 | break; |
---|
4472 | 4953 | case LPFC_FC_LA_SPEED_64G: |
---|
4473 | 4954 | port_speed = 64000; |
---|
| 4955 | + break; |
---|
| 4956 | + case LPFC_FC_LA_SPEED_128G: |
---|
| 4957 | + port_speed = 128000; |
---|
4474 | 4958 | break; |
---|
4475 | 4959 | default: |
---|
4476 | 4960 | port_speed = 0; |
---|
.. | .. |
---|
4506 | 4990 | phba->fcoe_eventtag = acqe_link->event_tag; |
---|
4507 | 4991 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
---|
4508 | 4992 | if (!pmb) { |
---|
4509 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 4993 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4510 | 4994 | "0395 The mboxq allocation failed\n"); |
---|
4511 | 4995 | return; |
---|
4512 | 4996 | } |
---|
4513 | 4997 | mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
---|
4514 | 4998 | if (!mp) { |
---|
4515 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 4999 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4516 | 5000 | "0396 The lpfc_dmabuf allocation failed\n"); |
---|
4517 | 5001 | goto out_free_pmb; |
---|
4518 | 5002 | } |
---|
4519 | 5003 | mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); |
---|
4520 | 5004 | if (!mp->virt) { |
---|
4521 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5005 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4522 | 5006 | "0397 The mbuf allocation failed\n"); |
---|
4523 | 5007 | goto out_free_dmabuf; |
---|
4524 | 5008 | } |
---|
.. | .. |
---|
4614 | 5098 | } |
---|
4615 | 5099 | |
---|
4616 | 5100 | /** |
---|
| 5101 | + * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read |
---|
| 5102 | + * topology. |
---|
| 5103 | + * @phba: pointer to lpfc hba data structure. |
---|
| 5104 | + * @speed_code: asynchronous event link speed code. |
---|
| 5105 | + * |
---|
| 5106 | + * This routine parses the given SLI4 async event link speed code into the
---|
| 5107 | + * corresponding Read topology link speed value.
---|
| 5108 | + * |
---|
| 5109 | + * Return: link speed in terms of Read topology. |
---|
| 5110 | + **/ |
---|
| 5111 | +static uint8_t |
---|
| 5112 | +lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) |
---|
| 5113 | +{ |
---|
| 5114 | + uint8_t port_speed; |
---|
| 5115 | + |
---|
| 5116 | + switch (speed_code) { |
---|
| 5117 | + case LPFC_FC_LA_SPEED_1G: |
---|
| 5118 | + port_speed = LPFC_LINK_SPEED_1GHZ; |
---|
| 5119 | + break; |
---|
| 5120 | + case LPFC_FC_LA_SPEED_2G: |
---|
| 5121 | + port_speed = LPFC_LINK_SPEED_2GHZ; |
---|
| 5122 | + break; |
---|
| 5123 | + case LPFC_FC_LA_SPEED_4G: |
---|
| 5124 | + port_speed = LPFC_LINK_SPEED_4GHZ; |
---|
| 5125 | + break; |
---|
| 5126 | + case LPFC_FC_LA_SPEED_8G: |
---|
| 5127 | + port_speed = LPFC_LINK_SPEED_8GHZ; |
---|
| 5128 | + break; |
---|
| 5129 | + case LPFC_FC_LA_SPEED_16G: |
---|
| 5130 | + port_speed = LPFC_LINK_SPEED_16GHZ; |
---|
| 5131 | + break; |
---|
| 5132 | + case LPFC_FC_LA_SPEED_32G: |
---|
| 5133 | + port_speed = LPFC_LINK_SPEED_32GHZ; |
---|
| 5134 | + break; |
---|
| 5135 | + case LPFC_FC_LA_SPEED_64G: |
---|
| 5136 | + port_speed = LPFC_LINK_SPEED_64GHZ; |
---|
| 5137 | + break; |
---|
| 5138 | + case LPFC_FC_LA_SPEED_128G: |
---|
| 5139 | + port_speed = LPFC_LINK_SPEED_128GHZ; |
---|
| 5140 | + break; |
---|
| 5141 | + case LPFC_FC_LA_SPEED_256G: |
---|
| 5142 | + port_speed = LPFC_LINK_SPEED_256GHZ; |
---|
| 5143 | + break; |
---|
| 5144 | + default: |
---|
| 5145 | + port_speed = 0; |
---|
| 5146 | + break; |
---|
| 5147 | + } |
---|
| 5148 | + |
---|
| 5149 | + return port_speed; |
---|
| 5150 | +} |
---|
| 5151 | + |
---|
| 5152 | +#define trunk_link_status(__idx)\ |
---|
| 5153 | + bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ |
---|
| 5154 | + ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ |
---|
| 5155 | + "Link up" : "Link down") : "NA" |
---|
| 5156 | +/* Did port __idx report an error? */
---|
| 5157 | +#define trunk_port_fault(__idx)\ |
---|
| 5158 | + bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ |
---|
| 5159 | + (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" |
---|
| 5160 | + |
---|
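The trunk_link_status()/trunk_port_fault() macros above use ## token pasting so that trunk_link_status(0) selects the port-0 bitfields at preprocessing time. A minimal demo of the same pattern (toy trunk struct, not the driver's acqe layout):

#include <stdio.h>

struct link { int state; };
struct { struct link link0, link1; } trunk = { {1}, {0} };

/* Token-paste the port index into the member name, as the driver's
 * trunk_link_status(__idx) macro does. */
#define LINK_STATE(__idx) \
	(trunk.link##__idx.state ? "Link up" : "Link down")

int main(void)
{
	printf("port0: %s, port1: %s\n", LINK_STATE(0), LINK_STATE(1));
	return 0;
}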
| 5161 | +static void |
---|
| 5162 | +lpfc_update_trunk_link_status(struct lpfc_hba *phba, |
---|
| 5163 | + struct lpfc_acqe_fc_la *acqe_fc) |
---|
| 5164 | +{ |
---|
| 5165 | + uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); |
---|
| 5166 | + uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); |
---|
| 5167 | + |
---|
| 5168 | + phba->sli4_hba.link_state.speed = |
---|
| 5169 | + lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, |
---|
| 5170 | + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); |
---|
| 5171 | + |
---|
| 5172 | + phba->sli4_hba.link_state.logical_speed = |
---|
| 5173 | + bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; |
---|
| 5174 | + /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ |
---|
| 5175 | + phba->fc_linkspeed = |
---|
| 5176 | + lpfc_async_link_speed_to_read_top( |
---|
| 5177 | + phba, |
---|
| 5178 | + bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); |
---|
| 5179 | + |
---|
| 5180 | + if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { |
---|
| 5181 | + phba->trunk_link.link0.state = |
---|
| 5182 | + bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) |
---|
| 5183 | + ? LPFC_LINK_UP : LPFC_LINK_DOWN; |
---|
| 5184 | + phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; |
---|
| 5185 | + } |
---|
| 5186 | + if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { |
---|
| 5187 | + phba->trunk_link.link1.state = |
---|
| 5188 | + bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) |
---|
| 5189 | + ? LPFC_LINK_UP : LPFC_LINK_DOWN; |
---|
| 5190 | + phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; |
---|
| 5191 | + } |
---|
| 5192 | + if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { |
---|
| 5193 | + phba->trunk_link.link2.state = |
---|
| 5194 | + bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) |
---|
| 5195 | + ? LPFC_LINK_UP : LPFC_LINK_DOWN; |
---|
| 5196 | + phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; |
---|
| 5197 | + } |
---|
| 5198 | + if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { |
---|
| 5199 | + phba->trunk_link.link3.state = |
---|
| 5200 | + bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) |
---|
| 5201 | + ? LPFC_LINK_UP : LPFC_LINK_DOWN; |
---|
| 5202 | + phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; |
---|
| 5203 | + } |
---|
| 5204 | + |
---|
| 5205 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 5206 | + "2910 Async FC Trunking Event - Speed:%d\n" |
---|
| 5207 | + "\tLogical speed:%d " |
---|
| 5208 | + "port0: %s port1: %s port2: %s port3: %s\n", |
---|
| 5209 | + phba->sli4_hba.link_state.speed, |
---|
| 5210 | + phba->sli4_hba.link_state.logical_speed, |
---|
| 5211 | + trunk_link_status(0), trunk_link_status(1), |
---|
| 5212 | + trunk_link_status(2), trunk_link_status(3)); |
---|
| 5213 | + |
---|
| 5214 | + if (port_fault) |
---|
| 5215 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 5216 | + "3202 trunk error:0x%x (%s) seen on port0:%s " |
---|
| 5217 | + /* |
---|
| 5218 | + * SLI-4: We have only 0xA error codes |
---|
| 5219 | + * defined as of now. print an appropriate |
---|
| 5220 | + * message in case driver needs to be updated. |
---|
| 5221 | + */ |
---|
| 5222 | + "port1:%s port2:%s port3:%s\n", err, err > 0xA ? |
---|
| 5223 | + "UNDEFINED. update driver." : trunk_errmsg[err], |
---|
| 5224 | + trunk_port_fault(0), trunk_port_fault(1), |
---|
| 5225 | + trunk_port_fault(2), trunk_port_fault(3)); |
---|
| 5226 | +} |
---|
| 5227 | + |
---|
| 5228 | + |
---|
| 5229 | +/** |
---|
4617 | 5230 | * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event |
---|
4618 | 5231 | * @phba: pointer to lpfc hba data structure. |
---|
4619 | 5232 | * @acqe_fc: pointer to the async fc completion queue entry. |
---|
.. | .. |
---|
4633 | 5246 | |
---|
4634 | 5247 | if (bf_get(lpfc_trailer_type, acqe_fc) != |
---|
4635 | 5248 | LPFC_FC_LA_EVENT_TYPE_FC_LINK) { |
---|
4636 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5249 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4637 | 5250 | "2895 Non FC link Event detected.(%d)\n", |
---|
4638 | 5251 | bf_get(lpfc_trailer_type, acqe_fc)); |
---|
4639 | 5252 | return; |
---|
4640 | 5253 | } |
---|
| 5254 | + |
---|
| 5255 | + if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == |
---|
| 5256 | + LPFC_FC_LA_TYPE_TRUNKING_EVENT) { |
---|
| 5257 | + lpfc_update_trunk_link_status(phba, acqe_fc); |
---|
| 5258 | + return; |
---|
| 5259 | + } |
---|
| 5260 | + |
---|
4641 | 5261 | /* Keep the link status for extra SLI4 state machine reference */ |
---|
4642 | 5262 | phba->sli4_hba.link_state.speed = |
---|
4643 | 5263 | lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, |
---|
.. | .. |
---|
4653 | 5273 | bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); |
---|
4654 | 5274 | phba->sli4_hba.link_state.fault = |
---|
4655 | 5275 | bf_get(lpfc_acqe_link_fault, acqe_fc); |
---|
4656 | | - phba->sli4_hba.link_state.logical_speed = |
---|
| 5276 | + |
---|
| 5277 | + if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == |
---|
| 5278 | + LPFC_FC_LA_TYPE_LINK_DOWN) |
---|
| 5279 | + phba->sli4_hba.link_state.logical_speed = 0; |
---|
| 5280 | + else if (!phba->sli4_hba.conf_trunk) |
---|
| 5281 | + phba->sli4_hba.link_state.logical_speed = |
---|
4657 | 5282 | bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; |
---|
| 5283 | + |
---|
4658 | 5284 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
4659 | 5285 | "2896 Async FC event - Speed:%dGBaud Topology:x%x " |
---|
4660 | 5286 | "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" |
---|
.. | .. |
---|
4668 | 5294 | phba->sli4_hba.link_state.fault); |
---|
4669 | 5295 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
---|
4670 | 5296 | if (!pmb) { |
---|
4671 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5297 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4672 | 5298 | "2897 The mboxq allocation failed\n"); |
---|
4673 | 5299 | return; |
---|
4674 | 5300 | } |
---|
4675 | 5301 | mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
---|
4676 | 5302 | if (!mp) { |
---|
4677 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5303 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4678 | 5304 | "2898 The lpfc_dmabuf allocation failed\n"); |
---|
4679 | 5305 | goto out_free_pmb; |
---|
4680 | 5306 | } |
---|
4681 | 5307 | mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); |
---|
4682 | 5308 | if (!mp->virt) { |
---|
4683 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5309 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4684 | 5310 | "2899 The mbuf allocation failed\n"); |
---|
4685 | 5311 | goto out_free_dmabuf; |
---|
4686 | 5312 | } |
---|
.. | .. |
---|
4752 | 5378 | /** |
---|
4753 | 5379 | * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event |
---|
4754 | 5380 | * @phba: pointer to lpfc hba data structure. |
---|
4755 | | - * @acqe_fc: pointer to the async SLI completion queue entry. |
---|
| 5381 | + * @acqe_sli: pointer to the async SLI completion queue entry. |
---|
4756 | 5382 | * |
---|
4757 | 5383 | * This routine is to handle the SLI4 asynchronous SLI events. |
---|
4758 | 5384 | **/ |
---|
.. | .. |
---|
4767 | 5393 | struct temp_event temp_event_data; |
---|
4768 | 5394 | struct lpfc_acqe_misconfigured_event *misconfigured; |
---|
4769 | 5395 | struct Scsi_Host *shost; |
---|
| 5396 | + struct lpfc_vport **vports; |
---|
| 5397 | + int rc, i; |
---|
4770 | 5398 | |
---|
4771 | 5399 | evt_type = bf_get(lpfc_trailer_type, acqe_sli); |
---|
4772 | 5400 | |
---|
4773 | 5401 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
4774 | | - "2901 Async SLI event - Event Data1:x%08x Event Data2:" |
---|
4775 | | - "x%08x SLI Event Type:%d\n", |
---|
| 5402 | + "2901 Async SLI event - Type:%d, Event Data: x%08x " |
---|
| 5403 | + "x%08x x%08x x%08x\n", evt_type, |
---|
4776 | 5404 | acqe_sli->event_data1, acqe_sli->event_data2, |
---|
4777 | | - evt_type); |
---|
| 5405 | + acqe_sli->reserved, acqe_sli->trailer); |
---|
4778 | 5406 | |
---|
4779 | 5407 | port_name = phba->Port[0]; |
---|
4780 | 5408 | if (port_name == 0x00) |
---|
.. | .. |
---|
4845 | 5473 | &misconfigured->theEvent); |
---|
4846 | 5474 | break; |
---|
4847 | 5475 | default: |
---|
4848 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5476 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
4849 | 5477 | "3296 " |
---|
4850 | 5478 | "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " |
---|
4851 | 5479 | "event: Invalid link %d", |
---|
.. | .. |
---|
4892 | 5520 | sprintf(message, "Unknown event status x%02x", status); |
---|
4893 | 5521 | break; |
---|
4894 | 5522 | } |
---|
| 5523 | + |
---|
| 5524 | + /* Issue READ_CONFIG mbox command to refresh supported speeds */ |
---|
| 5525 | + rc = lpfc_sli4_read_config(phba); |
---|
| 5526 | + if (rc) { |
---|
| 5527 | + phba->lmt = 0; |
---|
| 5528 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 5529 | + LOG_TRACE_EVENT, |
---|
| 5530 | + "3194 Unable to retrieve supported " |
---|
| 5531 | + "speeds, rc = 0x%x\n", rc); |
---|
| 5532 | + } |
---|
| 5533 | + vports = lpfc_create_vport_work_array(phba); |
---|
| 5534 | + if (vports != NULL) { |
---|
| 5535 | + for (i = 0; i <= phba->max_vports && vports[i] != NULL; |
---|
| 5536 | + i++) { |
---|
| 5537 | + shost = lpfc_shost_from_vport(vports[i]); |
---|
| 5538 | + lpfc_host_supported_speeds_set(shost); |
---|
| 5539 | + } |
---|
| 5540 | + } |
---|
| 5541 | + lpfc_destroy_vport_work_array(phba, vports); |
---|
| 5542 | + |
---|
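
The READ_CONFIG refresh above walks every vport through the driver's snapshot idiom: lpfc_create_vport_work_array() returns a NULL-terminated array that pins each vport for the duration of the walk, and lpfc_destroy_vport_work_array() is called unconditionally because it tolerates a NULL array, exactly as the hunk does after the if. A minimal runnable sketch of that walk, with stand-in data in place of real vports:

    #include <stdio.h>

    struct vport { int id; };

    int main(void)
    {
        /* Stand-in for lpfc_create_vport_work_array(): a NULL-terminated
         * snapshot sized max_vports + 1, so the `i <= max_vports &&
         * vports[i]` walk always terminates. */
        unsigned int max_vports = 3;
        struct vport v0 = { 0 }, v1 = { 1 };
        struct vport *vports[] = { &v0, &v1, NULL, NULL, NULL };

        for (unsigned int i = 0; i <= max_vports && vports[i]; i++)
            printf("refresh supported speeds on shost of vport %d\n",
                   vports[i]->id);
        /* ...followed by lpfc_destroy_vport_work_array() in the driver,
         * which is safe to call even when the snapshot is NULL. */
        return 0;
    }
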
4895 | 5543 | phba->sli4_hba.lnk_info.optic_state = status; |
---|
4896 | 5544 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
4897 | 5545 | "3176 Port Name %c %s\n", port_name, message); |
---|
.. | .. |
---|
4902 | 5550 | "Event Data1:x%08x Event Data2: x%08x\n", |
---|
4903 | 5551 | acqe_sli->event_data1, acqe_sli->event_data2); |
---|
4904 | 5552 | break; |
---|
| 5553 | + case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: |
---|
| 5554 | + /* Misconfigured WWN. Reports that the SLI Port is configured |
---|
| 5555 | + * to use FA-WWN, but the attached device doesn't support it.
---|
| 5556 | + * No driver action is required.
---|
| 5557 | + * Event Data1 - N/A, Event Data2 - N/A
---|
| 5558 | + */ |
---|
| 5559 | + lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, |
---|
| 5560 | + "2699 Misconfigured FA-WWN - Attached device does " |
---|
| 5561 | + "not support FA-WWN\n"); |
---|
| 5562 | + break; |
---|
| 5563 | + case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: |
---|
| 5564 | + /* EEPROM failure. No driver action is required */ |
---|
| 5565 | + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
---|
| 5566 | + "2518 EEPROM failure - " |
---|
| 5567 | + "Event Data1: x%08x Event Data2: x%08x\n", |
---|
| 5568 | + acqe_sli->event_data1, acqe_sli->event_data2); |
---|
| 5569 | + break; |
---|
4905 | 5570 | default: |
---|
4906 | 5571 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
4907 | | - "3193 Async SLI event - Event Data1:x%08x Event Data2:" |
---|
4908 | | - "x%08x SLI Event Type:%d\n", |
---|
4909 | | - acqe_sli->event_data1, acqe_sli->event_data2, |
---|
| 5572 | + "3193 Unrecognized SLI event, type: 0x%x", |
---|
4910 | 5573 | evt_type); |
---|
4911 | 5574 | break; |
---|
4912 | 5575 | } |
---|
.. | .. |
---|
4971 | 5634 | |
---|
4972 | 5635 | /** |
---|
4973 | 5636 | * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports |
---|
4974 | | - * @vport: pointer to lpfc hba data structure. |
---|
| 5637 | + * @phba: pointer to lpfc hba data structure. |
---|
4975 | 5638 | * |
---|
4976 | 5639 | * This routine is to perform Clear Virtual Link (CVL) on all vports in |
---|
4977 | 5640 | * response to a FCF dead event. |
---|
.. | .. |
---|
4992 | 5655 | /** |
---|
4993 | 5656 | * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event |
---|
4994 | 5657 | * @phba: pointer to lpfc hba data structure. |
---|
4995 | | - * @acqe_link: pointer to the async fcoe completion queue entry. |
---|
| 5658 | + * @acqe_fip: pointer to the async fcoe completion queue entry. |
---|
4996 | 5659 | * |
---|
4997 | 5660 | * This routine is to handle the SLI4 asynchronous fcoe event. |
---|
4998 | 5661 | **/ |
---|
.. | .. |
---|
5015 | 5678 | case LPFC_FIP_EVENT_TYPE_NEW_FCF: |
---|
5016 | 5679 | case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: |
---|
5017 | 5680 | if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) |
---|
5018 | | - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
---|
5019 | | - LOG_DISCOVERY, |
---|
| 5681 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5020 | 5682 | "2546 New FCF event, evt_tag:x%x, " |
---|
5021 | 5683 | "index:x%x\n", |
---|
5022 | 5684 | acqe_fip->event_tag, |
---|
.. | .. |
---|
5069 | 5731 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
---|
5070 | 5732 | LPFC_FCOE_FCF_GET_FIRST); |
---|
5071 | 5733 | if (rc) |
---|
5072 | | - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
---|
| 5734 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5073 | 5735 | "2547 Issue FCF scan read FCF mailbox " |
---|
5074 | 5736 | "command failed (x%x)\n", rc); |
---|
5075 | 5737 | break; |
---|
5076 | 5738 | |
---|
5077 | 5739 | case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: |
---|
5078 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
5079 | | - "2548 FCF Table full count 0x%x tag 0x%x\n", |
---|
5080 | | - bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), |
---|
5081 | | - acqe_fip->event_tag); |
---|
| 5740 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 5741 | + "2548 FCF Table full count 0x%x tag 0x%x\n", |
---|
| 5742 | + bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), |
---|
| 5743 | + acqe_fip->event_tag); |
---|
5082 | 5744 | break; |
---|
5083 | 5745 | |
---|
5084 | 5746 | case LPFC_FIP_EVENT_TYPE_FCF_DEAD: |
---|
5085 | 5747 | phba->fcoe_cvl_eventtag = acqe_fip->event_tag; |
---|
5086 | | - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
---|
5087 | | - "2549 FCF (x%x) disconnected from network, " |
---|
5088 | | - "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); |
---|
| 5748 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 5749 | + "2549 FCF (x%x) disconnected from network, " |
---|
| 5750 | + "tag:x%x\n", acqe_fip->index, |
---|
| 5751 | + acqe_fip->event_tag); |
---|
5089 | 5752 | /* |
---|
5090 | 5753 | * If we are in the middle of FCF failover process, clear |
---|
5091 | 5754 | * the corresponding FCF bit in the roundrobin bitmap. |
---|
.. | .. |
---|
5122 | 5785 | rc = lpfc_sli4_redisc_fcf_table(phba); |
---|
5123 | 5786 | if (rc) { |
---|
5124 | 5787 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
---|
5125 | | - LOG_DISCOVERY, |
---|
| 5788 | + LOG_TRACE_EVENT, |
---|
5126 | 5789 | "2772 Issue FCF rediscover mailbox " |
---|
5127 | 5790 | "command failed, fail through to FCF " |
---|
5128 | 5791 | "dead event\n"); |
---|
.. | .. |
---|
5146 | 5809 | break; |
---|
5147 | 5810 | case LPFC_FIP_EVENT_TYPE_CVL: |
---|
5148 | 5811 | phba->fcoe_cvl_eventtag = acqe_fip->event_tag; |
---|
5149 | | - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
---|
| 5812 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 5813 | + LOG_TRACE_EVENT, |
---|
5150 | 5814 | "2718 Clear Virtual Link Received for VPI 0x%x" |
---|
5151 | 5815 | " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); |
---|
5152 | 5816 | |
---|
.. | .. |
---|
5213 | 5877 | rc = lpfc_sli4_redisc_fcf_table(phba); |
---|
5214 | 5878 | if (rc) { |
---|
5215 | 5879 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
---|
5216 | | - LOG_DISCOVERY, |
---|
| 5880 | + LOG_TRACE_EVENT, |
---|
5217 | 5881 | "2774 Issue FCF rediscover " |
---|
5218 | 5882 | "mailbox command failed, " |
---|
5219 | 5883 | "through to CVL event\n"); |
---|
.. | .. |
---|
5234 | 5898 | } |
---|
5235 | 5899 | break; |
---|
5236 | 5900 | default: |
---|
5237 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
5238 | | - "0288 Unknown FCoE event type 0x%x event tag " |
---|
5239 | | - "0x%x\n", event_type, acqe_fip->event_tag); |
---|
| 5901 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 5902 | + "0288 Unknown FCoE event type 0x%x event tag " |
---|
| 5903 | + "0x%x\n", event_type, acqe_fip->event_tag); |
---|
5240 | 5904 | break; |
---|
5241 | 5905 | } |
---|
5242 | 5906 | } |
---|
.. | .. |
---|
5244 | 5908 | /** |
---|
5245 | 5909 | * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event |
---|
5246 | 5910 | * @phba: pointer to lpfc hba data structure. |
---|
5247 | | - * @acqe_link: pointer to the async dcbx completion queue entry. |
---|
| 5911 | + * @acqe_dcbx: pointer to the async dcbx completion queue entry. |
---|
5248 | 5912 | * |
---|
5249 | 5913 | * This routine is to handle the SLI4 asynchronous dcbx event. |
---|
5250 | 5914 | **/ |
---|
.. | .. |
---|
5253 | 5917 | struct lpfc_acqe_dcbx *acqe_dcbx) |
---|
5254 | 5918 | { |
---|
5255 | 5919 | phba->fc_eventTag = acqe_dcbx->event_tag; |
---|
5256 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 5920 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5257 | 5921 | "0290 The SLI4 DCBX asynchronous event is not " |
---|
5258 | 5922 | "handled yet\n"); |
---|
5259 | 5923 | } |
---|
.. | .. |
---|
5261 | 5925 | /** |
---|
5262 | 5926 | * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event |
---|
5263 | 5927 | * @phba: pointer to lpfc hba data structure. |
---|
5264 | | - * @acqe_link: pointer to the async grp5 completion queue entry. |
---|
| 5928 | + * @acqe_grp5: pointer to the async grp5 completion queue entry. |
---|
5265 | 5929 | * |
---|
5266 | 5930 | * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event |
---|
5267 | 5931 | * is an asynchronous notification of a logical link speed change. The Port
---|
.. | .. |
---|
5294 | 5958 | void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) |
---|
5295 | 5959 | { |
---|
5296 | 5960 | struct lpfc_cq_event *cq_event; |
---|
| 5961 | + unsigned long iflags; |
---|
5297 | 5962 | |
---|
5298 | 5963 | /* First, declare the async event has been handled */ |
---|
5299 | | - spin_lock_irq(&phba->hbalock); |
---|
| 5964 | + spin_lock_irqsave(&phba->hbalock, iflags); |
---|
5300 | 5965 | phba->hba_flag &= ~ASYNC_EVENT; |
---|
5301 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 5966 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
---|
| 5967 | + |
---|
5302 | 5968 | /* Now, handle all the async events */ |
---|
| 5969 | + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); |
---|
5303 | 5970 | while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { |
---|
5304 | | - /* Get the first event from the head of the event queue */ |
---|
5305 | | - spin_lock_irq(&phba->hbalock); |
---|
5306 | 5971 | list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, |
---|
5307 | 5972 | cq_event, struct lpfc_cq_event, list); |
---|
5308 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 5973 | + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, |
---|
| 5974 | + iflags); |
---|
| 5975 | + |
---|
5309 | 5976 | /* Process the asynchronous event */ |
---|
5310 | 5977 | switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { |
---|
5311 | 5978 | case LPFC_TRAILER_CODE_LINK: |
---|
.. | .. |
---|
5330 | 5997 | lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); |
---|
5331 | 5998 | break; |
---|
5332 | 5999 | default: |
---|
5333 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
5334 | | - "1804 Invalid asynchrous event code: " |
---|
| 6000 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 6001 | + LOG_TRACE_EVENT, |
---|
| 6002 | + "1804 Invalid asynchronous event code: " |
---|
5335 | 6003 | "x%x\n", bf_get(lpfc_trailer_code, |
---|
5336 | 6004 | &cq_event->cqe.mcqe_cmpl)); |
---|
5337 | 6005 | break; |
---|
5338 | 6006 | } |
---|
| 6007 | + |
---|
5339 | 6008 | /* Free the completion event processed to the free pool */ |
---|
5340 | 6009 | lpfc_sli4_cq_event_release(phba, cq_event); |
---|
| 6010 | + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); |
---|
5341 | 6011 | } |
---|
| 6012 | + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); |
---|
5342 | 6013 | } |
---|
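
The reworked drain loop above replaces spin_lock_irq() on the coarse hbalock with spin_lock_irqsave() on a dedicated asynce_list_lock, and drops that lock around each event handler so slow processing never blocks the producers. A minimal kernel-style sketch of the same pattern, under assumed names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* A dedicated lock guards only the work list; it is dropped around
     * the per-event handler.  spin_lock_irqsave() is used because the
     * list is also touched from interrupt context. */
    struct evt {
        struct list_head list;
    };

    static LIST_HEAD(evt_list);
    static DEFINE_SPINLOCK(evt_lock);

    static void handle_evt(struct evt *e)
    {
        kfree(e);    /* stand-in for real event processing + release */
    }

    static void drain_events(void)
    {
        struct evt *e;
        unsigned long iflags;

        spin_lock_irqsave(&evt_lock, iflags);
        while (!list_empty(&evt_list)) {
            e = list_first_entry(&evt_list, struct evt, list);
            list_del(&e->list);
            spin_unlock_irqrestore(&evt_lock, iflags);

            handle_evt(e);    /* process outside the lock */

            spin_lock_irqsave(&evt_lock, iflags);
        }
        spin_unlock_irqrestore(&evt_lock, iflags);
    }
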
5343 | 6014 | |
---|
5344 | 6015 | /** |
---|
.. | .. |
---|
5366 | 6037 | "2777 Start post-quiescent FCF table scan\n"); |
---|
5367 | 6038 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
---|
5368 | 6039 | if (rc) |
---|
5369 | | - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
---|
| 6040 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5370 | 6041 | "2747 Issue FCF scan read FCF mailbox " |
---|
5371 | 6042 | "command failed 0x%x\n", rc); |
---|
5372 | 6043 | } |
---|
.. | .. |
---|
5437 | 6108 | "0480 Enabled MSI-X interrupt mode.\n"); |
---|
5438 | 6109 | break; |
---|
5439 | 6110 | default: |
---|
5440 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6111 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5441 | 6112 | "0482 Illegal interrupt mode.\n"); |
---|
5442 | 6113 | break; |
---|
5443 | 6114 | } |
---|
.. | .. |
---|
5485 | 6156 | out_disable_device: |
---|
5486 | 6157 | pci_disable_device(pdev); |
---|
5487 | 6158 | out_error: |
---|
5488 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6159 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5489 | 6160 | "1401 Failed to enable pci device\n"); |
---|
5490 | 6161 | return -ENODEV; |
---|
5491 | 6162 | } |
---|
.. | .. |
---|
5586 | 6257 | |
---|
5587 | 6258 | max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); |
---|
5588 | 6259 | if (nr_vfn > max_nr_vfn) { |
---|
5589 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6260 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
5590 | 6261 | "3057 Requested vfs (%d) greater than " |
---|
5591 | 6262 | "supported vfs (%d)", nr_vfn, max_nr_vfn); |
---|
5592 | 6263 | return -EINVAL; |
---|
.. | .. |
---|
5625 | 6296 | * Driver resources common to all SLI revisions |
---|
5626 | 6297 | */ |
---|
5627 | 6298 | atomic_set(&phba->fast_event_count, 0); |
---|
| 6299 | + atomic_set(&phba->dbg_log_idx, 0); |
---|
| 6300 | + atomic_set(&phba->dbg_log_cnt, 0); |
---|
| 6301 | + atomic_set(&phba->dbg_log_dmping, 0); |
---|
5628 | 6302 | spin_lock_init(&phba->hbalock); |
---|
5629 | 6303 | |
---|
5630 | 6304 | /* Initialize ndlp management spinlock */ |
---|
5631 | 6305 | spin_lock_init(&phba->ndlp_lock); |
---|
5632 | 6306 | |
---|
| 6307 | + /* Initialize port_list spinlock */ |
---|
| 6308 | + spin_lock_init(&phba->port_list_lock); |
---|
5633 | 6309 | INIT_LIST_HEAD(&phba->port_list); |
---|
| 6310 | + |
---|
5634 | 6311 | INIT_LIST_HEAD(&phba->work_list); |
---|
5635 | 6312 | init_waitqueue_head(&phba->wait_4_mlo_m_q); |
---|
5636 | 6313 | |
---|
.. | .. |
---|
5645 | 6322 | "NVME" : " "), |
---|
5646 | 6323 | (phba->nvmet_support ? "NVMET" : " ")); |
---|
5647 | 6324 | |
---|
5648 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
---|
5649 | | - /* Initialize the scsi buffer list used by driver for scsi IO */ |
---|
5650 | | - spin_lock_init(&phba->scsi_buf_list_get_lock); |
---|
5651 | | - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); |
---|
5652 | | - spin_lock_init(&phba->scsi_buf_list_put_lock); |
---|
5653 | | - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
---|
5654 | | - } |
---|
5655 | | - |
---|
5656 | | - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && |
---|
5657 | | - (phba->nvmet_support == 0)) { |
---|
5658 | | - /* Initialize the NVME buffer list used by driver for NVME IO */ |
---|
5659 | | - spin_lock_init(&phba->nvme_buf_list_get_lock); |
---|
5660 | | - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); |
---|
5661 | | - phba->get_nvme_bufs = 0; |
---|
5662 | | - spin_lock_init(&phba->nvme_buf_list_put_lock); |
---|
5663 | | - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); |
---|
5664 | | - phba->put_nvme_bufs = 0; |
---|
5665 | | - } |
---|
| 6325 | + /* Initialize the IO buffer list used by driver for SLI3 SCSI */ |
---|
| 6326 | + spin_lock_init(&phba->scsi_buf_list_get_lock); |
---|
| 6327 | + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); |
---|
| 6328 | + spin_lock_init(&phba->scsi_buf_list_put_lock); |
---|
| 6329 | + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
---|
5666 | 6330 | |
---|
5667 | 6331 | /* Initialize the fabric iocb list */ |
---|
5668 | 6332 | INIT_LIST_HEAD(&phba->fabric_iocb_list); |
---|
.. | .. |
---|
5686 | 6350 | /* Heartbeat timer */ |
---|
5687 | 6351 | timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); |
---|
5688 | 6352 | |
---|
| 6353 | + INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); |
---|
| 6354 | + |
---|
| 6355 | + INIT_DELAYED_WORK(&phba->idle_stat_delay_work, |
---|
| 6356 | + lpfc_idle_stat_delay_work); |
---|
| 6357 | + |
---|
5689 | 6358 | return 0; |
---|
5690 | 6359 | } |
---|
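
INIT_DELAYED_WORK() above only binds the handler; each work item still has to be scheduled, and the eq_delay and idle_stat handlers are periodic samplers that re-arm themselves. A sketch of that self-rearming idiom, with assumed names standing in for lpfc_hb_eq_delay_work and lpfc_idle_stat_delay_work:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct delayed_work sample_work;

    static void sample_work_fn(struct work_struct *work)
    {
        /* ...sample EQ/idle statistics here... */

        /* Re-arm: run again in one second. */
        schedule_delayed_work(&sample_work, msecs_to_jiffies(1000));
    }

    static void sample_work_start(void)
    {
        INIT_DELAYED_WORK(&sample_work, sample_work_fn);
        schedule_delayed_work(&sample_work, msecs_to_jiffies(1000));
    }

    static void sample_work_stop(void)
    {
        /* cancel_delayed_work_sync() copes with self-requeueing work:
         * it waits out a running instance and blocks the re-arm. */
        cancel_delayed_work_sync(&sample_work);
    }
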
5691 | 6360 | |
---|
.. | .. |
---|
5703 | 6372 | static int |
---|
5704 | 6373 | lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) |
---|
5705 | 6374 | { |
---|
5706 | | - int rc; |
---|
| 6375 | + int rc, entry_sz; |
---|
5707 | 6376 | |
---|
5708 | 6377 | /* |
---|
5709 | 6378 | * Initialize timers used by driver |
---|
.. | .. |
---|
5743 | 6412 | * used to create the sg_dma_buf_pool must be dynamically calculated. |
---|
5744 | 6413 | */ |
---|
5745 | 6414 | |
---|
5746 | | - /* Initialize the host templates the configured values. */ |
---|
5747 | | - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
5748 | | - lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
5749 | | - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
| 6415 | + if (phba->sli_rev == LPFC_SLI_REV4) |
---|
| 6416 | + entry_sz = sizeof(struct sli4_sge); |
---|
| 6417 | + else |
---|
| 6418 | + entry_sz = sizeof(struct ulp_bde64); |
---|
5750 | 6419 | |
---|
5751 | 6420 | /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ |
---|
5752 | 6421 | if (phba->cfg_enable_bg) { |
---|
.. | .. |
---|
5761 | 6430 | */ |
---|
5762 | 6431 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
5763 | 6432 | sizeof(struct fcp_rsp) + |
---|
5764 | | - (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); |
---|
| 6433 | + (LPFC_MAX_SG_SEG_CNT * entry_sz); |
---|
5765 | 6434 | |
---|
5766 | 6435 | if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) |
---|
5767 | 6436 | phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; |
---|
.. | .. |
---|
5776 | 6445 | */ |
---|
5777 | 6446 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
5778 | 6447 | sizeof(struct fcp_rsp) + |
---|
5779 | | - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); |
---|
| 6448 | + ((phba->cfg_sg_seg_cnt + 2) * entry_sz); |
---|
5780 | 6449 | |
---|
5781 | 6450 | /* Total BDEs in BPL for scsi_sg_list */ |
---|
5782 | 6451 | phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; |
---|
5783 | 6452 | } |
---|
5784 | 6453 | |
---|
5785 | 6454 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, |
---|
5786 | | - "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", |
---|
| 6455 | + "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", |
---|
5787 | 6456 | phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, |
---|
5788 | 6457 | phba->cfg_total_seg_cnt); |
---|
5789 | 6458 | |
---|
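
The buffer laid out here is the per-command DMA area: FCP cmnd, FCP rsp, then one BDE/SGE per segment, with entry_sz now chosen per SLI revision instead of hard-coding ulp_bde64. A worked example of the non-BlockGuard arithmetic; the fcp_cmnd/fcp_rsp sizes are placeholders, and a ulp_bde64 is taken as the usual 12 bytes (three 32-bit words):

    #include <stdio.h>

    int main(void)
    {
        unsigned int fcp_cmnd = 32, fcp_rsp = 24;    /* assumed sizes */
        unsigned int entry_sz = 12;                  /* ulp_bde64     */
        unsigned int sg_seg_cnt = 64;                /* module param  */

        /* +2: one reserved BDE each for the FCP cmnd and FCP rsp */
        unsigned int buf_size = fcp_cmnd + fcp_rsp +
                                (sg_seg_cnt + 2) * entry_sz;

        printf("sg_dma_buf_size = %u bytes, total_bde = %u\n",
               buf_size, sg_seg_cnt + 2);
        return 0;
    }
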
.. | .. |
---|
5800 | 6469 | /* Allocate device driver memory */ |
---|
5801 | 6470 | if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) |
---|
5802 | 6471 | return -ENOMEM; |
---|
| 6472 | + |
---|
| 6473 | + phba->lpfc_sg_dma_buf_pool = |
---|
| 6474 | + dma_pool_create("lpfc_sg_dma_buf_pool", |
---|
| 6475 | + &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, |
---|
| 6476 | + BPL_ALIGN_SZ, 0); |
---|
| 6477 | + |
---|
| 6478 | + if (!phba->lpfc_sg_dma_buf_pool) |
---|
| 6479 | + goto fail_free_mem; |
---|
| 6480 | + |
---|
| 6481 | + phba->lpfc_cmd_rsp_buf_pool = |
---|
| 6482 | + dma_pool_create("lpfc_cmd_rsp_buf_pool", |
---|
| 6483 | + &phba->pcidev->dev, |
---|
| 6484 | + sizeof(struct fcp_cmnd) + |
---|
| 6485 | + sizeof(struct fcp_rsp), |
---|
| 6486 | + BPL_ALIGN_SZ, 0); |
---|
| 6487 | + |
---|
| 6488 | + if (!phba->lpfc_cmd_rsp_buf_pool) |
---|
| 6489 | + goto fail_free_dma_buf_pool; |
---|
5803 | 6490 | |
---|
5804 | 6491 | /* |
---|
5805 | 6492 | * Enable sr-iov virtual functions if supported and configured |
---|
.. | .. |
---|
5819 | 6506 | } |
---|
5820 | 6507 | |
---|
5821 | 6508 | return 0; |
---|
| 6509 | + |
---|
| 6510 | +fail_free_dma_buf_pool: |
---|
| 6511 | + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); |
---|
| 6512 | + phba->lpfc_sg_dma_buf_pool = NULL; |
---|
| 6513 | +fail_free_mem: |
---|
| 6514 | + lpfc_mem_free(phba); |
---|
| 6515 | + return -ENOMEM; |
---|
5822 | 6516 | } |
---|
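
The new pools give every SLI-3 command a preformed scatter-gather DMA buffer and a combined cmnd/rsp buffer, and the error path tears down in strict reverse order of creation. A condensed sketch of that create/unwind pairing; the device, sizes, and alignment are illustrative:

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>

    static struct dma_pool *sg_pool, *cmd_rsp_pool;

    static int create_io_pools(struct device *dev, size_t sg_buf_size,
                               size_t cmd_rsp_size, size_t align)
    {
        sg_pool = dma_pool_create("sg_dma_buf_pool", dev,
                                  sg_buf_size, align, 0);
        if (!sg_pool)
            return -ENOMEM;

        cmd_rsp_pool = dma_pool_create("cmd_rsp_buf_pool", dev,
                                       cmd_rsp_size, align, 0);
        if (!cmd_rsp_pool)
            goto fail_free_sg_pool;

        return 0;

    fail_free_sg_pool:
        dma_pool_destroy(sg_pool);    /* reverse order of creation */
        sg_pool = NULL;
        return -ENOMEM;
    }
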
5823 | 6517 | |
---|
5824 | 6518 | /** |
---|
.. | .. |
---|
5855 | 6549 | MAILBOX_t *mb; |
---|
5856 | 6550 | int rc, i, max_buf_size; |
---|
5857 | 6551 | int longs; |
---|
5858 | | - int fof_vectors = 0; |
---|
5859 | 6552 | int extra; |
---|
5860 | 6553 | uint64_t wwn; |
---|
5861 | 6554 | u32 if_type; |
---|
5862 | 6555 | u32 if_fam; |
---|
5863 | 6556 | |
---|
5864 | | - phba->sli4_hba.num_online_cpu = num_online_cpus(); |
---|
5865 | 6557 | phba->sli4_hba.num_present_cpu = lpfc_present_cpu; |
---|
| 6558 | + phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; |
---|
5866 | 6559 | phba->sli4_hba.curr_disp_cpu = 0; |
---|
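
num_possible_cpu is sized from cpumask_last(cpu_possible_mask) + 1 rather than num_possible_cpus() because the possible map may be sparse: with a map such as {0, 2, 5}, num_possible_cpus() is 3 but valid CPU ids run up to 5, and cpu_map is indexed by raw CPU id. A sketch of the sizing rule:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Size a table indexed by raw CPU id safely, even for sparse
     * possible maps; elem_size is caller-supplied. */
    static void *alloc_cpu_id_table(size_t elem_size)
    {
        unsigned int slots = cpumask_last(cpu_possible_mask) + 1;

        return kcalloc(slots, elem_size, GFP_KERNEL);
    }
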
5867 | 6560 | |
---|
5868 | 6561 | /* Get all the module params for configuring this host */ |
---|
.. | .. |
---|
5877 | 6570 | rc = lpfc_sli4_post_status_check(phba); |
---|
5878 | 6571 | if (rc) |
---|
5879 | 6572 | return -ENODEV; |
---|
| 6573 | + |
---|
| 6574 | + /* Allocate all driver workqueues here */ |
---|
| 6575 | + |
---|
| 6576 | + /* The lpfc_wq workqueue for deferred irq use */ |
---|
| 6577 | + phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); |
---|
5880 | 6578 | |
---|
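
Moving the lpfc_wq allocation here (SLI-4 setup) replaces the old conditional allocation in lpfc_setup_driver_resource_phase2(). WQ_MEM_RECLAIM guarantees the queue a rescuer thread, which matters for a storage driver whose deferred-IRQ work may have to make progress under memory pressure. A sketch of the paired setup/teardown, with an explicit NULL check added for safety:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *wq;

    static int wq_setup(void)
    {
        wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
        return wq ? 0 : -ENOMEM;
    }

    static void wq_teardown(void)
    {
        if (wq) {
            destroy_workqueue(wq);    /* drains pending work first */
            wq = NULL;
        }
    }
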
5881 | 6579 | /* |
---|
5882 | 6580 | * Initialize timers used by driver |
---|
.. | .. |
---|
5912 | 6610 | * The WQ create will allocate the ring. |
---|
5913 | 6611 | */ |
---|
5914 | 6612 | |
---|
5915 | | - /* |
---|
5916 | | - * 1 for cmd, 1 for rsp, NVME adds an extra one |
---|
5917 | | - * for boundary conditions in its max_sgl_segment template. |
---|
5918 | | - */ |
---|
5919 | | - extra = 2; |
---|
5920 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
5921 | | - extra++; |
---|
5922 | | - |
---|
5923 | | - /* |
---|
5924 | | - * It doesn't matter what family our adapter is in, we are |
---|
5925 | | - * limited to 2 Pages, 512 SGEs, for our SGL. |
---|
5926 | | - * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp |
---|
5927 | | - */ |
---|
5928 | | - max_buf_size = (2 * SLI4_PAGE_SIZE); |
---|
5929 | | - if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra) |
---|
5930 | | - phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra; |
---|
5931 | | - |
---|
5932 | | - /* |
---|
5933 | | - * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size |
---|
5934 | | - * used to create the sg_dma_buf_pool must be calculated. |
---|
5935 | | - */ |
---|
5936 | | - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { |
---|
5937 | | - /* |
---|
5938 | | - * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, |
---|
5939 | | - * the FCP rsp, and a SGE. Sice we have no control |
---|
5940 | | - * over how many protection segments the SCSI Layer |
---|
5941 | | - * will hand us (ie: there could be one for every block |
---|
5942 | | - * in the IO), just allocate enough SGEs to accomidate |
---|
5943 | | - * our max amount and we need to limit lpfc_sg_seg_cnt |
---|
5944 | | - * to minimize the risk of running out. |
---|
5945 | | - */ |
---|
5946 | | - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
5947 | | - sizeof(struct fcp_rsp) + max_buf_size; |
---|
5948 | | - |
---|
5949 | | - /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ |
---|
5950 | | - phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; |
---|
5951 | | - |
---|
5952 | | - if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) |
---|
5953 | | - phba->cfg_sg_seg_cnt = |
---|
5954 | | - LPFC_MAX_SG_SLI4_SEG_CNT_DIF; |
---|
5955 | | - } else { |
---|
5956 | | - /* |
---|
5957 | | - * The scsi_buf for a regular I/O holds the FCP cmnd, |
---|
5958 | | - * the FCP rsp, a SGE for each, and a SGE for up to |
---|
5959 | | - * cfg_sg_seg_cnt data segments. |
---|
5960 | | - */ |
---|
5961 | | - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
5962 | | - sizeof(struct fcp_rsp) + |
---|
5963 | | - ((phba->cfg_sg_seg_cnt + extra) * |
---|
5964 | | - sizeof(struct sli4_sge)); |
---|
5965 | | - |
---|
5966 | | - /* Total SGEs for scsi_sg_list */ |
---|
5967 | | - phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; |
---|
5968 | | - |
---|
5969 | | - /* |
---|
5970 | | - * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only |
---|
5971 | | - * need to post 1 page for the SGL. |
---|
5972 | | - */ |
---|
5973 | | - } |
---|
5974 | | - |
---|
5975 | | - /* Initialize the host templates with the updated values. */ |
---|
5976 | | - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
5977 | | - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
5978 | | - lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; |
---|
5979 | | - |
---|
5980 | | - if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) |
---|
5981 | | - phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; |
---|
5982 | | - else |
---|
5983 | | - phba->cfg_sg_dma_buf_size = |
---|
5984 | | - SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); |
---|
5985 | | - |
---|
5986 | | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, |
---|
5987 | | - "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", |
---|
5988 | | - phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, |
---|
5989 | | - phba->cfg_total_seg_cnt); |
---|
5990 | | - |
---|
5991 | 6613 | /* Initialize buffer queue management fields */ |
---|
5992 | 6614 | INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); |
---|
5993 | 6615 | phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; |
---|
.. | .. |
---|
5996 | 6618 | /* |
---|
5997 | 6619 | * Initialize the SLI Layer to run with lpfc SLI4 HBAs. |
---|
5998 | 6620 | */ |
---|
5999 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
---|
6000 | | - /* Initialize the Abort scsi buffer list used by driver */ |
---|
6001 | | - spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); |
---|
6002 | | - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); |
---|
6003 | | - } |
---|
| 6621 | + /* Initialize the Abort buffer list used by driver */ |
---|
| 6622 | + spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); |
---|
| 6623 | + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); |
---|
6004 | 6624 | |
---|
6005 | 6625 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
6006 | 6626 | /* Initialize the Abort nvme buffer list used by driver */ |
---|
6007 | | - spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); |
---|
6008 | | - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); |
---|
| 6627 | + spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); |
---|
6009 | 6628 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); |
---|
6010 | 6629 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); |
---|
| 6630 | + spin_lock_init(&phba->sli4_hba.t_active_list_lock); |
---|
| 6631 | + INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); |
---|
6011 | 6632 | } |
---|
6012 | 6633 | |
---|
6013 | 6634 | /* This abort list used by worker thread */ |
---|
6014 | 6635 | spin_lock_init(&phba->sli4_hba.sgl_list_lock); |
---|
6015 | 6636 | spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); |
---|
| 6637 | + spin_lock_init(&phba->sli4_hba.asynce_list_lock); |
---|
| 6638 | + spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); |
---|
6016 | 6639 | |
---|
6017 | 6640 | /* |
---|
6018 | 6641 | * Initialize driver internal slow-path work queues |
---|
.. | .. |
---|
6024 | 6647 | INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); |
---|
6025 | 6648 | /* Asynchronous event CQ Event work queue list */ |
---|
6026 | 6649 | INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); |
---|
6027 | | - /* Fast-path XRI aborted CQ Event work queue list */ |
---|
6028 | | - INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); |
---|
6029 | 6650 | /* Slow-path XRI aborted CQ Event work queue list */ |
---|
6030 | 6651 | INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); |
---|
6031 | 6652 | /* Receive queue CQ Event work queue list */ |
---|
.. | .. |
---|
6049 | 6670 | /* Allocate device driver memory */ |
---|
6050 | 6671 | rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); |
---|
6051 | 6672 | if (rc) |
---|
6052 | | - return -ENOMEM; |
---|
| 6673 | + goto out_destroy_workqueue; |
---|
6053 | 6674 | |
---|
6054 | 6675 | /* IF Type 2 ports get initialized now. */ |
---|
6055 | 6676 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= |
---|
.. | .. |
---|
6103 | 6724 | lpfc_read_nv(phba, mboxq); |
---|
6104 | 6725 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
---|
6105 | 6726 | if (rc != MBX_SUCCESS) { |
---|
6106 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 6727 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 6728 | + LOG_TRACE_EVENT, |
---|
6107 | 6729 | "6016 Mailbox failed , mbxCmd x%x " |
---|
6108 | 6730 | "READ_NV, mbxStatus x%x\n", |
---|
6109 | 6731 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
---|
.. | .. |
---|
6132 | 6754 | |
---|
6133 | 6755 | phba->nvmet_support = 1; /* a match */ |
---|
6134 | 6756 | |
---|
6135 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6757 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 6758 | + LOG_TRACE_EVENT, |
---|
6136 | 6759 | "6017 NVME Target %016llx\n", |
---|
6137 | 6760 | wwn); |
---|
6138 | 6761 | #else |
---|
6139 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6762 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 6763 | + LOG_TRACE_EVENT, |
---|
6140 | 6764 | "6021 Can't enable NVME Target." |
---|
6141 | 6765 | " NVME_TARGET_FC infrastructure" |
---|
6142 | 6766 | " is not in kernel\n"); |
---|
6143 | 6767 | #endif |
---|
| 6768 | + /* Not supported for NVMET */ |
---|
| 6769 | + phba->cfg_xri_rebalancing = 0; |
---|
| 6770 | + if (phba->irq_chann_mode == NHT_MODE) { |
---|
| 6771 | + phba->cfg_irq_chann = |
---|
| 6772 | + phba->sli4_hba.num_present_cpu; |
---|
| 6773 | + phba->cfg_hdw_queue = |
---|
| 6774 | + phba->sli4_hba.num_present_cpu; |
---|
| 6775 | + phba->irq_chann_mode = NORMAL_MODE; |
---|
| 6776 | + } |
---|
6144 | 6777 | break; |
---|
6145 | 6778 | } |
---|
6146 | 6779 | } |
---|
.. | .. |
---|
6161 | 6794 | &phba->sli4_hba.sli_intf); |
---|
6162 | 6795 | if (phba->sli4_hba.extents_in_use && |
---|
6163 | 6796 | phba->sli4_hba.rpi_hdrs_in_use) { |
---|
6164 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
6165 | | - "2999 Unsupported SLI4 Parameters " |
---|
6166 | | - "Extents and RPI headers enabled.\n"); |
---|
| 6797 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 6798 | + "2999 Unsupported SLI4 Parameters " |
---|
| 6799 | + "Extents and RPI headers enabled.\n"); |
---|
6167 | 6800 | if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && |
---|
6168 | 6801 | if_fam == LPFC_SLI_INTF_FAMILY_BE2) { |
---|
6169 | 6802 | mempool_free(mboxq, phba->mbox_mem_pool); |
---|
.. | .. |
---|
6179 | 6812 | } |
---|
6180 | 6813 | } |
---|
6181 | 6814 | |
---|
| 6815 | + /* |
---|
| 6816 | + * 1 for cmd, 1 for rsp, NVME adds an extra one |
---|
| 6817 | + * for boundary conditions in its max_sgl_segment template. |
---|
| 6818 | + */ |
---|
| 6819 | + extra = 2; |
---|
| 6820 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
| 6821 | + extra++; |
---|
| 6822 | + |
---|
| 6823 | + /* |
---|
| 6824 | + * It doesn't matter what family our adapter is in, we are |
---|
| 6825 | + * limited to 2 Pages, 512 SGEs, for our SGL. |
---|
| 6826 | + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp |
---|
| 6827 | + */ |
---|
| 6828 | + max_buf_size = (2 * SLI4_PAGE_SIZE); |
---|
| 6829 | + |
---|
| 6830 | + /* |
---|
| 6831 | + * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size |
---|
| 6832 | + * used to create the sg_dma_buf_pool must be calculated. |
---|
| 6833 | + */ |
---|
| 6834 | + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { |
---|
| 6835 | + /* Both cfg_enable_bg and cfg_external_dif code paths */ |
---|
| 6836 | + |
---|
| 6837 | + /* |
---|
| 6838 | + * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, |
---|
| 6839 | + * the FCP rsp, and a SGE. Sice we have no control |
---|
| 6840 | + * over how many protection segments the SCSI Layer |
---|
| 6841 | + * will hand us (ie: there could be one for every block |
---|
| 6842 | + * in the IO), just allocate enough SGEs to accomidate |
---|
| 6843 | + * our max amount and we need to limit lpfc_sg_seg_cnt |
---|
| 6844 | + * to minimize the risk of running out. |
---|
| 6845 | + */ |
---|
| 6846 | + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
| 6847 | + sizeof(struct fcp_rsp) + max_buf_size; |
---|
| 6848 | + |
---|
| 6849 | + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ |
---|
| 6850 | + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; |
---|
| 6851 | + |
---|
| 6852 | + /* |
---|
| 6853 | + * If supporting DIF, reduce the seg count for scsi to |
---|
| 6854 | + * allow room for the DIF sges. |
---|
| 6855 | + */ |
---|
| 6856 | + if (phba->cfg_enable_bg && |
---|
| 6857 | + phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) |
---|
| 6858 | + phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; |
---|
| 6859 | + else |
---|
| 6860 | + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; |
---|
| 6861 | + |
---|
| 6862 | + } else { |
---|
| 6863 | + /* |
---|
| 6864 | + * The scsi_buf for a regular I/O holds the FCP cmnd, |
---|
| 6865 | + * the FCP rsp, a SGE for each, and a SGE for up to |
---|
| 6866 | + * cfg_sg_seg_cnt data segments. |
---|
| 6867 | + */ |
---|
| 6868 | + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
---|
| 6869 | + sizeof(struct fcp_rsp) + |
---|
| 6870 | + ((phba->cfg_sg_seg_cnt + extra) * |
---|
| 6871 | + sizeof(struct sli4_sge)); |
---|
| 6872 | + |
---|
| 6873 | + /* Total SGEs for scsi_sg_list */ |
---|
| 6874 | + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; |
---|
| 6875 | + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; |
---|
| 6876 | + |
---|
| 6877 | + /* |
---|
| 6878 | + * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only |
---|
| 6879 | + * need to post 1 page for the SGL. |
---|
| 6880 | + */ |
---|
| 6881 | + } |
---|
| 6882 | + |
---|
| 6883 | + if (phba->cfg_xpsgl && !phba->nvmet_support) |
---|
| 6884 | + phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; |
---|
| 6885 | + else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) |
---|
| 6886 | + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; |
---|
| 6887 | + else |
---|
| 6888 | + phba->cfg_sg_dma_buf_size = |
---|
| 6889 | + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); |
---|
| 6890 | + |
---|
| 6891 | + phba->border_sge_num = phba->cfg_sg_dma_buf_size / |
---|
| 6892 | + sizeof(struct sli4_sge); |
---|
| 6893 | + |
---|
| 6894 | + /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ |
---|
| 6895 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
| 6896 | + if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { |
---|
| 6897 | + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, |
---|
| 6898 | + "6300 Reducing NVME sg segment " |
---|
| 6899 | + "cnt to %d\n", |
---|
| 6900 | + LPFC_MAX_NVME_SEG_CNT); |
---|
| 6901 | + phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; |
---|
| 6902 | + } else |
---|
| 6903 | + phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; |
---|
| 6904 | + } |
---|
| 6905 | + |
---|
| 6906 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, |
---|
| 6907 | + "9087 sg_seg_cnt:%d dmabuf_size:%d " |
---|
| 6908 | + "total:%d scsi:%d nvme:%d\n", |
---|
| 6909 | + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, |
---|
| 6910 | + phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, |
---|
| 6911 | + phba->cfg_nvme_seg_cnt); |
---|
| 6912 | + |
---|
| 6913 | + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) |
---|
| 6914 | + i = phba->cfg_sg_dma_buf_size; |
---|
| 6915 | + else |
---|
| 6916 | + i = SLI4_PAGE_SIZE; |
---|
| 6917 | + |
---|
| 6918 | + phba->lpfc_sg_dma_buf_pool = |
---|
| 6919 | + dma_pool_create("lpfc_sg_dma_buf_pool", |
---|
| 6920 | + &phba->pcidev->dev, |
---|
| 6921 | + phba->cfg_sg_dma_buf_size, |
---|
| 6922 | + i, 0); |
---|
| 6923 | + if (!phba->lpfc_sg_dma_buf_pool) |
---|
| 6924 | + goto out_free_bsmbx; |
---|
| 6925 | + |
---|
| 6926 | + phba->lpfc_cmd_rsp_buf_pool = |
---|
| 6927 | + dma_pool_create("lpfc_cmd_rsp_buf_pool", |
---|
| 6928 | + &phba->pcidev->dev, |
---|
| 6929 | + sizeof(struct fcp_cmnd) + |
---|
| 6930 | + sizeof(struct fcp_rsp), |
---|
| 6931 | + i, 0); |
---|
| 6932 | + if (!phba->lpfc_cmd_rsp_buf_pool) |
---|
| 6933 | + goto out_free_sg_dma_buf; |
---|
| 6934 | + |
---|
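
The relocated sizing block now runs after READ_NV/nvmet detection so `extra` and the XPSGL override can be decided first; it ends by computing border_sge_num (how many sli4_sge entries fit in one DMA buffer) and creating both pools, capping the pool alignment at one SLI4 page. A worked example of the non-DIF arithmetic; the fcp_cmnd/fcp_rsp sizes are placeholders, an sli4_sge is 16 bytes, and extra = 3 models FCP + NVME enabled (cmd + rsp SGEs plus one NVME boundary SGE):

    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096    /* assumed, matches SLI-4 usage */

    static unsigned int page_align(unsigned int v)
    {
        return (v + SLI4_PAGE_SIZE - 1) & ~(SLI4_PAGE_SIZE - 1);
    }

    int main(void)
    {
        unsigned int fcp_cmnd = 32, fcp_rsp = 24, sge = 16;
        unsigned int sg_seg_cnt = 64, extra = 3;

        unsigned int buf = fcp_cmnd + fcp_rsp +
                           (sg_seg_cnt + extra) * sge;
        unsigned int aligned = page_align(buf);

        printf("raw:%u aligned:%u border_sge_num:%u total_seg:%u\n",
               buf, aligned, aligned / sge, sg_seg_cnt + extra);
        return 0;
    }
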
6182 | 6935 | mempool_free(mboxq, phba->mbox_mem_pool); |
---|
6183 | 6936 | |
---|
6184 | 6937 | /* Verify OAS is supported */ |
---|
6185 | 6938 | lpfc_sli4_oas_verify(phba); |
---|
6186 | | - if (phba->cfg_fof) |
---|
6187 | | - fof_vectors = 1; |
---|
| 6939 | + |
---|
| 6940 | + /* Verify RAS support on adapter */ |
---|
| 6941 | + lpfc_sli4_ras_init(phba); |
---|
6188 | 6942 | |
---|
6189 | 6943 | /* Verify all the SLI4 queues */ |
---|
6190 | 6944 | rc = lpfc_sli4_queue_verify(phba); |
---|
6191 | 6945 | if (rc) |
---|
6192 | | - goto out_free_bsmbx; |
---|
| 6946 | + goto out_free_cmd_rsp_buf; |
---|
6193 | 6947 | |
---|
6194 | 6948 | /* Create driver internal CQE event pool */ |
---|
6195 | 6949 | rc = lpfc_sli4_cq_event_pool_create(phba); |
---|
6196 | 6950 | if (rc) |
---|
6197 | | - goto out_free_bsmbx; |
---|
| 6951 | + goto out_free_cmd_rsp_buf; |
---|
6198 | 6952 | |
---|
6199 | 6953 | /* Initialize sgl lists per host */ |
---|
6200 | 6954 | lpfc_init_sgl_list(phba); |
---|
.. | .. |
---|
6202 | 6956 | /* Allocate and initialize active sgl array */ |
---|
6203 | 6957 | rc = lpfc_init_active_sgl_array(phba); |
---|
6204 | 6958 | if (rc) { |
---|
6205 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6959 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6206 | 6960 | "1430 Failed to initialize sgl list.\n"); |
---|
6207 | 6961 | goto out_destroy_cq_event_pool; |
---|
6208 | 6962 | } |
---|
6209 | 6963 | rc = lpfc_sli4_init_rpi_hdrs(phba); |
---|
6210 | 6964 | if (rc) { |
---|
6211 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6965 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6212 | 6966 | "1432 Failed to initialize rpi headers.\n"); |
---|
6213 | 6967 | goto out_free_active_sgl; |
---|
6214 | 6968 | } |
---|
.. | .. |
---|
6218 | 6972 | phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), |
---|
6219 | 6973 | GFP_KERNEL); |
---|
6220 | 6974 | if (!phba->fcf.fcf_rr_bmask) { |
---|
6221 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6975 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6222 | 6976 | "2759 Failed allocate memory for FCF round " |
---|
6223 | 6977 | "robin failover bmask\n"); |
---|
6224 | 6978 | rc = -ENOMEM; |
---|
6225 | 6979 | goto out_remove_rpi_hdrs; |
---|
6226 | 6980 | } |
---|
6227 | 6981 | |
---|
6228 | | - phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, |
---|
6229 | | - sizeof(struct lpfc_hba_eq_hdl), |
---|
6230 | | - GFP_KERNEL); |
---|
| 6982 | + phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, |
---|
| 6983 | + sizeof(struct lpfc_hba_eq_hdl), |
---|
| 6984 | + GFP_KERNEL); |
---|
6231 | 6985 | if (!phba->sli4_hba.hba_eq_hdl) { |
---|
6232 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6986 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6233 | 6987 | "2572 Failed allocate memory for " |
---|
6234 | 6988 | "fast-path per-EQ handle array\n"); |
---|
6235 | 6989 | rc = -ENOMEM; |
---|
6236 | 6990 | goto out_free_fcf_rr_bmask; |
---|
6237 | 6991 | } |
---|
6238 | 6992 | |
---|
6239 | | - phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, |
---|
| 6993 | + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, |
---|
6240 | 6994 | sizeof(struct lpfc_vector_map_info), |
---|
6241 | 6995 | GFP_KERNEL); |
---|
6242 | 6996 | if (!phba->sli4_hba.cpu_map) { |
---|
6243 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 6997 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6244 | 6998 | "3327 Failed allocate memory for msi-x " |
---|
6245 | 6999 | "interrupt vector mapping\n"); |
---|
6246 | 7000 | rc = -ENOMEM; |
---|
6247 | 7001 | goto out_free_hba_eq_hdl; |
---|
6248 | 7002 | } |
---|
6249 | | - if (lpfc_used_cpu == NULL) { |
---|
6250 | | - lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), |
---|
6251 | | - GFP_KERNEL); |
---|
6252 | | - if (!lpfc_used_cpu) { |
---|
6253 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
6254 | | - "3335 Failed allocate memory for msi-x " |
---|
6255 | | - "interrupt vector mapping\n"); |
---|
6256 | | - kfree(phba->sli4_hba.cpu_map); |
---|
6257 | | - rc = -ENOMEM; |
---|
6258 | | - goto out_free_hba_eq_hdl; |
---|
6259 | | - } |
---|
6260 | | - for (i = 0; i < lpfc_present_cpu; i++) |
---|
6261 | | - lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; |
---|
| 7003 | + |
---|
| 7004 | + phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); |
---|
| 7005 | + if (!phba->sli4_hba.eq_info) { |
---|
| 7006 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 7007 | + "3321 Failed allocation for per_cpu stats\n"); |
---|
| 7008 | + rc = -ENOMEM; |
---|
| 7009 | + goto out_free_hba_cpu_map; |
---|
6262 | 7010 | } |
---|
| 7011 | + |
---|
| 7012 | + phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, |
---|
| 7013 | + sizeof(*phba->sli4_hba.idle_stat), |
---|
| 7014 | + GFP_KERNEL); |
---|
| 7015 | + if (!phba->sli4_hba.idle_stat) { |
---|
| 7016 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 7017 | + "3390 Failed allocation for idle_stat\n"); |
---|
| 7018 | + rc = -ENOMEM; |
---|
| 7019 | + goto out_free_hba_eq_info; |
---|
| 7020 | + } |
---|
| 7021 | + |
---|
| 7022 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 7023 | + phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); |
---|
| 7024 | + if (!phba->sli4_hba.c_stat) { |
---|
| 7025 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 7026 | + "3332 Failed allocating per cpu hdwq stats\n"); |
---|
| 7027 | + rc = -ENOMEM; |
---|
| 7028 | + goto out_free_hba_idle_stat; |
---|
| 7029 | + } |
---|
| 7030 | +#endif |
---|
6263 | 7031 | |
---|
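
eq_info is a per-CPU allocation: every possible CPU gets its own instance, so the interrupt hot path can update its statistics without any shared lock. A minimal sketch of that pattern with assumed struct and field names:

    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct eq_intr_info {
        unsigned long isr_cnt;
    };

    static struct eq_intr_info __percpu *eq_info;

    static int eq_stats_init(void)
    {
        eq_info = alloc_percpu(struct eq_intr_info);
        return eq_info ? 0 : -ENOMEM;
    }

    /* Called from the EQ interrupt handler; bumps this CPU's copy. */
    static void eq_stats_bump(void)
    {
        this_cpu_inc(eq_info->isr_cnt);
    }

    static void eq_stats_exit(void)
    {
        free_percpu(eq_info);    /* safe on NULL */
    }
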
6264 | 7032 | /* |
---|
6265 | 7033 | * Enable sr-iov virtual functions if supported and configured |
---|
.. | .. |
---|
6280 | 7048 | |
---|
6281 | 7049 | return 0; |
---|
6282 | 7050 | |
---|
| 7051 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 7052 | +out_free_hba_idle_stat: |
---|
| 7053 | + kfree(phba->sli4_hba.idle_stat); |
---|
| 7054 | +#endif |
---|
| 7055 | +out_free_hba_eq_info: |
---|
| 7056 | + free_percpu(phba->sli4_hba.eq_info); |
---|
| 7057 | +out_free_hba_cpu_map: |
---|
| 7058 | + kfree(phba->sli4_hba.cpu_map); |
---|
6283 | 7059 | out_free_hba_eq_hdl: |
---|
6284 | 7060 | kfree(phba->sli4_hba.hba_eq_hdl); |
---|
6285 | 7061 | out_free_fcf_rr_bmask: |
---|
.. | .. |
---|
6290 | 7066 | lpfc_free_active_sgl(phba); |
---|
6291 | 7067 | out_destroy_cq_event_pool: |
---|
6292 | 7068 | lpfc_sli4_cq_event_pool_destroy(phba); |
---|
| 7069 | +out_free_cmd_rsp_buf: |
---|
| 7070 | + dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); |
---|
| 7071 | + phba->lpfc_cmd_rsp_buf_pool = NULL; |
---|
| 7072 | +out_free_sg_dma_buf: |
---|
| 7073 | + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); |
---|
| 7074 | + phba->lpfc_sg_dma_buf_pool = NULL; |
---|
6293 | 7075 | out_free_bsmbx: |
---|
6294 | 7076 | lpfc_destroy_bootstrap_mbox(phba); |
---|
6295 | 7077 | out_free_mem: |
---|
6296 | 7078 | lpfc_mem_free(phba); |
---|
| 7079 | +out_destroy_workqueue: |
---|
| 7080 | + destroy_workqueue(phba->wq); |
---|
| 7081 | + phba->wq = NULL; |
---|
6297 | 7082 | return rc; |
---|
6298 | 7083 | } |
---|
6299 | 7084 | |
---|
.. | .. |
---|
6309 | 7094 | { |
---|
6310 | 7095 | struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; |
---|
6311 | 7096 | |
---|
| 7097 | + free_percpu(phba->sli4_hba.eq_info); |
---|
| 7098 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 7099 | + free_percpu(phba->sli4_hba.c_stat); |
---|
| 7100 | +#endif |
---|
| 7101 | + kfree(phba->sli4_hba.idle_stat); |
---|
| 7102 | + |
---|
6312 | 7103 | /* Free memory allocated for msi-x interrupt vector to CPU mapping */ |
---|
6313 | 7104 | kfree(phba->sli4_hba.cpu_map); |
---|
| 7105 | + phba->sli4_hba.num_possible_cpu = 0; |
---|
6314 | 7106 | phba->sli4_hba.num_present_cpu = 0; |
---|
6315 | | - phba->sli4_hba.num_online_cpu = 0; |
---|
6316 | 7107 | phba->sli4_hba.curr_disp_cpu = 0; |
---|
| 7108 | + cpumask_clear(&phba->sli4_hba.irq_aff_mask); |
---|
6317 | 7109 | |
---|
6318 | 7110 | /* Free memory allocated for fast-path work queue handles */ |
---|
6319 | 7111 | kfree(phba->sli4_hba.hba_eq_hdl); |
---|
.. | .. |
---|
6381 | 7173 | phba->lpfc_stop_port = lpfc_stop_port_s4; |
---|
6382 | 7174 | break; |
---|
6383 | 7175 | default: |
---|
6384 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 7176 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6385 | 7177 | "1431 Invalid HBA PCI-device group: 0x%x\n", |
---|
6386 | 7178 | dev_grp); |
---|
6387 | 7179 | return -ENODEV; |
---|
.. | .. |
---|
6413 | 7205 | error = PTR_ERR(phba->worker_thread); |
---|
6414 | 7206 | return error; |
---|
6415 | 7207 | } |
---|
6416 | | - |
---|
6417 | | - /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */ |
---|
6418 | | - if (phba->sli_rev == LPFC_SLI_REV4) |
---|
6419 | | - phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); |
---|
6420 | | - else |
---|
6421 | | - phba->wq = NULL; |
---|
6422 | 7208 | |
---|
6423 | 7209 | return 0; |
---|
6424 | 7210 | } |
---|
.. | .. |
---|
6471 | 7257 | /** |
---|
6472 | 7258 | * lpfc_init_iocb_list - Allocate and initialize iocb list. |
---|
6473 | 7259 | * @phba: pointer to lpfc hba data structure. |
---|
| 7260 | + * @iocb_count: number of requested iocbs |
---|
6474 | 7261 | * |
---|
6475 | 7262 | * This routine is invoked to allocate and initialize the driver's IOCB
---|
6476 | 7263 | * list and set up the IOCB tag array accordingly. |
---|
.. | .. |
---|
6493 | 7280 | if (iocbq_entry == NULL) { |
---|
6494 | 7281 | printk(KERN_ERR "%s: only allocated %d iocbs of " |
---|
6495 | 7282 | "expected %d count. Unloading driver.\n", |
---|
6496 | | - __func__, i, LPFC_IOCB_LIST_CNT); |
---|
| 7283 | + __func__, i, iocb_count); |
---|
6497 | 7284 | goto out_free_iocbq; |
---|
6498 | 7285 | } |
---|
6499 | 7286 | |
---|
.. | .. |
---|
6650 | 7437 | /* els xri-sgl book keeping */ |
---|
6651 | 7438 | phba->sli4_hba.els_xri_cnt = 0; |
---|
6652 | 7439 | |
---|
6653 | | - /* scsi xri-buffer book keeping */ |
---|
6654 | | - phba->sli4_hba.scsi_xri_cnt = 0; |
---|
6655 | | - |
---|
6656 | 7440 | /* nvme xri-buffer book keeping */ |
---|
6657 | | - phba->sli4_hba.nvme_xri_cnt = 0; |
---|
| 7441 | + phba->sli4_hba.io_xri_cnt = 0; |
---|
6658 | 7442 | } |
---|
6659 | 7443 | |
---|
6660 | 7444 | /** |
---|
.. | .. |
---|
6685 | 7469 | |
---|
6686 | 7470 | rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); |
---|
6687 | 7471 | if (!rpi_hdr) { |
---|
6688 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
---|
| 7472 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
6689 | 7473 | "0391 Error during rpi post operation\n"); |
---|
6690 | 7474 | lpfc_sli4_remove_rpis(phba); |
---|
6691 | 7475 | rc = -ENODEV; |
---|
.. | .. |
---|
6748 | 7532 | if (!dmabuf) |
---|
6749 | 7533 | return NULL; |
---|
6750 | 7534 | |
---|
6751 | | - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, |
---|
6752 | | - LPFC_HDR_TEMPLATE_SIZE, |
---|
6753 | | - &dmabuf->phys, GFP_KERNEL); |
---|
| 7535 | + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
---|
| 7536 | + LPFC_HDR_TEMPLATE_SIZE, |
---|
| 7537 | + &dmabuf->phys, GFP_KERNEL); |
---|
6754 | 7538 | if (!dmabuf->virt) { |
---|
6755 | 7539 | rpi_hdr = NULL; |
---|
6756 | 7540 | goto err_free_dmabuf; |
---|
.. | .. |
---|
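
The dma_zalloc_coherent() conversions in this file (here and in lpfc_sli_pci_mem_setup() below) are mechanical: since kernel 5.0, dma_alloc_coherent() always returns zeroed memory, so the zalloc wrapper was dropped. A minimal sketch with an assumed size and device:

    #include <linux/dma-mapping.h>

    static void *alloc_hdr_template(struct device *dev, size_t sz,
                                    dma_addr_t *phys)
    {
        /* Buffer arrives zero-filled; no memset() needed. */
        return dma_alloc_coherent(dev, sz, phys, GFP_KERNEL);
    }
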
6868 | 7652 | static void |
---|
6869 | 7653 | lpfc_hba_free(struct lpfc_hba *phba) |
---|
6870 | 7654 | { |
---|
| 7655 | + if (phba->sli_rev == LPFC_SLI_REV4) |
---|
| 7656 | + kfree(phba->sli4_hba.hdwq); |
---|
| 7657 | + |
---|
6871 | 7658 | /* Release the driver assigned board number */ |
---|
6872 | 7659 | idr_remove(&lpfc_hba_index, phba->brd_no); |
---|
6873 | 7660 | |
---|
.. | .. |
---|
6903 | 7690 | phba->fc_arbtov = FF_DEF_ARBTOV; |
---|
6904 | 7691 | |
---|
6905 | 7692 | atomic_set(&phba->sdev_cnt, 0); |
---|
6906 | | - atomic_set(&phba->fc4ScsiInputRequests, 0); |
---|
6907 | | - atomic_set(&phba->fc4ScsiOutputRequests, 0); |
---|
6908 | | - atomic_set(&phba->fc4ScsiControlRequests, 0); |
---|
6909 | | - atomic_set(&phba->fc4ScsiIoCmpls, 0); |
---|
6910 | 7693 | vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); |
---|
6911 | 7694 | if (!vport) |
---|
6912 | 7695 | return -ENODEV; |
---|
.. | .. |
---|
6916 | 7699 | |
---|
6917 | 7700 | if (phba->nvmet_support) { |
---|
6918 | 7701 | /* Only 1 vport (pport) will support NVME target */ |
---|
6919 | | - if (phba->txrdy_payload_pool == NULL) { |
---|
6920 | | - phba->txrdy_payload_pool = dma_pool_create( |
---|
6921 | | - "txrdy_pool", &phba->pcidev->dev, |
---|
6922 | | - TXRDY_PAYLOAD_LEN, 16, 0); |
---|
6923 | | - if (phba->txrdy_payload_pool) { |
---|
6924 | | - phba->targetport = NULL; |
---|
6925 | | - phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; |
---|
6926 | | - lpfc_printf_log(phba, KERN_INFO, |
---|
6927 | | - LOG_INIT | LOG_NVME_DISC, |
---|
6928 | | - "6076 NVME Target Found\n"); |
---|
6929 | | - } |
---|
6930 | | - } |
---|
| 7702 | + phba->targetport = NULL; |
---|
| 7703 | + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; |
---|
| 7704 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, |
---|
| 7705 | + "6076 NVME Target Found\n"); |
---|
6931 | 7706 | } |
---|
6932 | 7707 | |
---|
6933 | 7708 | lpfc_debugfs_initialize(vport); |
---|
.. | .. |
---|
6984 | 7759 | uint32_t old_mask; |
---|
6985 | 7760 | uint32_t old_guard; |
---|
6986 | 7761 | |
---|
6987 | | - int pagecnt = 10; |
---|
6988 | 7762 | if (phba->cfg_prot_mask && phba->cfg_prot_guard) { |
---|
6989 | 7763 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
6990 | 7764 | "1478 Registering BlockGuard with the " |
---|
.. | .. |
---|
7007 | 7781 | if (phba->cfg_prot_mask && phba->cfg_prot_guard) { |
---|
7008 | 7782 | if ((old_mask != phba->cfg_prot_mask) || |
---|
7009 | 7783 | (old_guard != phba->cfg_prot_guard)) |
---|
7010 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 7784 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7011 | 7785 | "1475 Registering BlockGuard with the " |
---|
7012 | 7786 | "SCSI layer: mask %d guard %d\n", |
---|
7013 | 7787 | phba->cfg_prot_mask, |
---|
.. | .. |
---|
7016 | 7790 | scsi_host_set_prot(shost, phba->cfg_prot_mask); |
---|
7017 | 7791 | scsi_host_set_guard(shost, phba->cfg_prot_guard); |
---|
7018 | 7792 | } else |
---|
7019 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 7793 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7020 | 7794 | "1479 Not Registering BlockGuard with the SCSI " |
---|
7021 | 7795 | "layer, Bad protection parameters: %d %d\n", |
---|
7022 | 7796 | old_mask, old_guard); |
---|
7023 | 7797 | } |
---|
7024 | | - |
---|
7025 | | - if (!_dump_buf_data) { |
---|
7026 | | - while (pagecnt) { |
---|
7027 | | - spin_lock_init(&_dump_buf_lock); |
---|
7028 | | - _dump_buf_data = |
---|
7029 | | - (char *) __get_free_pages(GFP_KERNEL, pagecnt); |
---|
7030 | | - if (_dump_buf_data) { |
---|
7031 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7032 | | - "9043 BLKGRD: allocated %d pages for " |
---|
7033 | | - "_dump_buf_data at 0x%p\n", |
---|
7034 | | - (1 << pagecnt), _dump_buf_data); |
---|
7035 | | - _dump_buf_data_order = pagecnt; |
---|
7036 | | - memset(_dump_buf_data, 0, |
---|
7037 | | - ((1 << PAGE_SHIFT) << pagecnt)); |
---|
7038 | | - break; |
---|
7039 | | - } else |
---|
7040 | | - --pagecnt; |
---|
7041 | | - } |
---|
7042 | | - if (!_dump_buf_data_order) |
---|
7043 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7044 | | - "9044 BLKGRD: ERROR unable to allocate " |
---|
7045 | | - "memory for hexdump\n"); |
---|
7046 | | - } else |
---|
7047 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7048 | | - "9045 BLKGRD: already allocated _dump_buf_data=0x%p" |
---|
7049 | | - "\n", _dump_buf_data); |
---|
7050 | | - if (!_dump_buf_dif) { |
---|
7051 | | - while (pagecnt) { |
---|
7052 | | - _dump_buf_dif = |
---|
7053 | | - (char *) __get_free_pages(GFP_KERNEL, pagecnt); |
---|
7054 | | - if (_dump_buf_dif) { |
---|
7055 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7056 | | - "9046 BLKGRD: allocated %d pages for " |
---|
7057 | | - "_dump_buf_dif at 0x%p\n", |
---|
7058 | | - (1 << pagecnt), _dump_buf_dif); |
---|
7059 | | - _dump_buf_dif_order = pagecnt; |
---|
7060 | | - memset(_dump_buf_dif, 0, |
---|
7061 | | - ((1 << PAGE_SHIFT) << pagecnt)); |
---|
7062 | | - break; |
---|
7063 | | - } else |
---|
7064 | | - --pagecnt; |
---|
7065 | | - } |
---|
7066 | | - if (!_dump_buf_dif_order) |
---|
7067 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7068 | | - "9047 BLKGRD: ERROR unable to allocate " |
---|
7069 | | - "memory for hexdump\n"); |
---|
7070 | | - } else |
---|
7071 | | - lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
---|
7072 | | - "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", |
---|
7073 | | - _dump_buf_dif); |
---|
7074 | 7798 | } |
---|
7075 | 7799 | |
---|
7076 | 7800 | /** |
---|
.. | .. |
---|
7095 | 7819 | */ |
---|
7096 | 7820 | shost = pci_get_drvdata(phba->pcidev); |
---|
7097 | 7821 | shost->can_queue = phba->cfg_hba_queue_depth - 10; |
---|
7098 | | - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) |
---|
7099 | | - lpfc_setup_bg(phba, shost); |
---|
7100 | 7822 | |
---|
7101 | 7823 | lpfc_host_attrib_init(shost); |
---|
7102 | 7824 | |
---|
.. | .. |
---|
7132 | 7854 | static int |
---|
7133 | 7855 | lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) |
---|
7134 | 7856 | { |
---|
7135 | | - struct pci_dev *pdev; |
---|
| 7857 | + struct pci_dev *pdev = phba->pcidev; |
---|
7136 | 7858 | unsigned long bar0map_len, bar2map_len; |
---|
7137 | 7859 | int i, hbq_count; |
---|
7138 | 7860 | void *ptr; |
---|
7139 | | - int error = -ENODEV; |
---|
| 7861 | + int error; |
---|
7140 | 7862 | |
---|
7141 | | - /* Obtain PCI device reference */ |
---|
7142 | | - if (!phba->pcidev) |
---|
7143 | | - return error; |
---|
7144 | | - else |
---|
7145 | | - pdev = phba->pcidev; |
---|
| 7863 | + if (!pdev) |
---|
| 7864 | + return -ENODEV; |
---|
7146 | 7865 | |
---|
7147 | 7866 | /* Set the device DMA mask size */ |
---|
7148 | | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 |
---|
7149 | | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { |
---|
7150 | | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 |
---|
7151 | | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { |
---|
7152 | | - return error; |
---|
7153 | | - } |
---|
7154 | | - } |
---|
| 7867 | + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
---|
| 7868 | + if (error) |
---|
| 7869 | + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
---|
| 7870 | + if (error) |
---|
| 7871 | + return error; |
---|
| 7872 | + error = -ENODEV; |
---|
7155 | 7873 | |
---|
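Editor's note: the conversion above replaces the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pairs with dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. The resulting 64-to-32-bit fallback, as a generic sketch for any PCI driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer full 64-bit DMA addressing... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		/* ...fall back to 32 bits on older platforms */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;	/* nonzero: no usable DMA mask */
}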
7156 | 7874 | /* Get the bus address of Bar0 and Bar2 and the number of bytes |
---|
7157 | 7875 | * required by each mapping. |
---|
.. | .. |
---|
7179 | 7897 | } |
---|
7180 | 7898 | |
---|
7181 | 7899 | /* Allocate memory for SLI-2 structures */ |
---|
7182 | | - phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, |
---|
7183 | | - &phba->slim2p.phys, GFP_KERNEL); |
---|
| 7900 | + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, |
---|
| 7901 | + &phba->slim2p.phys, GFP_KERNEL); |
---|
7184 | 7902 | if (!phba->slim2p.virt) |
---|
7185 | 7903 | goto out_iounmap; |
---|
7186 | 7904 | |
---|
.. | .. |
---|
7303 | 8021 | * other register reads as the data may not be valid. Just exit. |
---|
7304 | 8022 | */ |
---|
7305 | 8023 | if (port_error) { |
---|
7306 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 8024 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7307 | 8025 | "1408 Port Failed POST - portsmphr=0x%x, " |
---|
7308 | 8026 | "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " |
---|
7309 | 8027 | "scr2=x%x, hscratch=x%x, pstatus=x%x\n", |
---|
.. | .. |
---|
7352 | 8070 | readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); |
---|
7353 | 8071 | if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || |
---|
7354 | 8072 | (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { |
---|
7355 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 8073 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 8074 | + LOG_TRACE_EVENT, |
---|
7356 | 8075 | "1422 Unrecoverable Error " |
---|
7357 | 8076 | "Detected during POST " |
---|
7358 | 8077 | "uerr_lo_reg=0x%x, " |
---|
.. | .. |
---|
7379 | 8098 | phba->work_status[1] = |
---|
7380 | 8099 | readl(phba->sli4_hba.u.if_type2. |
---|
7381 | 8100 | ERR2regaddr); |
---|
7382 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 8101 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7383 | 8102 | "2888 Unrecoverable port error " |
---|
7384 | 8103 | "following POST: port status reg " |
---|
7385 | 8104 | "0x%x, port_smphr reg 0x%x, " |
---|
.. | .. |
---|
7492 | 8211 | /** |
---|
7493 | 8212 | * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. |
---|
7494 | 8213 | * @phba: pointer to lpfc hba data structure. |
---|
| 8214 | + * @if_type: sli if type to operate on. |
---|
7495 | 8215 | * |
---|
7496 | 8216 | * This routine is invoked to set up SLI4 BAR1 register memory map. |
---|
7497 | 8217 | **/ |
---|
.. | .. |
---|
7598 | 8318 | * plus an alignment restriction of 16 bytes. |
---|
7599 | 8319 | */ |
---|
7600 | 8320 | bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); |
---|
7601 | | - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, |
---|
7602 | | - &dmabuf->phys, GFP_KERNEL); |
---|
| 8321 | + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, |
---|
| 8322 | + &dmabuf->phys, GFP_KERNEL); |
---|
7603 | 8323 | if (!dmabuf->virt) { |
---|
7604 | 8324 | kfree(dmabuf); |
---|
7605 | 8325 | return -ENOMEM; |
---|
.. | .. |
---|
7663 | 8383 | memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); |
---|
7664 | 8384 | } |
---|
7665 | 8385 | |
---|
| 8386 | +static const char * const lpfc_topo_to_str[] = { |
---|
| 8387 | + "Loop then P2P", |
---|
| 8388 | + "Loopback", |
---|
| 8389 | + "P2P Only", |
---|
| 8390 | + "Unsupported", |
---|
| 8391 | + "Loop Only", |
---|
| 8392 | + "Unsupported", |
---|
| 8393 | + "P2P then Loop", |
---|
| 8394 | +}; |
---|
| 8395 | + |
---|
| 8396 | +#define LINK_FLAGS_DEF 0x0 |
---|
| 8397 | +#define LINK_FLAGS_P2P 0x1 |
---|
| 8398 | +#define LINK_FLAGS_LOOP 0x2 |
---|
| 8399 | +/** |
---|
| 8400 | + * lpfc_map_topology - Map the topology read from READ_CONFIG |
---|
| 8401 | + * @phba: pointer to lpfc hba data structure. |
---|
| 8402 | + * @rd_config: pointer to read config data |
---|
| 8403 | + * |
---|
| 8404 | + * This routine is invoked to map the topology values as read |
---|
| 8405 | + * from the read config mailbox command. If the persistent |
---|
| 8406 | + * topology feature is supported, the firmware will provide the |
---|
| 8407 | + * saved topology information to be used in INIT_LINK |
---|
| 8408 | + **/ |
---|
| 8409 | +static void |
---|
| 8410 | +lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) |
---|
| 8411 | +{ |
---|
| 8412 | + u8 ptv, tf, pt; |
---|
| 8413 | + |
---|
| 8414 | + ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); |
---|
| 8415 | + tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); |
---|
| 8416 | + pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); |
---|
| 8417 | + |
---|
| 8418 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
| 8419 | + "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", |
---|
| 8420 | + ptv, tf, pt); |
---|
| 8421 | + if (!ptv) { |
---|
| 8422 | + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
---|
| 8423 | + "2019 FW does not support persistent topology " |
---|
| 8424 | + "Using driver parameter defined value [%s]", |
---|
| 8425 | + lpfc_topo_to_str[phba->cfg_topology]); |
---|
| 8426 | + return; |
---|
| 8427 | + } |
---|
| 8428 | + /* FW supports persistent topology - override module parameter value */ |
---|
| 8429 | + phba->hba_flag |= HBA_PERSISTENT_TOPO; |
---|
| 8430 | + switch (phba->pcidev->device) { |
---|
| 8431 | + case PCI_DEVICE_ID_LANCER_G7_FC: |
---|
| 8432 | + case PCI_DEVICE_ID_LANCER_G6_FC: |
---|
| 8433 | + if (!tf) { |
---|
| 8434 | + phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) |
---|
| 8435 | + ? FLAGS_TOPOLOGY_MODE_LOOP |
---|
| 8436 | + : FLAGS_TOPOLOGY_MODE_PT_PT); |
---|
| 8437 | + } else { |
---|
| 8438 | + phba->hba_flag &= ~HBA_PERSISTENT_TOPO; |
---|
| 8439 | + } |
---|
| 8440 | + break; |
---|
| 8441 | + default: /* G5 */ |
---|
| 8442 | + if (tf) { |
---|
| 8443 | + /* If topology failover set - pt is '0' or '1' */ |
---|
| 8444 | + phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : |
---|
| 8445 | + FLAGS_TOPOLOGY_MODE_LOOP_PT); |
---|
| 8446 | + } else { |
---|
| 8447 | + phba->cfg_topology = ((pt == LINK_FLAGS_P2P) |
---|
| 8448 | + ? FLAGS_TOPOLOGY_MODE_PT_PT |
---|
| 8449 | + : FLAGS_TOPOLOGY_MODE_LOOP); |
---|
| 8450 | + } |
---|
| 8451 | + break; |
---|
| 8452 | + } |
---|
| 8453 | + if (phba->hba_flag & HBA_PERSISTENT_TOPO) { |
---|
| 8454 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
| 8455 | + "2020 Using persistent topology value [%s]", |
---|
| 8456 | + lpfc_topo_to_str[phba->cfg_topology]); |
---|
| 8457 | + } else { |
---|
| 8458 | + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
---|
| 8459 | + "2021 Invalid topology values from FW " |
---|
| 8460 | + "Using driver parameter defined value [%s]", |
---|
| 8461 | + lpfc_topo_to_str[phba->cfg_topology]); |
---|
| 8462 | + } |
---|
| 8463 | +} |
---|
| 8464 | + |
---|
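Editor's note: a worked decode of the persistent-topology bits handled by lpfc_map_topology() above, following its switch statement (LINK_FLAGS_P2P = 0x1, LINK_FLAGS_LOOP = 0x2):

/*
 *  ptv  tf  pt    G6/G7 result                 G5 (default) result
 *  ---  --  ----  ---------------------------  ---------------------------
 *   0    x  x     module parameter retained    module parameter retained
 *   1    0  0     FLAGS_TOPOLOGY_MODE_PT_PT    FLAGS_TOPOLOGY_MODE_LOOP
 *   1    0  P2P   FLAGS_TOPOLOGY_MODE_PT_PT    FLAGS_TOPOLOGY_MODE_PT_PT
 *   1    0  LOOP  FLAGS_TOPOLOGY_MODE_LOOP     FLAGS_TOPOLOGY_MODE_LOOP
 *   1    1  0     module parameter retained    FLAGS_TOPOLOGY_MODE_LOOP_PT
 *   1    1  1     module parameter retained    FLAGS_TOPOLOGY_MODE_PT_LOOP
 */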
7666 | 8465 | /** |
---|
7667 | 8466 | * lpfc_sli4_read_config - Get the config parameters. |
---|
7668 | 8467 | * @phba: pointer to lpfc hba data structure. |
---|
.. | .. |
---|
7688 | 8487 | struct lpfc_rsrc_desc_fcfcoe *desc; |
---|
7689 | 8488 | char *pdesc_0; |
---|
7690 | 8489 | uint16_t forced_link_speed; |
---|
7691 | | - uint32_t if_type; |
---|
| 8490 | + uint32_t if_type, qmin; |
---|
7692 | 8491 | int length, i, rc = 0, rc2; |
---|
7693 | 8492 | |
---|
7694 | 8493 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
---|
7695 | 8494 | if (!pmb) { |
---|
7696 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 8495 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7697 | 8496 | "2011 Unable to allocate memory for issuing " |
---|
7698 | 8497 | "SLI_CONFIG_SPECIAL mailbox command\n"); |
---|
7699 | 8498 | return -ENOMEM; |
---|
.. | .. |
---|
7703 | 8502 | |
---|
7704 | 8503 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
---|
7705 | 8504 | if (rc != MBX_SUCCESS) { |
---|
7706 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
7707 | | - "2012 Mailbox failed , mbxCmd x%x " |
---|
7708 | | - "READ_CONFIG, mbxStatus x%x\n", |
---|
7709 | | - bf_get(lpfc_mqe_command, &pmb->u.mqe), |
---|
7710 | | - bf_get(lpfc_mqe_status, &pmb->u.mqe)); |
---|
| 8505 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 8506 | + "2012 Mailbox failed , mbxCmd x%x " |
---|
| 8507 | + "READ_CONFIG, mbxStatus x%x\n", |
---|
| 8508 | + bf_get(lpfc_mqe_command, &pmb->u.mqe), |
---|
| 8509 | + bf_get(lpfc_mqe_status, &pmb->u.mqe)); |
---|
7711 | 8510 | rc = -EIO; |
---|
7712 | 8511 | } else { |
---|
7713 | 8512 | rd_config = &pmb->u.mqe.un.rd_config; |
---|
.. | .. |
---|
7730 | 8529 | phba->sli4_hba.bbscn_params.word0 = rd_config->word8; |
---|
7731 | 8530 | } |
---|
7732 | 8531 | |
---|
| 8532 | + phba->sli4_hba.conf_trunk = |
---|
| 8533 | + bf_get(lpfc_mbx_rd_conf_trunk, rd_config); |
---|
7733 | 8534 | phba->sli4_hba.extents_in_use = |
---|
7734 | 8535 | bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); |
---|
7735 | 8536 | phba->sli4_hba.max_cfg_param.max_xri = |
---|
7736 | 8537 | bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); |
---|
| 8538 | + /* Reduce resource usage in kdump environment */ |
---|
| 8539 | + if (is_kdump_kernel() && |
---|
| 8540 | + phba->sli4_hba.max_cfg_param.max_xri > 512) |
---|
| 8541 | + phba->sli4_hba.max_cfg_param.max_xri = 512; |
---|
7737 | 8542 | phba->sli4_hba.max_cfg_param.xri_base = |
---|
7738 | 8543 | bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); |
---|
7739 | 8544 | phba->sli4_hba.max_cfg_param.max_vpi = |
---|
.. | .. |
---|
7768 | 8573 | phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? |
---|
7769 | 8574 | (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; |
---|
7770 | 8575 | phba->max_vports = phba->max_vpi; |
---|
| 8576 | + lpfc_map_topology(phba, rd_config); |
---|
7771 | 8577 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
---|
7772 | 8578 | "2003 cfg params Extents? %d " |
---|
7773 | 8579 | "XRI(B:%d M:%d), " |
---|
7774 | 8580 | "VPI(B:%d M:%d) " |
---|
7775 | 8581 | "VFI(B:%d M:%d) " |
---|
7776 | 8582 | "RPI(B:%d M:%d) " |
---|
7777 | | - "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", |
---|
| 8583 | + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", |
---|
7778 | 8584 | phba->sli4_hba.extents_in_use, |
---|
7779 | 8585 | phba->sli4_hba.max_cfg_param.xri_base, |
---|
7780 | 8586 | phba->sli4_hba.max_cfg_param.max_xri, |
---|
.. | .. |
---|
7788 | 8594 | phba->sli4_hba.max_cfg_param.max_eq, |
---|
7789 | 8595 | phba->sli4_hba.max_cfg_param.max_cq, |
---|
7790 | 8596 | phba->sli4_hba.max_cfg_param.max_wq, |
---|
7791 | | - phba->sli4_hba.max_cfg_param.max_rq); |
---|
| 8597 | + phba->sli4_hba.max_cfg_param.max_rq, |
---|
| 8598 | + phba->lmt); |
---|
7792 | 8599 | |
---|
7793 | 8600 | /* |
---|
7794 | | - * Calculate NVME queue resources based on how |
---|
7795 | | - * many WQ/CQs are available. |
---|
| 8601 | + * Calculate queue resources based on how |
---|
| 8602 | + * many WQ/CQ/EQs are available. |
---|
7796 | 8603 | */ |
---|
7797 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
7798 | | - length = phba->sli4_hba.max_cfg_param.max_wq; |
---|
7799 | | - if (phba->sli4_hba.max_cfg_param.max_cq < |
---|
7800 | | - phba->sli4_hba.max_cfg_param.max_wq) |
---|
7801 | | - length = phba->sli4_hba.max_cfg_param.max_cq; |
---|
| 8604 | + qmin = phba->sli4_hba.max_cfg_param.max_wq; |
---|
| 8605 | + if (phba->sli4_hba.max_cfg_param.max_cq < qmin) |
---|
| 8606 | + qmin = phba->sli4_hba.max_cfg_param.max_cq; |
---|
| 8607 | + if (phba->sli4_hba.max_cfg_param.max_eq < qmin) |
---|
| 8608 | + qmin = phba->sli4_hba.max_cfg_param.max_eq; |
---|
| 8609 | + /* |
---|
| 8610 | + * What's left after this can go toward NVME / FCP. |
---|
| 8611 | + * The minus 4 accounts for ELS, NVME LS, MBOX |
---|
| 8612 | + * plus one extra. When configured for |
---|
| 8613 | + * NVMET, FCP io channel WQs are not created. |
---|
| 8614 | + */ |
---|
| 8615 | + qmin -= 4; |
---|
7802 | 8616 | |
---|
7803 | | - /* |
---|
7804 | | - * Whats left after this can go toward NVME. |
---|
7805 | | - * The minus 6 accounts for ELS, NVME LS, MBOX |
---|
7806 | | - * fof plus a couple extra. When configured for |
---|
7807 | | - * NVMET, FCP io channel WQs are not created. |
---|
7808 | | - */ |
---|
7809 | | - length -= 6; |
---|
7810 | | - if (!phba->nvmet_support) |
---|
7811 | | - length -= phba->cfg_fcp_io_channel; |
---|
7812 | | - |
---|
7813 | | - if (phba->cfg_nvme_io_channel > length) { |
---|
7814 | | - lpfc_printf_log( |
---|
7815 | | - phba, KERN_ERR, LOG_SLI, |
---|
7816 | | - "2005 Reducing NVME IO channel to %d: " |
---|
7817 | | - "WQ %d CQ %d NVMEIO %d FCPIO %d\n", |
---|
7818 | | - length, |
---|
| 8617 | + /* Check to see if there is enough for NVME */ |
---|
| 8618 | + if ((phba->cfg_irq_chann > qmin) || |
---|
| 8619 | + (phba->cfg_hdw_queue > qmin)) { |
---|
| 8620 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 8621 | + "2005 Reducing Queues - " |
---|
| 8622 | + "FW resource limitation: " |
---|
| 8623 | + "WQ %d CQ %d EQ %d: min %d: " |
---|
| 8624 | + "IRQ %d HDWQ %d\n", |
---|
7819 | 8625 | phba->sli4_hba.max_cfg_param.max_wq, |
---|
7820 | 8626 | phba->sli4_hba.max_cfg_param.max_cq, |
---|
7821 | | - phba->cfg_nvme_io_channel, |
---|
7822 | | - phba->cfg_fcp_io_channel); |
---|
| 8627 | + phba->sli4_hba.max_cfg_param.max_eq, |
---|
| 8628 | + qmin, phba->cfg_irq_chann, |
---|
| 8629 | + phba->cfg_hdw_queue); |
---|
7823 | 8630 | |
---|
7824 | | - phba->cfg_nvme_io_channel = length; |
---|
7825 | | - } |
---|
| 8631 | + if (phba->cfg_irq_chann > qmin) |
---|
| 8632 | + phba->cfg_irq_chann = qmin; |
---|
| 8633 | + if (phba->cfg_hdw_queue > qmin) |
---|
| 8634 | + phba->cfg_hdw_queue = qmin; |
---|
7826 | 8635 | } |
---|
7827 | 8636 | } |
---|
7828 | 8637 | |
---|
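Editor's note: a worked example of the qmin budget computed above, with hypothetical firmware limits. Suppose READ_CONFIG reports max_wq = 64, max_cq = 80 and max_eq = 32; then qmin = min(64, 80, 32) - 4 = 28, where the four reserved queues cover ELS, NVME LS, the mailbox queue and one spare. A configuration of cfg_irq_chann = 32 and cfg_hdw_queue = 32 would trip message 2005 and clamp both values to 28.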
.. | .. |
---|
7875 | 8684 | LPFC_USER_LINK_SPEED_AUTO; |
---|
7876 | 8685 | break; |
---|
7877 | 8686 | default: |
---|
7878 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 8687 | + lpfc_printf_log(phba, KERN_ERR, |
---|
| 8688 | + LOG_TRACE_EVENT, |
---|
7879 | 8689 | "0047 Unrecognized link " |
---|
7880 | 8690 | "speed : %d\n", |
---|
7881 | 8691 | forced_link_speed); |
---|
.. | .. |
---|
7912 | 8722 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
---|
7913 | 8723 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
---|
7914 | 8724 | if (rc2 || shdr_status || shdr_add_status) { |
---|
7915 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 8725 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7916 | 8726 | "3026 Mailbox failed , mbxCmd x%x " |
---|
7917 | 8727 | "GET_FUNCTION_CONFIG, mbxStatus x%x\n", |
---|
7918 | 8728 | bf_get(lpfc_mqe_command, &pmb->u.mqe), |
---|
.. | .. |
---|
7949 | 8759 | "vf_number:%d\n", phba->sli4_hba.iov.pf_number, |
---|
7950 | 8760 | phba->sli4_hba.iov.vf_number); |
---|
7951 | 8761 | else |
---|
7952 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
---|
| 8762 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7953 | 8763 | "3028 GET_FUNCTION_CONFIG: failed to find " |
---|
7954 | | - "Resrouce Descriptor:x%x\n", |
---|
| 8764 | + "Resource Descriptor:x%x\n", |
---|
7955 | 8765 | LPFC_RSRC_DESC_TYPE_FCFCOE); |
---|
7956 | 8766 | |
---|
7957 | 8767 | read_cfg_out: |
---|
.. | .. |
---|
7986 | 8796 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
---|
7987 | 8797 | GFP_KERNEL); |
---|
7988 | 8798 | if (!mboxq) { |
---|
7989 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 8799 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
7990 | 8800 | "0492 Unable to allocate memory for " |
---|
7991 | 8801 | "issuing SLI_CONFIG_SPECIAL mailbox " |
---|
7992 | 8802 | "command\n"); |
---|
.. | .. |
---|
8001 | 8811 | memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); |
---|
8002 | 8812 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
---|
8003 | 8813 | if (rc != MBX_SUCCESS) { |
---|
8004 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 8814 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8005 | 8815 | "0493 SLI_CONFIG_SPECIAL mailbox " |
---|
8006 | 8816 | "failed with status x%x\n", |
---|
8007 | 8817 | rc); |
---|
.. | .. |
---|
8034 | 8844 | static int |
---|
8035 | 8845 | lpfc_sli4_queue_verify(struct lpfc_hba *phba) |
---|
8036 | 8846 | { |
---|
8037 | | - int io_channel; |
---|
8038 | | - int fof_vectors = phba->cfg_fof ? 1 : 0; |
---|
8039 | | - |
---|
8040 | 8847 | /* |
---|
8041 | 8848 | * Sanity check for configured queue parameters against the run-time |
---|
8042 | 8849 | * device parameters |
---|
8043 | 8850 | */ |
---|
8044 | 8851 | |
---|
8045 | | - /* Sanity check on HBA EQ parameters */ |
---|
8046 | | - io_channel = phba->io_channel_irqs; |
---|
8047 | | - |
---|
8048 | | - if (phba->sli4_hba.num_online_cpu < io_channel) { |
---|
8049 | | - lpfc_printf_log(phba, |
---|
8050 | | - KERN_ERR, LOG_INIT, |
---|
8051 | | - "3188 Reducing IO channels to match number of " |
---|
8052 | | - "online CPUs: from %d to %d\n", |
---|
8053 | | - io_channel, phba->sli4_hba.num_online_cpu); |
---|
8054 | | - io_channel = phba->sli4_hba.num_online_cpu; |
---|
8055 | | - } |
---|
8056 | | - |
---|
8057 | | - if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { |
---|
8058 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8059 | | - "2575 Reducing IO channels to match number of " |
---|
8060 | | - "available EQs: from %d to %d\n", |
---|
8061 | | - io_channel, |
---|
8062 | | - phba->sli4_hba.max_cfg_param.max_eq); |
---|
8063 | | - io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; |
---|
8064 | | - } |
---|
8065 | | - |
---|
8066 | | - /* The actual number of FCP / NVME event queues adopted */ |
---|
8067 | | - if (io_channel != phba->io_channel_irqs) |
---|
8068 | | - phba->io_channel_irqs = io_channel; |
---|
8069 | | - if (phba->cfg_fcp_io_channel > io_channel) |
---|
8070 | | - phba->cfg_fcp_io_channel = io_channel; |
---|
8071 | | - if (phba->cfg_nvme_io_channel > io_channel) |
---|
8072 | | - phba->cfg_nvme_io_channel = io_channel; |
---|
8073 | 8852 | if (phba->nvmet_support) { |
---|
8074 | | - if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) |
---|
8075 | | - phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; |
---|
| 8853 | + if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) |
---|
| 8854 | + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; |
---|
| 8855 | + if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) |
---|
| 8856 | + phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; |
---|
8076 | 8857 | } |
---|
8077 | | - if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) |
---|
8078 | | - phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; |
---|
8079 | 8858 | |
---|
8080 | 8859 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8081 | | - "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", |
---|
8082 | | - phba->io_channel_irqs, phba->cfg_fcp_io_channel, |
---|
8083 | | - phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); |
---|
| 8860 | + "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", |
---|
| 8861 | + phba->cfg_hdw_queue, phba->cfg_irq_chann, |
---|
| 8862 | + phba->cfg_nvmet_mrq); |
---|
8084 | 8863 | |
---|
8085 | 8864 | /* Get EQ depth from module parameter, fake the default for now */ |
---|
8086 | 8865 | phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; |
---|
.. | .. |
---|
8093 | 8872 | } |
---|
8094 | 8873 | |
---|
8095 | 8874 | static int |
---|
8096 | | -lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) |
---|
| 8875 | +lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) |
---|
8097 | 8876 | { |
---|
8098 | 8877 | struct lpfc_queue *qdesc; |
---|
| 8878 | + u32 wqesize; |
---|
| 8879 | + int cpu; |
---|
8099 | 8880 | |
---|
8100 | | - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, |
---|
8101 | | - phba->sli4_hba.cq_esize, |
---|
8102 | | - LPFC_CQE_EXP_COUNT); |
---|
8103 | | - if (!qdesc) { |
---|
8104 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8105 | | - "0508 Failed allocate fast-path NVME CQ (%d)\n", |
---|
8106 | | - wqidx); |
---|
8107 | | - return 1; |
---|
8108 | | - } |
---|
8109 | | - qdesc->qe_valid = 1; |
---|
8110 | | - phba->sli4_hba.nvme_cq[wqidx] = qdesc; |
---|
8111 | | - |
---|
8112 | | - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, |
---|
8113 | | - LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); |
---|
8114 | | - if (!qdesc) { |
---|
8115 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8116 | | - "0509 Failed allocate fast-path NVME WQ (%d)\n", |
---|
8117 | | - wqidx); |
---|
8118 | | - return 1; |
---|
8119 | | - } |
---|
8120 | | - phba->sli4_hba.nvme_wq[wqidx] = qdesc; |
---|
8121 | | - list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
---|
8122 | | - return 0; |
---|
8123 | | -} |
---|
8124 | | - |
---|
8125 | | -static int |
---|
8126 | | -lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) |
---|
8127 | | -{ |
---|
8128 | | - struct lpfc_queue *qdesc; |
---|
8129 | | - uint32_t wqesize; |
---|
8130 | | - |
---|
8131 | | - /* Create Fast Path FCP CQs */ |
---|
| 8881 | + cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); |
---|
| 8882 | + /* Create Fast Path IO CQs */ |
---|
8132 | 8883 | if (phba->enab_exp_wqcq_pages) |
---|
8133 | 8884 | /* Increase the CQ size when WQEs contain an embedded cdb */ |
---|
8134 | 8885 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, |
---|
8135 | 8886 | phba->sli4_hba.cq_esize, |
---|
8136 | | - LPFC_CQE_EXP_COUNT); |
---|
| 8887 | + LPFC_CQE_EXP_COUNT, cpu); |
---|
8137 | 8888 | |
---|
8138 | 8889 | else |
---|
8139 | 8890 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8140 | 8891 | phba->sli4_hba.cq_esize, |
---|
8141 | | - phba->sli4_hba.cq_ecount); |
---|
| 8892 | + phba->sli4_hba.cq_ecount, cpu); |
---|
8142 | 8893 | if (!qdesc) { |
---|
8143 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8144 | | - "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); |
---|
| 8894 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 8895 | + "0499 Failed allocate fast-path IO CQ (%d)\n", |
---|
| 8896 | + idx); |
---|
8145 | 8897 | return 1; |
---|
8146 | 8898 | } |
---|
8147 | 8899 | qdesc->qe_valid = 1; |
---|
8148 | | - phba->sli4_hba.fcp_cq[wqidx] = qdesc; |
---|
| 8900 | + qdesc->hdwq = idx; |
---|
| 8901 | + qdesc->chann = cpu; |
---|
| 8902 | + phba->sli4_hba.hdwq[idx].io_cq = qdesc; |
---|
8149 | 8903 | |
---|
8150 | | - /* Create Fast Path FCP WQs */ |
---|
| 8904 | + /* Create Fast Path IO WQs */ |
---|
8151 | 8905 | if (phba->enab_exp_wqcq_pages) { |
---|
8152 | 8906 | /* Increase the WQ size when WQEs contain an embedded cdb */ |
---|
8153 | 8907 | wqesize = (phba->fcp_embed_io) ? |
---|
8154 | 8908 | LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; |
---|
8155 | 8909 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, |
---|
8156 | 8910 | wqesize, |
---|
8157 | | - LPFC_WQE_EXP_COUNT); |
---|
| 8911 | + LPFC_WQE_EXP_COUNT, cpu); |
---|
8158 | 8912 | } else |
---|
8159 | 8913 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8160 | 8914 | phba->sli4_hba.wq_esize, |
---|
8161 | | - phba->sli4_hba.wq_ecount); |
---|
| 8915 | + phba->sli4_hba.wq_ecount, cpu); |
---|
8162 | 8916 | |
---|
8163 | 8917 | if (!qdesc) { |
---|
8164 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8165 | | - "0503 Failed allocate fast-path FCP WQ (%d)\n", |
---|
8166 | | - wqidx); |
---|
| 8918 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 8919 | + "0503 Failed allocate fast-path IO WQ (%d)\n", |
---|
| 8920 | + idx); |
---|
8167 | 8921 | return 1; |
---|
8168 | 8922 | } |
---|
8169 | | - phba->sli4_hba.fcp_wq[wqidx] = qdesc; |
---|
| 8923 | + qdesc->hdwq = idx; |
---|
| 8924 | + qdesc->chann = cpu; |
---|
| 8925 | + phba->sli4_hba.hdwq[idx].io_wq = qdesc; |
---|
8170 | 8926 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
---|
8171 | 8927 | return 0; |
---|
8172 | 8928 | } |
---|
.. | .. |
---|
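Editor's note: lpfc_sli4_queue_alloc() now takes the CPU that will service the queue so descriptor memory can be placed on that CPU's NUMA node (the rqbp allocation later in this patch makes the same move to kzalloc_node()). A minimal sketch of the pattern, assuming an illustrative example_queue rather than the real lpfc structures:

#include <linux/slab.h>
#include <linux/topology.h>

struct example_queue {
	void *entries;
	size_t size;
};

static struct example_queue *example_queue_alloc(size_t size, int cpu)
{
	struct example_queue *q;

	/* kzalloc_node() keeps both allocations local to the CPU's node */
	q = kzalloc_node(sizeof(*q), GFP_KERNEL, cpu_to_node(cpu));
	if (!q)
		return NULL;
	q->entries = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (!q->entries) {
		kfree(q);
		return NULL;
	}
	q->size = size;
	return q;
}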
8189 | 8945 | lpfc_sli4_queue_create(struct lpfc_hba *phba) |
---|
8190 | 8946 | { |
---|
8191 | 8947 | struct lpfc_queue *qdesc; |
---|
8192 | | - int idx, io_channel; |
---|
| 8948 | + int idx, cpu, eqcpu; |
---|
| 8949 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 8950 | + struct lpfc_vector_map_info *cpup; |
---|
| 8951 | + struct lpfc_vector_map_info *eqcpup; |
---|
| 8952 | + struct lpfc_eq_intr_info *eqi; |
---|
8193 | 8953 | |
---|
8194 | 8954 | /* |
---|
8195 | 8955 | * Create HBA Record arrays. |
---|
8196 | 8956 | * Both NVME and FCP will share that same vectors / EQs |
---|
8197 | 8957 | */ |
---|
8198 | | - io_channel = phba->io_channel_irqs; |
---|
8199 | | - if (!io_channel) |
---|
8200 | | - return -ERANGE; |
---|
8201 | | - |
---|
8202 | 8958 | phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; |
---|
8203 | 8959 | phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; |
---|
8204 | 8960 | phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; |
---|
.. | .. |
---|
8210 | 8966 | phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; |
---|
8211 | 8967 | phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; |
---|
8212 | 8968 | |
---|
8213 | | - phba->sli4_hba.hba_eq = kcalloc(io_channel, |
---|
8214 | | - sizeof(struct lpfc_queue *), |
---|
8215 | | - GFP_KERNEL); |
---|
8216 | | - if (!phba->sli4_hba.hba_eq) { |
---|
8217 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8218 | | - "2576 Failed allocate memory for " |
---|
8219 | | - "fast-path EQ record array\n"); |
---|
8220 | | - goto out_error; |
---|
8221 | | - } |
---|
8222 | | - |
---|
8223 | | - if (phba->cfg_fcp_io_channel) { |
---|
8224 | | - phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, |
---|
8225 | | - sizeof(struct lpfc_queue *), |
---|
8226 | | - GFP_KERNEL); |
---|
8227 | | - if (!phba->sli4_hba.fcp_cq) { |
---|
8228 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8229 | | - "2577 Failed allocate memory for " |
---|
8230 | | - "fast-path CQ record array\n"); |
---|
| 8969 | + if (!phba->sli4_hba.hdwq) { |
---|
| 8970 | + phba->sli4_hba.hdwq = kcalloc( |
---|
| 8971 | + phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), |
---|
| 8972 | + GFP_KERNEL); |
---|
| 8973 | + if (!phba->sli4_hba.hdwq) { |
---|
| 8974 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 8975 | + "6427 Failed allocate memory for " |
---|
| 8976 | + "fast-path Hardware Queue array\n"); |
---|
8231 | 8977 | goto out_error; |
---|
8232 | 8978 | } |
---|
8233 | | - phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, |
---|
8234 | | - sizeof(struct lpfc_queue *), |
---|
8235 | | - GFP_KERNEL); |
---|
8236 | | - if (!phba->sli4_hba.fcp_wq) { |
---|
8237 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8238 | | - "2578 Failed allocate memory for " |
---|
8239 | | - "fast-path FCP WQ record array\n"); |
---|
8240 | | - goto out_error; |
---|
8241 | | - } |
---|
8242 | | - /* |
---|
8243 | | - * Since the first EQ can have multiple CQs associated with it, |
---|
8244 | | - * this array is used to quickly see if we have a FCP fast-path |
---|
8245 | | - * CQ match. |
---|
8246 | | - */ |
---|
8247 | | - phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, |
---|
8248 | | - sizeof(uint16_t), |
---|
8249 | | - GFP_KERNEL); |
---|
8250 | | - if (!phba->sli4_hba.fcp_cq_map) { |
---|
8251 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8252 | | - "2545 Failed allocate memory for " |
---|
8253 | | - "fast-path CQ map\n"); |
---|
8254 | | - goto out_error; |
---|
| 8979 | + /* Prepare hardware queues to take IO buffers */ |
---|
| 8980 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 8981 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 8982 | + spin_lock_init(&qp->io_buf_list_get_lock); |
---|
| 8983 | + spin_lock_init(&qp->io_buf_list_put_lock); |
---|
| 8984 | + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); |
---|
| 8985 | + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); |
---|
| 8986 | + qp->get_io_bufs = 0; |
---|
| 8987 | + qp->put_io_bufs = 0; |
---|
| 8988 | + qp->total_io_bufs = 0; |
---|
| 8989 | + spin_lock_init(&qp->abts_io_buf_list_lock); |
---|
| 8990 | + INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); |
---|
| 8991 | + qp->abts_scsi_io_bufs = 0; |
---|
| 8992 | + qp->abts_nvme_io_bufs = 0; |
---|
| 8993 | + INIT_LIST_HEAD(&qp->sgl_list); |
---|
| 8994 | + INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); |
---|
| 8995 | + spin_lock_init(&qp->hdwq_lock); |
---|
8255 | 8996 | } |
---|
8256 | 8997 | } |
---|
8257 | 8998 | |
---|
8258 | | - if (phba->cfg_nvme_io_channel) { |
---|
8259 | | - phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, |
---|
8260 | | - sizeof(struct lpfc_queue *), |
---|
8261 | | - GFP_KERNEL); |
---|
8262 | | - if (!phba->sli4_hba.nvme_cq) { |
---|
8263 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8264 | | - "6077 Failed allocate memory for " |
---|
8265 | | - "fast-path CQ record array\n"); |
---|
8266 | | - goto out_error; |
---|
8267 | | - } |
---|
8268 | | - |
---|
8269 | | - phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, |
---|
8270 | | - sizeof(struct lpfc_queue *), |
---|
8271 | | - GFP_KERNEL); |
---|
8272 | | - if (!phba->sli4_hba.nvme_wq) { |
---|
8273 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8274 | | - "2581 Failed allocate memory for " |
---|
8275 | | - "fast-path NVME WQ record array\n"); |
---|
8276 | | - goto out_error; |
---|
8277 | | - } |
---|
8278 | | - |
---|
8279 | | - /* |
---|
8280 | | - * Since the first EQ can have multiple CQs associated with it, |
---|
8281 | | - * this array is used to quickly see if we have a NVME fast-path |
---|
8282 | | - * CQ match. |
---|
8283 | | - */ |
---|
8284 | | - phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, |
---|
8285 | | - sizeof(uint16_t), |
---|
8286 | | - GFP_KERNEL); |
---|
8287 | | - if (!phba->sli4_hba.nvme_cq_map) { |
---|
8288 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8289 | | - "6078 Failed allocate memory for " |
---|
8290 | | - "fast-path CQ map\n"); |
---|
8291 | | - goto out_error; |
---|
8292 | | - } |
---|
8293 | | - |
---|
| 8999 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
8294 | 9000 | if (phba->nvmet_support) { |
---|
8295 | 9001 | phba->sli4_hba.nvmet_cqset = kcalloc( |
---|
8296 | 9002 | phba->cfg_nvmet_mrq, |
---|
8297 | 9003 | sizeof(struct lpfc_queue *), |
---|
8298 | 9004 | GFP_KERNEL); |
---|
8299 | 9005 | if (!phba->sli4_hba.nvmet_cqset) { |
---|
8300 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9006 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8301 | 9007 | "3121 Fail allocate memory for " |
---|
8302 | 9008 | "fast-path CQ set array\n"); |
---|
8303 | 9009 | goto out_error; |
---|
.. | .. |
---|
8307 | 9013 | sizeof(struct lpfc_queue *), |
---|
8308 | 9014 | GFP_KERNEL); |
---|
8309 | 9015 | if (!phba->sli4_hba.nvmet_mrq_hdr) { |
---|
8310 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9016 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8311 | 9017 | "3122 Fail allocate memory for " |
---|
8312 | 9018 | "fast-path RQ set hdr array\n"); |
---|
8313 | 9019 | goto out_error; |
---|
.. | .. |
---|
8317 | 9023 | sizeof(struct lpfc_queue *), |
---|
8318 | 9024 | GFP_KERNEL); |
---|
8319 | 9025 | if (!phba->sli4_hba.nvmet_mrq_data) { |
---|
8320 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9026 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8321 | 9027 | "3124 Fail allocate memory for " |
---|
8322 | 9028 | "fast-path RQ set data array\n"); |
---|
8323 | 9029 | goto out_error; |
---|
.. | .. |
---|
8328 | 9034 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); |
---|
8329 | 9035 | |
---|
8330 | 9036 | /* Create HBA Event Queues (EQs) */ |
---|
8331 | | - for (idx = 0; idx < io_channel; idx++) { |
---|
8332 | | - /* Create EQs */ |
---|
| 9037 | + for_each_present_cpu(cpu) { |
---|
| 9038 | + /* We only want to create 1 EQ per vector, even though |
---|
| 9039 | + * multiple CPUs might be using that vector, so we only |
---|
| 9040 | + * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ. |
---|
| 9041 | + */ |
---|
| 9042 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 9043 | + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) |
---|
| 9044 | + continue; |
---|
| 9045 | + |
---|
| 9046 | + /* Get a ptr to the Hardware Queue associated with this CPU */ |
---|
| 9047 | + qp = &phba->sli4_hba.hdwq[cpup->hdwq]; |
---|
| 9048 | + |
---|
| 9049 | + /* Allocate an EQ */ |
---|
8333 | 9050 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8334 | 9051 | phba->sli4_hba.eq_esize, |
---|
8335 | | - phba->sli4_hba.eq_ecount); |
---|
| 9052 | + phba->sli4_hba.eq_ecount, cpu); |
---|
8336 | 9053 | if (!qdesc) { |
---|
8337 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8338 | | - "0497 Failed allocate EQ (%d)\n", idx); |
---|
| 9054 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9055 | + "0497 Failed allocate EQ (%d)\n", |
---|
| 9056 | + cpup->hdwq); |
---|
8339 | 9057 | goto out_error; |
---|
8340 | 9058 | } |
---|
8341 | 9059 | qdesc->qe_valid = 1; |
---|
8342 | | - phba->sli4_hba.hba_eq[idx] = qdesc; |
---|
| 9060 | + qdesc->hdwq = cpup->hdwq; |
---|
| 9061 | + qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ |
---|
| 9062 | + qdesc->last_cpu = qdesc->chann; |
---|
| 9063 | + |
---|
| 9064 | + /* Save the allocated EQ in the Hardware Queue */ |
---|
| 9065 | + qp->hba_eq = qdesc; |
---|
| 9066 | + |
---|
| 9067 | + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); |
---|
| 9068 | + list_add(&qdesc->cpu_list, &eqi->list); |
---|
8343 | 9069 | } |
---|
8344 | 9070 | |
---|
8345 | | - /* FCP and NVME io channels are not required to be balanced */ |
---|
| 9071 | + /* Now we need to populate the other Hardware Queues, that share |
---|
| 9072 | + * an IRQ vector, with the associated EQ ptr. |
---|
| 9073 | + */ |
---|
| 9074 | + for_each_present_cpu(cpu) { |
---|
| 9075 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
8346 | 9076 | |
---|
8347 | | - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) |
---|
8348 | | - if (lpfc_alloc_fcp_wq_cq(phba, idx)) |
---|
8349 | | - goto out_error; |
---|
| 9077 | + /* Check for EQ already allocated in previous loop */ |
---|
| 9078 | + if (cpup->flag & LPFC_CPU_FIRST_IRQ) |
---|
| 9079 | + continue; |
---|
8350 | 9080 | |
---|
8351 | | - for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) |
---|
8352 | | - if (lpfc_alloc_nvme_wq_cq(phba, idx)) |
---|
| 9081 | + /* Check for multiple CPUs per hdwq */ |
---|
| 9082 | + qp = &phba->sli4_hba.hdwq[cpup->hdwq]; |
---|
| 9083 | + if (qp->hba_eq) |
---|
| 9084 | + continue; |
---|
| 9085 | + |
---|
| 9086 | + /* We need to share an EQ for this hdwq */ |
---|
| 9087 | + eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); |
---|
| 9088 | + eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; |
---|
| 9089 | + qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; |
---|
| 9090 | + } |
---|
| 9091 | + |
---|
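Editor's note: a concrete picture of the two EQ loops above, with illustrative numbers (4 present CPUs, 2 IRQ vectors, 4 hardware queues):

/*
 *   cpu  flag            eq  hdwq  result
 *   ---  --------------  --  ----  ------------------------------------
 *    0   FIRST_IRQ        0    0   first loop allocates hdwq[0].hba_eq
 *    1   FIRST_IRQ        1    1   first loop allocates hdwq[1].hba_eq
 *    2   -                0    2   second loop: hdwq[2] reuses hdwq[0]'s EQ
 *    3   -                1    3   second loop: hdwq[3] reuses hdwq[1]'s EQ
 *
 * Exactly one EQ exists per IRQ vector; every hardware queue whose CPU
 * maps to that vector ends up pointing at the same EQ.
 */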
| 9092 | + /* Allocate IO Path SLI4 CQ/WQs */ |
---|
| 9093 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 9094 | + if (lpfc_alloc_io_wq_cq(phba, idx)) |
---|
8353 | 9095 | goto out_error; |
---|
| 9096 | + } |
---|
8354 | 9097 | |
---|
8355 | 9098 | if (phba->nvmet_support) { |
---|
8356 | 9099 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { |
---|
| 9100 | + cpu = lpfc_find_cpu_handle(phba, idx, |
---|
| 9101 | + LPFC_FIND_BY_HDWQ); |
---|
8357 | 9102 | qdesc = lpfc_sli4_queue_alloc(phba, |
---|
8358 | 9103 | LPFC_DEFAULT_PAGE_SIZE, |
---|
8359 | 9104 | phba->sli4_hba.cq_esize, |
---|
8360 | | - phba->sli4_hba.cq_ecount); |
---|
| 9105 | + phba->sli4_hba.cq_ecount, |
---|
| 9106 | + cpu); |
---|
8361 | 9107 | if (!qdesc) { |
---|
8362 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8363 | | - "3142 Failed allocate NVME " |
---|
8364 | | - "CQ Set (%d)\n", idx); |
---|
| 9108 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9109 | + "3142 Failed allocate NVME " |
---|
| 9110 | + "CQ Set (%d)\n", idx); |
---|
8365 | 9111 | goto out_error; |
---|
8366 | 9112 | } |
---|
8367 | 9113 | qdesc->qe_valid = 1; |
---|
| 9114 | + qdesc->hdwq = idx; |
---|
| 9115 | + qdesc->chann = cpu; |
---|
8368 | 9116 | phba->sli4_hba.nvmet_cqset[idx] = qdesc; |
---|
8369 | 9117 | } |
---|
8370 | 9118 | } |
---|
.. | .. |
---|
8373 | 9121 | * Create Slow Path Completion Queues (CQs) |
---|
8374 | 9122 | */ |
---|
8375 | 9123 | |
---|
| 9124 | + cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); |
---|
8376 | 9125 | /* Create slow-path Mailbox Command Complete Queue */ |
---|
8377 | 9126 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8378 | 9127 | phba->sli4_hba.cq_esize, |
---|
8379 | | - phba->sli4_hba.cq_ecount); |
---|
| 9128 | + phba->sli4_hba.cq_ecount, cpu); |
---|
8380 | 9129 | if (!qdesc) { |
---|
8381 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9130 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8382 | 9131 | "0500 Failed allocate slow-path mailbox CQ\n"); |
---|
8383 | 9132 | goto out_error; |
---|
8384 | 9133 | } |
---|
.. | .. |
---|
8388 | 9137 | /* Create slow-path ELS Complete Queue */ |
---|
8389 | 9138 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8390 | 9139 | phba->sli4_hba.cq_esize, |
---|
8391 | | - phba->sli4_hba.cq_ecount); |
---|
| 9140 | + phba->sli4_hba.cq_ecount, cpu); |
---|
8392 | 9141 | if (!qdesc) { |
---|
8393 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9142 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8394 | 9143 | "0501 Failed allocate slow-path ELS CQ\n"); |
---|
8395 | 9144 | goto out_error; |
---|
8396 | 9145 | } |
---|
8397 | 9146 | qdesc->qe_valid = 1; |
---|
| 9147 | + qdesc->chann = cpu; |
---|
8398 | 9148 | phba->sli4_hba.els_cq = qdesc; |
---|
8399 | 9149 | |
---|
8400 | 9150 | |
---|
.. | .. |
---|
8406 | 9156 | |
---|
8407 | 9157 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8408 | 9158 | phba->sli4_hba.mq_esize, |
---|
8409 | | - phba->sli4_hba.mq_ecount); |
---|
| 9159 | + phba->sli4_hba.mq_ecount, cpu); |
---|
8410 | 9160 | if (!qdesc) { |
---|
8411 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9161 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8412 | 9162 | "0505 Failed allocate slow-path MQ\n"); |
---|
8413 | 9163 | goto out_error; |
---|
8414 | 9164 | } |
---|
| 9165 | + qdesc->chann = cpu; |
---|
8415 | 9166 | phba->sli4_hba.mbx_wq = qdesc; |
---|
8416 | 9167 | |
---|
8417 | 9168 | /* |
---|
.. | .. |
---|
8421 | 9172 | /* Create slow-path ELS Work Queue */ |
---|
8422 | 9173 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8423 | 9174 | phba->sli4_hba.wq_esize, |
---|
8424 | | - phba->sli4_hba.wq_ecount); |
---|
| 9175 | + phba->sli4_hba.wq_ecount, cpu); |
---|
8425 | 9176 | if (!qdesc) { |
---|
8426 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9177 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8427 | 9178 | "0504 Failed allocate slow-path ELS WQ\n"); |
---|
8428 | 9179 | goto out_error; |
---|
8429 | 9180 | } |
---|
| 9181 | + qdesc->chann = cpu; |
---|
8430 | 9182 | phba->sli4_hba.els_wq = qdesc; |
---|
8431 | 9183 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
---|
8432 | 9184 | |
---|
.. | .. |
---|
8434 | 9186 | /* Create NVME LS Complete Queue */ |
---|
8435 | 9187 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8436 | 9188 | phba->sli4_hba.cq_esize, |
---|
8437 | | - phba->sli4_hba.cq_ecount); |
---|
| 9189 | + phba->sli4_hba.cq_ecount, cpu); |
---|
8438 | 9190 | if (!qdesc) { |
---|
8439 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9191 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8440 | 9192 | "6079 Failed allocate NVME LS CQ\n"); |
---|
8441 | 9193 | goto out_error; |
---|
8442 | 9194 | } |
---|
| 9195 | + qdesc->chann = cpu; |
---|
8443 | 9196 | qdesc->qe_valid = 1; |
---|
8444 | 9197 | phba->sli4_hba.nvmels_cq = qdesc; |
---|
8445 | 9198 | |
---|
8446 | 9199 | /* Create NVME LS Work Queue */ |
---|
8447 | 9200 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8448 | 9201 | phba->sli4_hba.wq_esize, |
---|
8449 | | - phba->sli4_hba.wq_ecount); |
---|
| 9202 | + phba->sli4_hba.wq_ecount, cpu); |
---|
8450 | 9203 | if (!qdesc) { |
---|
8451 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9204 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8452 | 9205 | "6080 Failed allocate NVME LS WQ\n"); |
---|
8453 | 9206 | goto out_error; |
---|
8454 | 9207 | } |
---|
| 9208 | + qdesc->chann = cpu; |
---|
8455 | 9209 | phba->sli4_hba.nvmels_wq = qdesc; |
---|
8456 | 9210 | list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
---|
8457 | 9211 | } |
---|
.. | .. |
---|
8463 | 9217 | /* Create Receive Queue for header */ |
---|
8464 | 9218 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8465 | 9219 | phba->sli4_hba.rq_esize, |
---|
8466 | | - phba->sli4_hba.rq_ecount); |
---|
| 9220 | + phba->sli4_hba.rq_ecount, cpu); |
---|
8467 | 9221 | if (!qdesc) { |
---|
8468 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9222 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8469 | 9223 | "0506 Failed allocate receive HRQ\n"); |
---|
8470 | 9224 | goto out_error; |
---|
8471 | 9225 | } |
---|
.. | .. |
---|
8474 | 9228 | /* Create Receive Queue for data */ |
---|
8475 | 9229 | qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
8476 | 9230 | phba->sli4_hba.rq_esize, |
---|
8477 | | - phba->sli4_hba.rq_ecount); |
---|
| 9231 | + phba->sli4_hba.rq_ecount, cpu); |
---|
8478 | 9232 | if (!qdesc) { |
---|
8479 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9233 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8480 | 9234 | "0507 Failed allocate receive DRQ\n"); |
---|
8481 | 9235 | goto out_error; |
---|
8482 | 9236 | } |
---|
8483 | 9237 | phba->sli4_hba.dat_rq = qdesc; |
---|
8484 | 9238 | |
---|
8485 | | - if (phba->nvmet_support) { |
---|
| 9239 | + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && |
---|
| 9240 | + phba->nvmet_support) { |
---|
8486 | 9241 | for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { |
---|
| 9242 | + cpu = lpfc_find_cpu_handle(phba, idx, |
---|
| 9243 | + LPFC_FIND_BY_HDWQ); |
---|
8487 | 9244 | /* Create NVMET Receive Queue for header */ |
---|
8488 | 9245 | qdesc = lpfc_sli4_queue_alloc(phba, |
---|
8489 | 9246 | LPFC_DEFAULT_PAGE_SIZE, |
---|
8490 | 9247 | phba->sli4_hba.rq_esize, |
---|
8491 | | - LPFC_NVMET_RQE_DEF_COUNT); |
---|
| 9248 | + LPFC_NVMET_RQE_DEF_COUNT, |
---|
| 9249 | + cpu); |
---|
8492 | 9250 | if (!qdesc) { |
---|
8493 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9251 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8494 | 9252 | "3146 Failed allocate " |
---|
8495 | 9253 | "receive HRQ\n"); |
---|
8496 | 9254 | goto out_error; |
---|
8497 | 9255 | } |
---|
| 9256 | + qdesc->hdwq = idx; |
---|
8498 | 9257 | phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; |
---|
8499 | 9258 | |
---|
8500 | 9259 | /* Only needed for header of RQ pair */ |
---|
8501 | | - qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb), |
---|
8502 | | - GFP_KERNEL); |
---|
| 9260 | + qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), |
---|
| 9261 | + GFP_KERNEL, |
---|
| 9262 | + cpu_to_node(cpu)); |
---|
8503 | 9263 | if (qdesc->rqbp == NULL) { |
---|
8504 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9264 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8505 | 9265 | "6131 Failed allocate " |
---|
8506 | 9266 | "Header RQBP\n"); |
---|
8507 | 9267 | goto out_error; |
---|
.. | .. |
---|
8514 | 9274 | qdesc = lpfc_sli4_queue_alloc(phba, |
---|
8515 | 9275 | LPFC_DEFAULT_PAGE_SIZE, |
---|
8516 | 9276 | phba->sli4_hba.rq_esize, |
---|
8517 | | - LPFC_NVMET_RQE_DEF_COUNT); |
---|
| 9277 | + LPFC_NVMET_RQE_DEF_COUNT, |
---|
| 9278 | + cpu); |
---|
8518 | 9279 | if (!qdesc) { |
---|
8519 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9280 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8520 | 9281 | "3156 Failed allocate " |
---|
8521 | 9282 | "receive DRQ\n"); |
---|
8522 | 9283 | goto out_error; |
---|
8523 | 9284 | } |
---|
| 9285 | + qdesc->hdwq = idx; |
---|
8524 | 9286 | phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; |
---|
8525 | 9287 | } |
---|
8526 | 9288 | } |
---|
8527 | 9289 | |
---|
8528 | | - /* Create the Queues needed for Flash Optimized Fabric operations */ |
---|
8529 | | - if (phba->cfg_fof) |
---|
8530 | | - lpfc_fof_queue_create(phba); |
---|
| 9290 | + /* Clear NVME stats */ |
---|
| 9291 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
| 9292 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 9293 | + memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, |
---|
| 9294 | + sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); |
---|
| 9295 | + } |
---|
| 9296 | + } |
---|
| 9297 | + |
---|
| 9298 | + /* Clear SCSI stats */ |
---|
| 9299 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
---|
| 9300 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 9301 | + memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, |
---|
| 9302 | + sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); |
---|
| 9303 | + } |
---|
| 9304 | + } |
---|
| 9305 | + |
---|
8531 | 9306 | return 0; |
---|
8532 | 9307 | |
---|
8533 | 9308 | out_error: |
---|
.. | .. |
---|
8560 | 9335 | } |
---|
8561 | 9336 | |
---|
8562 | 9337 | static inline void |
---|
8563 | | -lpfc_sli4_release_queue_map(uint16_t **qmap) |
---|
| 9338 | +lpfc_sli4_release_hdwq(struct lpfc_hba *phba) |
---|
8564 | 9339 | { |
---|
8565 | | - if (*qmap != NULL) { |
---|
8566 | | - kfree(*qmap); |
---|
8567 | | - *qmap = NULL; |
---|
| 9340 | + struct lpfc_sli4_hdw_queue *hdwq; |
---|
| 9341 | + struct lpfc_queue *eq; |
---|
| 9342 | + uint32_t idx; |
---|
| 9343 | + |
---|
| 9344 | + hdwq = phba->sli4_hba.hdwq; |
---|
| 9345 | + |
---|
| 9346 | + /* Loop thru all Hardware Queues */ |
---|
| 9347 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 9348 | + /* Free the CQ/WQ corresponding to the Hardware Queue */ |
---|
| 9349 | + lpfc_sli4_queue_free(hdwq[idx].io_cq); |
---|
| 9350 | + lpfc_sli4_queue_free(hdwq[idx].io_wq); |
---|
| 9351 | + hdwq[idx].hba_eq = NULL; |
---|
| 9352 | + hdwq[idx].io_cq = NULL; |
---|
| 9353 | + hdwq[idx].io_wq = NULL; |
---|
| 9354 | + if (phba->cfg_xpsgl && !phba->nvmet_support) |
---|
| 9355 | + lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); |
---|
| 9356 | + lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); |
---|
| 9357 | + } |
---|
| 9358 | + /* Loop thru all IRQ vectors */ |
---|
| 9359 | + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { |
---|
| 9360 | + /* Free the EQ corresponding to the IRQ vector */ |
---|
| 9361 | + eq = phba->sli4_hba.hba_eq_hdl[idx].eq; |
---|
| 9362 | + lpfc_sli4_queue_free(eq); |
---|
| 9363 | + phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; |
---|
8568 | 9364 | } |
---|
8569 | 9365 | } |
---|
8570 | 9366 | |
---|
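Editor's note: the split into two loops in lpfc_sli4_release_hdwq() above is deliberate. io_cq and io_wq are owned by exactly one hardware queue and can be freed per hdwq, but an EQ may be shared by several hardware queues on the same IRQ vector; EQs are therefore freed once per vector through hba_eq_hdl[], while hdwq[].hba_eq is only set to NULL, which avoids double-freeing a shared EQ.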
.. | .. |
---|
8583 | 9379 | void |
---|
8584 | 9380 | lpfc_sli4_queue_destroy(struct lpfc_hba *phba) |
---|
8585 | 9381 | { |
---|
8586 | | - if (phba->cfg_fof) |
---|
8587 | | - lpfc_fof_queue_destroy(phba); |
---|
| 9382 | + /* |
---|
| 9383 | + * Set FREE_INIT before beginning to free the queues. |
---|
| 9384 | + * Wait until all users of the queues acknowledge the |
---|
| 9385 | + * release by clearing FREE_WAIT. |
---|
| 9386 | + */ |
---|
| 9387 | + spin_lock_irq(&phba->hbalock); |
---|
| 9388 | + phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; |
---|
| 9389 | + while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { |
---|
| 9390 | + spin_unlock_irq(&phba->hbalock); |
---|
| 9391 | + msleep(20); |
---|
| 9392 | + spin_lock_irq(&phba->hbalock); |
---|
| 9393 | + } |
---|
| 9394 | + spin_unlock_irq(&phba->hbalock); |
---|
| 9395 | + |
---|
| 9396 | + lpfc_sli4_cleanup_poll_list(phba); |
---|
8588 | 9397 | |
---|
8589 | 9398 | /* Release HBA eqs */ |
---|
8590 | | - lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); |
---|
8591 | | - |
---|
8592 | | - /* Release FCP cqs */ |
---|
8593 | | - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, |
---|
8594 | | - phba->cfg_fcp_io_channel); |
---|
8595 | | - |
---|
8596 | | - /* Release FCP wqs */ |
---|
8597 | | - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, |
---|
8598 | | - phba->cfg_fcp_io_channel); |
---|
8599 | | - |
---|
8600 | | - /* Release FCP CQ mapping array */ |
---|
8601 | | - lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); |
---|
8602 | | - |
---|
8603 | | - /* Release NVME cqs */ |
---|
8604 | | - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, |
---|
8605 | | - phba->cfg_nvme_io_channel); |
---|
8606 | | - |
---|
8607 | | - /* Release NVME wqs */ |
---|
8608 | | - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, |
---|
8609 | | - phba->cfg_nvme_io_channel); |
---|
8610 | | - |
---|
8611 | | - /* Release NVME CQ mapping array */ |
---|
8612 | | - lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); |
---|
| 9399 | + if (phba->sli4_hba.hdwq) |
---|
| 9400 | + lpfc_sli4_release_hdwq(phba); |
---|
8613 | 9401 | |
---|
8614 | 9402 | if (phba->nvmet_support) { |
---|
8615 | 9403 | lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, |
---|
.. | .. |
---|
8645 | 9433 | |
---|
8646 | 9434 | /* Everything on this list has been freed */ |
---|
8647 | 9435 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); |
---|
| 9436 | + |
---|
| 9437 | + /* Done with freeing the queues */ |
---|
| 9438 | + spin_lock_irq(&phba->hbalock); |
---|
| 9439 | + phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; |
---|
| 9440 | + spin_unlock_irq(&phba->hbalock); |
---|
8648 | 9441 | } |
---|
8649 | 9442 | |
---|
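Editor's note: the FREE_INIT/FREE_WAIT handshake added above is half of a two-flag protocol under hbalock. A hedged sketch of what the queue-user side is expected to do (illustrative only; the actual lpfc consumers live elsewhere in the driver):

static bool example_queue_user_enter(struct lpfc_hba *phba)
{
	bool ok = false;

	spin_lock_irq(&phba->hbalock);
	/* Refuse to start once teardown has set FREE_INIT... */
	if (!(phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT)) {
		/* ...otherwise mark an active user for the destroy
		 * path to wait on.
		 */
		phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
		ok = true;
	}
	spin_unlock_irq(&phba->hbalock);
	return ok;
}

static void example_queue_user_exit(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	/* Clearing FREE_WAIT lets lpfc_sli4_queue_destroy() proceed */
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
}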
8650 | 9443 | int |
---|
.. | .. |
---|
8675 | 9468 | int rc; |
---|
8676 | 9469 | |
---|
8677 | 9470 | if (!eq || !cq || !wq) { |
---|
8678 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9471 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8679 | 9472 | "6085 Fast-path %s (%d) not allocated\n", |
---|
8680 | 9473 | ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); |
---|
8681 | 9474 | return -ENOMEM; |
---|
.. | .. |
---|
8685 | 9478 | rc = lpfc_cq_create(phba, cq, eq, |
---|
8686 | 9479 | (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype); |
---|
8687 | 9480 | if (rc) { |
---|
8688 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8689 | | - "6086 Failed setup of CQ (%d), rc = 0x%x\n", |
---|
8690 | | - qidx, (uint32_t)rc); |
---|
| 9481 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9482 | + "6086 Failed setup of CQ (%d), rc = 0x%x\n", |
---|
| 9483 | + qidx, (uint32_t)rc); |
---|
8691 | 9484 | return rc; |
---|
8692 | 9485 | } |
---|
8693 | | - cq->chann = qidx; |
---|
8694 | 9486 | |
---|
8695 | 9487 | if (qtype != LPFC_MBOX) { |
---|
8696 | | - /* Setup nvme_cq_map for fast lookup */ |
---|
| 9488 | + /* Setup cq_map for fast lookup */ |
---|
8697 | 9489 | if (cq_map) |
---|
8698 | 9490 | *cq_map = cq->queue_id; |
---|
8699 | 9491 | |
---|
.. | .. |
---|
8704 | 9496 | /* create the wq */ |
---|
8705 | 9497 | rc = lpfc_wq_create(phba, wq, cq, qtype); |
---|
8706 | 9498 | if (rc) { |
---|
8707 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8708 | | - "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n", |
---|
| 9499 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9500 | + "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", |
---|
8709 | 9501 | qidx, (uint32_t)rc); |
---|
8710 | 9502 | /* no need to tear down cq - caller will do so */ |
---|
8711 | 9503 | return rc; |
---|
8712 | 9504 | } |
---|
8713 | | - wq->chann = qidx; |
---|
8714 | 9505 | |
---|
8715 | 9506 | /* Bind this CQ/WQ to the NVME ring */ |
---|
8716 | 9507 | pring = wq->pring; |
---|
.. | .. |
---|
8723 | 9514 | } else { |
---|
8724 | 9515 | rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); |
---|
8725 | 9516 | if (rc) { |
---|
8726 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8727 | | - "0539 Failed setup of slow-path MQ: " |
---|
8728 | | - "rc = 0x%x\n", rc); |
---|
| 9517 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9518 | + "0539 Failed setup of slow-path MQ: " |
---|
| 9519 | + "rc = 0x%x\n", rc); |
---|
8729 | 9520 | /* no need to tear down cq - caller will do so */ |
---|
8730 | 9521 | return rc; |
---|
8731 | 9522 | } |
---|
.. | .. |
---|
8737 | 9528 | } |
---|
8738 | 9529 | |
---|
8739 | 9530 | return 0; |
---|
| 9531 | +} |
---|
| 9532 | + |
---|
| 9533 | +/** |
---|
| 9534 | + * lpfc_setup_cq_lookup - Setup the CQ lookup table |
---|
| 9535 | + * @phba: pointer to lpfc hba data structure. |
---|
| 9536 | + * |
---|
| 9537 | + * This routine will populate the cq_lookup table by all |
---|
| 9538 | + * available CQ queue_id's. |
---|
| 9539 | + **/ |
---|
| 9540 | +static void |
---|
| 9541 | +lpfc_setup_cq_lookup(struct lpfc_hba *phba) |
---|
| 9542 | +{ |
---|
| 9543 | + struct lpfc_queue *eq, *childq; |
---|
| 9544 | + int qidx; |
---|
| 9545 | + |
---|
| 9546 | + memset(phba->sli4_hba.cq_lookup, 0, |
---|
| 9547 | + (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); |
---|
| 9548 | + /* Loop thru all IRQ vectors */ |
---|
| 9549 | + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
---|
| 9550 | + /* Get the EQ corresponding to the IRQ vector */ |
---|
| 9551 | + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
---|
| 9552 | + if (!eq) |
---|
| 9553 | + continue; |
---|
| 9554 | + /* Loop through all CQs associated with that EQ */ |
---|
| 9555 | + list_for_each_entry(childq, &eq->child_list, list) { |
---|
| 9556 | + if (childq->queue_id > phba->sli4_hba.cq_max) |
---|
| 9557 | + continue; |
---|
| 9558 | + if (childq->subtype == LPFC_IO) |
---|
| 9559 | + phba->sli4_hba.cq_lookup[childq->queue_id] = |
---|
| 9560 | + childq; |
---|
| 9561 | + } |
---|
| 9562 | + } |
---|
8740 | 9563 | } |
---|
8741 | 9564 | |
---|
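Editor's note: once lpfc_setup_cq_lookup() has filled the table, the EQ interrupt path can resolve a completion's CQ by direct index instead of walking the EQ's child list. A sketch of the lookup side, using the fields referenced above:

static struct lpfc_queue *
example_cq_from_cqid(struct lpfc_hba *phba, uint16_t cqid)
{
	/* Out-of-range IDs fall back to the slow list walk */
	if (cqid > phba->sli4_hba.cq_max)
		return NULL;
	return phba->sli4_hba.cq_lookup[cqid];
}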
8742 | 9565 | /** |
---|
.. | .. |
---|
8756 | 9579 | { |
---|
8757 | 9580 | uint32_t shdr_status, shdr_add_status; |
---|
8758 | 9581 | union lpfc_sli4_cfg_shdr *shdr; |
---|
| 9582 | + struct lpfc_vector_map_info *cpup; |
---|
| 9583 | + struct lpfc_sli4_hdw_queue *qp; |
---|
8759 | 9584 | LPFC_MBOXQ_t *mboxq; |
---|
8760 | | - int qidx; |
---|
8761 | | - uint32_t length, io_channel; |
---|
| 9585 | + int qidx, cpu; |
---|
| 9586 | + uint32_t length, usdelay; |
---|
8762 | 9587 | int rc = -ENOMEM; |
---|
8763 | 9588 | |
---|
8764 | 9589 | /* Check for dual-ULP support */ |
---|
8765 | 9590 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
---|
8766 | 9591 | if (!mboxq) { |
---|
8767 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9592 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8768 | 9593 | "3249 Unable to allocate memory for " |
---|
8769 | 9594 | "QUERY_FW_CFG mailbox command\n"); |
---|
8770 | 9595 | return -ENOMEM; |
---|
.. | .. |
---|
8782 | 9607 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
---|
8783 | 9608 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
---|
8784 | 9609 | if (shdr_status || shdr_add_status || rc) { |
---|
8785 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9610 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8786 | 9611 | "3250 QUERY_FW_CFG mailbox failed with status " |
---|
8787 | 9612 | "x%x add_status x%x, mbx status x%x\n", |
---|
8788 | 9613 | shdr_status, shdr_add_status, rc); |
---|
8789 | | - if (rc != MBX_TIMEOUT) |
---|
8790 | | - mempool_free(mboxq, phba->mbox_mem_pool); |
---|
| 9614 | + mempool_free(mboxq, phba->mbox_mem_pool); |
---|
8791 | 9615 | rc = -ENXIO; |
---|
8792 | 9616 | goto out_error; |
---|
8793 | 9617 | } |
---|
.. | .. |
---|
8803 | 9627 | "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, |
---|
8804 | 9628 | phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); |
---|
8805 | 9629 | |
---|
8806 | | - if (rc != MBX_TIMEOUT) |
---|
8807 | | - mempool_free(mboxq, phba->mbox_mem_pool); |
---|
| 9630 | + mempool_free(mboxq, phba->mbox_mem_pool); |
---|
8808 | 9631 | |
---|
8809 | 9632 | /* |
---|
8810 | 9633 | * Set up HBA Event Queues (EQs) |
---|
8811 | 9634 | */ |
---|
8812 | | - io_channel = phba->io_channel_irqs; |
---|
| 9635 | + qp = phba->sli4_hba.hdwq; |
---|
8813 | 9636 | |
---|
8814 | 9637 | /* Set up HBA event queue */ |
---|
8815 | | - if (io_channel && !phba->sli4_hba.hba_eq) { |
---|
8816 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9638 | + if (!qp) { |
---|
| 9639 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8817 | 9640 | "3147 Fast-path EQs not allocated\n"); |
---|
8818 | 9641 | rc = -ENOMEM; |
---|
8819 | 9642 | goto out_error; |
---|
8820 | 9643 | } |
---|
8821 | | - for (qidx = 0; qidx < io_channel; qidx++) { |
---|
8822 | | - if (!phba->sli4_hba.hba_eq[qidx]) { |
---|
8823 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8824 | | - "0522 Fast-path EQ (%d) not " |
---|
8825 | | - "allocated\n", qidx); |
---|
8826 | | - rc = -ENOMEM; |
---|
8827 | | - goto out_destroy; |
---|
| 9644 | + |
---|
| 9645 | + /* Loop thru all IRQ vectors */ |
---|
| 9646 | + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
---|
| 9647 | + /* Create HBA Event Queues (EQs) in order */ |
---|
| 9648 | + for_each_present_cpu(cpu) { |
---|
| 9649 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 9650 | + |
---|
| 9651 | + /* Look for the CPU thats using that vector with |
---|
| 9652 | + * LPFC_CPU_FIRST_IRQ set. |
---|
| 9653 | + */ |
---|
| 9654 | + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) |
---|
| 9655 | + continue; |
---|
| 9656 | + if (qidx != cpup->eq) |
---|
| 9657 | + continue; |
---|
| 9658 | + |
---|
| 9659 | + /* Create an EQ for that vector */ |
---|
| 9660 | + rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, |
---|
| 9661 | + phba->cfg_fcp_imax); |
---|
| 9662 | + if (rc) { |
---|
| 9663 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9664 | + "0523 Failed setup of fast-path" |
---|
| 9665 | + " EQ (%d), rc = 0x%x\n", |
---|
| 9666 | + cpup->eq, (uint32_t)rc); |
---|
| 9667 | + goto out_destroy; |
---|
| 9668 | + } |
---|
| 9669 | + |
---|
| 9670 | + /* Save the EQ for that vector in the hba_eq_hdl */ |
---|
| 9671 | + phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = |
---|
| 9672 | + qp[cpup->hdwq].hba_eq; |
---|
| 9673 | + |
---|
| 9674 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 9675 | + "2584 HBA EQ setup: queue[%d]-id=%d\n", |
---|
| 9676 | + cpup->eq, |
---|
| 9677 | + qp[cpup->hdwq].hba_eq->queue_id); |
---|
8828 | 9678 | } |
---|
8829 | | - rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], |
---|
8830 | | - phba->cfg_fcp_imax); |
---|
| 9679 | + } |
---|
| 9680 | + |
---|
| 9681 | + /* Loop thru all Hardware Queues */ |
---|
| 9682 | + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
---|
| 9683 | + cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); |
---|
| 9684 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 9685 | + |
---|
| 9686 | + /* Create the CQ/WQ corresponding to the Hardware Queue */ |
---|
| 9687 | + rc = lpfc_create_wq_cq(phba, |
---|
| 9688 | + phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, |
---|
| 9689 | + qp[qidx].io_cq, |
---|
| 9690 | + qp[qidx].io_wq, |
---|
| 9691 | + &phba->sli4_hba.hdwq[qidx].io_cq_map, |
---|
| 9692 | + qidx, |
---|
| 9693 | + LPFC_IO); |
---|
8831 | 9694 | if (rc) { |
---|
8832 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8833 | | - "0523 Failed setup of fast-path EQ " |
---|
8834 | | - "(%d), rc = 0x%x\n", qidx, |
---|
8835 | | - (uint32_t)rc); |
---|
8836 | | - goto out_destroy; |
---|
8837 | | - } |
---|
8838 | | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
8839 | | - "2584 HBA EQ setup: queue[%d]-id=%d\n", |
---|
8840 | | - qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); |
---|
8841 | | - } |
---|
8842 | | - |
---|
8843 | | - if (phba->cfg_nvme_io_channel) { |
---|
8844 | | - if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { |
---|
8845 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8846 | | - "6084 Fast-path NVME %s array not allocated\n", |
---|
8847 | | - (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ"); |
---|
8848 | | - rc = -ENOMEM; |
---|
8849 | | - goto out_destroy; |
---|
8850 | | - } |
---|
8851 | | - |
---|
8852 | | - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { |
---|
8853 | | - rc = lpfc_create_wq_cq(phba, |
---|
8854 | | - phba->sli4_hba.hba_eq[ |
---|
8855 | | - qidx % io_channel], |
---|
8856 | | - phba->sli4_hba.nvme_cq[qidx], |
---|
8857 | | - phba->sli4_hba.nvme_wq[qidx], |
---|
8858 | | - &phba->sli4_hba.nvme_cq_map[qidx], |
---|
8859 | | - qidx, LPFC_NVME); |
---|
8860 | | - if (rc) { |
---|
8861 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8862 | | - "6123 Failed to setup fastpath " |
---|
8863 | | - "NVME WQ/CQ (%d), rc = 0x%x\n", |
---|
8864 | | - qidx, (uint32_t)rc); |
---|
8865 | | - goto out_destroy; |
---|
8866 | | - } |
---|
8867 | | - } |
---|
8868 | | - } |
---|
8869 | | - |
---|
8870 | | - if (phba->cfg_fcp_io_channel) { |
---|
8871 | | - /* Set up fast-path FCP Response Complete Queue */ |
---|
8872 | | - if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { |
---|
8873 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8874 | | - "3148 Fast-path FCP %s array not allocated\n", |
---|
8875 | | - phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); |
---|
8876 | | - rc = -ENOMEM; |
---|
8877 | | - goto out_destroy; |
---|
8878 | | - } |
---|
8879 | | - |
---|
8880 | | - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { |
---|
8881 | | - rc = lpfc_create_wq_cq(phba, |
---|
8882 | | - phba->sli4_hba.hba_eq[ |
---|
8883 | | - qidx % io_channel], |
---|
8884 | | - phba->sli4_hba.fcp_cq[qidx], |
---|
8885 | | - phba->sli4_hba.fcp_wq[qidx], |
---|
8886 | | - &phba->sli4_hba.fcp_cq_map[qidx], |
---|
8887 | | - qidx, LPFC_FCP); |
---|
8888 | | - if (rc) { |
---|
8889 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9695 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8890 | 9696 | "0535 Failed to setup fastpath " |
---|
8891 | | - "FCP WQ/CQ (%d), rc = 0x%x\n", |
---|
| 9697 | + "IO WQ/CQ (%d), rc = 0x%x\n", |
---|
8892 | 9698 | qidx, (uint32_t)rc); |
---|
8893 | | - goto out_destroy; |
---|
8894 | | - } |
---|
| 9699 | + goto out_destroy; |
---|
8895 | 9700 | } |
---|
8896 | 9701 | } |
---|
8897 | 9702 | |
---|
.. | .. |
---|
8902 | 9707 | /* Set up slow-path MBOX CQ/MQ */ |
---|
8903 | 9708 | |
---|
8904 | 9709 | if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { |
---|
8905 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9710 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8906 | 9711 | "0528 %s not allocated\n", |
---|
8907 | 9712 | phba->sli4_hba.mbx_cq ? |
---|
8908 | 9713 | "Mailbox WQ" : "Mailbox CQ"); |
---|
.. | .. |
---|
8910 | 9715 | goto out_destroy; |
---|
8911 | 9716 | } |
---|
8912 | 9717 | |
---|
8913 | | - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], |
---|
| 9718 | + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, |
---|
8914 | 9719 | phba->sli4_hba.mbx_cq, |
---|
8915 | 9720 | phba->sli4_hba.mbx_wq, |
---|
8916 | 9721 | NULL, 0, LPFC_MBOX); |
---|
8917 | 9722 | if (rc) { |
---|
8918 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9723 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8919 | 9724 | "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", |
---|
8920 | 9725 | (uint32_t)rc); |
---|
8921 | 9726 | goto out_destroy; |
---|
8922 | 9727 | } |
---|
8923 | 9728 | if (phba->nvmet_support) { |
---|
8924 | 9729 | if (!phba->sli4_hba.nvmet_cqset) { |
---|
8925 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9730 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8926 | 9731 | "3165 Fast-path NVME CQ Set " |
---|
8927 | 9732 | "array not allocated\n"); |
---|
8928 | 9733 | rc = -ENOMEM; |
---|
.. | .. |
---|
8931 | 9736 | if (phba->cfg_nvmet_mrq > 1) { |
---|
8932 | 9737 | rc = lpfc_cq_create_set(phba, |
---|
8933 | 9738 | phba->sli4_hba.nvmet_cqset, |
---|
8934 | | - phba->sli4_hba.hba_eq, |
---|
| 9739 | + qp, |
---|
8935 | 9740 | LPFC_WCQ, LPFC_NVMET); |
---|
8936 | 9741 | if (rc) { |
---|
8937 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9742 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8938 | 9743 | "3164 Failed setup of NVME CQ " |
---|
8939 | 9744 | "Set, rc = 0x%x\n", |
---|
8940 | 9745 | (uint32_t)rc); |
---|
.. | .. |
---|
8943 | 9748 | } else { |
---|
8944 | 9749 | /* Set up NVMET Receive Complete Queue */ |
---|
8945 | 9750 | rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], |
---|
8946 | | - phba->sli4_hba.hba_eq[0], |
---|
| 9751 | + qp[0].hba_eq, |
---|
8947 | 9752 | LPFC_WCQ, LPFC_NVMET); |
---|
8948 | 9753 | if (rc) { |
---|
8949 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9754 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8950 | 9755 | "6089 Failed setup NVMET CQ: " |
---|
8951 | 9756 | "rc = 0x%x\n", (uint32_t)rc); |
---|
8952 | 9757 | goto out_destroy; |
---|
.. | .. |
---|
8957 | 9762 | "6090 NVMET CQ setup: cq-id=%d, " |
---|
8958 | 9763 | "parent eq-id=%d\n", |
---|
8959 | 9764 | phba->sli4_hba.nvmet_cqset[0]->queue_id, |
---|
8960 | | - phba->sli4_hba.hba_eq[0]->queue_id); |
---|
| 9765 | + qp[0].hba_eq->queue_id); |
---|
8961 | 9766 | } |
---|
8962 | 9767 | } |
---|
8963 | 9768 | |
---|
8964 | 9769 | /* Set up slow-path ELS WQ/CQ */ |
---|
8965 | 9770 | if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { |
---|
8966 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9771 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8967 | 9772 | "0530 ELS %s not allocated\n", |
---|
8968 | 9773 | phba->sli4_hba.els_cq ? "WQ" : "CQ"); |
---|
8969 | 9774 | rc = -ENOMEM; |
---|
8970 | 9775 | goto out_destroy; |
---|
8971 | 9776 | } |
---|
8972 | | - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], |
---|
8973 | | - phba->sli4_hba.els_cq, |
---|
8974 | | - phba->sli4_hba.els_wq, |
---|
8975 | | - NULL, 0, LPFC_ELS); |
---|
| 9777 | + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, |
---|
| 9778 | + phba->sli4_hba.els_cq, |
---|
| 9779 | + phba->sli4_hba.els_wq, |
---|
| 9780 | + NULL, 0, LPFC_ELS); |
---|
8976 | 9781 | if (rc) { |
---|
8977 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
8978 | | - "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", |
---|
8979 | | - (uint32_t)rc); |
---|
| 9782 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9783 | + "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", |
---|
| 9784 | + (uint32_t)rc); |
---|
8980 | 9785 | goto out_destroy; |
---|
8981 | 9786 | } |
---|
8982 | 9787 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
.. | .. |
---|
8984 | 9789 | phba->sli4_hba.els_wq->queue_id, |
---|
8985 | 9790 | phba->sli4_hba.els_cq->queue_id); |
---|
8986 | 9791 | |
---|
8987 | | - if (phba->cfg_nvme_io_channel) { |
---|
| 9792 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
8988 | 9793 | /* Set up NVME LS Complete Queue */ |
---|
8989 | 9794 | if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { |
---|
8990 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9795 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
8991 | 9796 | "6091 LS %s not allocated\n", |
---|
8992 | 9797 | phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); |
---|
8993 | 9798 | rc = -ENOMEM; |
---|
8994 | 9799 | goto out_destroy; |
---|
8995 | 9800 | } |
---|
8996 | | - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], |
---|
8997 | | - phba->sli4_hba.nvmels_cq, |
---|
8998 | | - phba->sli4_hba.nvmels_wq, |
---|
8999 | | - NULL, 0, LPFC_NVME_LS); |
---|
| 9801 | + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, |
---|
| 9802 | + phba->sli4_hba.nvmels_cq, |
---|
| 9803 | + phba->sli4_hba.nvmels_wq, |
---|
| 9804 | + NULL, 0, LPFC_NVME_LS); |
---|
9000 | 9805 | if (rc) { |
---|
9001 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
9002 | | - "0529 Failed setup of NVVME LS WQ/CQ: " |
---|
9003 | | - "rc = 0x%x\n", (uint32_t)rc); |
---|
| 9806 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9807 | + "0526 Failed setup of NVVME LS WQ/CQ: " |
---|
| 9808 | + "rc = 0x%x\n", (uint32_t)rc); |
---|
9004 | 9809 | goto out_destroy; |
---|
9005 | 9810 | } |
---|
9006 | 9811 | |
---|
.. | .. |
---|
9018 | 9823 | if ((!phba->sli4_hba.nvmet_cqset) || |
---|
9019 | 9824 | (!phba->sli4_hba.nvmet_mrq_hdr) || |
---|
9020 | 9825 | (!phba->sli4_hba.nvmet_mrq_data)) { |
---|
9021 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9826 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9022 | 9827 | "6130 MRQ CQ Queues not " |
---|
9023 | 9828 | "allocated\n"); |
---|
9024 | 9829 | rc = -ENOMEM; |
---|
.. | .. |
---|
9031 | 9836 | phba->sli4_hba.nvmet_cqset, |
---|
9032 | 9837 | LPFC_NVMET); |
---|
9033 | 9838 | if (rc) { |
---|
9034 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9839 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9035 | 9840 | "6098 Failed setup of NVMET " |
---|
9036 | 9841 | "MRQ: rc = 0x%x\n", |
---|
9037 | 9842 | (uint32_t)rc); |
---|
.. | .. |
---|
9045 | 9850 | phba->sli4_hba.nvmet_cqset[0], |
---|
9046 | 9851 | LPFC_NVMET); |
---|
9047 | 9852 | if (rc) { |
---|
9048 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9853 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9049 | 9854 | "6057 Failed setup of NVMET " |
---|
9050 | 9855 | "Receive Queue: rc = 0x%x\n", |
---|
9051 | 9856 | (uint32_t)rc); |
---|
.. | .. |
---|
9064 | 9869 | } |
---|
9065 | 9870 | |
---|
9066 | 9871 | if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { |
---|
9067 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9872 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9068 | 9873 | "0540 Receive Queue not allocated\n"); |
---|
9069 | 9874 | rc = -ENOMEM; |
---|
9070 | 9875 | goto out_destroy; |
---|
.. | .. |
---|
9073 | 9878 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, |
---|
9074 | 9879 | phba->sli4_hba.els_cq, LPFC_USOL); |
---|
9075 | 9880 | if (rc) { |
---|
9076 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 9881 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9077 | 9882 | "0541 Failed setup of Receive Queue: " |
---|
9078 | 9883 | "rc = 0x%x\n", (uint32_t)rc); |
---|
9079 | 9884 | goto out_destroy; |
---|
.. | .. |
---|
9086 | 9891 | phba->sli4_hba.dat_rq->queue_id, |
---|
9087 | 9892 | phba->sli4_hba.els_cq->queue_id); |
---|
9088 | 9893 | |
---|
9089 | | - if (phba->cfg_fof) { |
---|
9090 | | - rc = lpfc_fof_queue_setup(phba); |
---|
9091 | | - if (rc) { |
---|
9092 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
9093 | | - "0549 Failed setup of FOF Queues: " |
---|
9094 | | - "rc = 0x%x\n", rc); |
---|
| 9894 | + if (phba->cfg_fcp_imax) |
---|
| 9895 | + usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; |
---|
| 9896 | + else |
---|
| 9897 | + usdelay = 0; |
---|
| 9898 | + |
---|
| 9899 | + for (qidx = 0; qidx < phba->cfg_irq_chann; |
---|
| 9900 | + qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) |
---|
| 9901 | + lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, |
---|
| 9902 | + usdelay); |
---|
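
A worked example of the conversion above (assuming LPFC_SEC_TO_USEC is
1000000):

	/* cfg_fcp_imax = 50000 interrupts/sec
	 *   => usdelay = 1000000 / 50000 = 20 us of EQ coalescing delay.
	 * cfg_fcp_imax = 0 requests no coalescing (usdelay = 0).
	 * EQ IDs are programmed in batches of LPFC_MAX_EQ_DELAY_EQID_CNT
	 * per mailbox command, hence the stride in the loop above.
	 */
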
| 9903 | + |
---|
| 9904 | + if (phba->sli4_hba.cq_max) { |
---|
| 9905 | + kfree(phba->sli4_hba.cq_lookup); |
---|
| 9906 | + phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), |
---|
| 9907 | + sizeof(struct lpfc_queue *), GFP_KERNEL); |
---|
| 9908 | + if (!phba->sli4_hba.cq_lookup) { |
---|
| 9909 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 9910 | + "0549 Failed setup of CQ Lookup table: " |
---|
| 9911 | + "size 0x%x\n", phba->sli4_hba.cq_max); |
---|
| 9912 | + rc = -ENOMEM; |
---|
9095 | 9913 | goto out_destroy; |
---|
9096 | 9914 | } |
---|
| 9915 | + lpfc_setup_cq_lookup(phba); |
---|
9097 | 9916 | } |
---|
9098 | | - |
---|
9099 | | - for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) |
---|
9100 | | - lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, |
---|
9101 | | - phba->cfg_fcp_imax); |
---|
9102 | | - |
---|
9103 | 9917 | return 0; |
---|
9104 | 9918 | |
---|
9105 | 9919 | out_destroy: |
---|
.. | .. |
---|
9123 | 9937 | void |
---|
9124 | 9938 | lpfc_sli4_queue_unset(struct lpfc_hba *phba) |
---|
9125 | 9939 | { |
---|
| 9940 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 9941 | + struct lpfc_queue *eq; |
---|
9126 | 9942 | int qidx; |
---|
9127 | | - |
---|
9128 | | - /* Unset the queues created for Flash Optimized Fabric operations */ |
---|
9129 | | - if (phba->cfg_fof) |
---|
9130 | | - lpfc_fof_queue_destroy(phba); |
---|
9131 | 9943 | |
---|
9132 | 9944 | /* Unset mailbox command work queue */ |
---|
9133 | 9945 | if (phba->sli4_hba.mbx_wq) |
---|
.. | .. |
---|
9146 | 9958 | lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, |
---|
9147 | 9959 | phba->sli4_hba.dat_rq); |
---|
9148 | 9960 | |
---|
9149 | | - /* Unset FCP work queue */ |
---|
9150 | | - if (phba->sli4_hba.fcp_wq) |
---|
9151 | | - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) |
---|
9152 | | - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); |
---|
9153 | | - |
---|
9154 | | - /* Unset NVME work queue */ |
---|
9155 | | - if (phba->sli4_hba.nvme_wq) { |
---|
9156 | | - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) |
---|
9157 | | - lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); |
---|
9158 | | - } |
---|
9159 | | - |
---|
9160 | 9961 | /* Unset mailbox command complete queue */ |
---|
9161 | 9962 | if (phba->sli4_hba.mbx_cq) |
---|
9162 | 9963 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); |
---|
.. | .. |
---|
9168 | 9969 | /* Unset NVME LS complete queue */ |
---|
9169 | 9970 | if (phba->sli4_hba.nvmels_cq) |
---|
9170 | 9971 | lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); |
---|
9171 | | - |
---|
9172 | | - /* Unset NVME response complete queue */ |
---|
9173 | | - if (phba->sli4_hba.nvme_cq) |
---|
9174 | | - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) |
---|
9175 | | - lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); |
---|
9176 | 9972 | |
---|
9177 | 9973 | if (phba->nvmet_support) { |
---|
9178 | 9974 | /* Unset NVMET MRQ queue */ |
---|
.. | .. |
---|
9192 | 9988 | } |
---|
9193 | 9989 | } |
---|
9194 | 9990 | |
---|
9195 | | - /* Unset FCP response complete queue */ |
---|
9196 | | - if (phba->sli4_hba.fcp_cq) |
---|
9197 | | - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) |
---|
9198 | | - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); |
---|
| 9991 | + /* Unset fast-path SLI4 queues */ |
---|
| 9992 | + if (phba->sli4_hba.hdwq) { |
---|
| 9993 | + /* Loop thru all Hardware Queues */ |
---|
| 9994 | + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
---|
| 9995 | + /* Destroy the CQ/WQ corresponding to Hardware Queue */ |
---|
| 9996 | + qp = &phba->sli4_hba.hdwq[qidx]; |
---|
| 9997 | + lpfc_wq_destroy(phba, qp->io_wq); |
---|
| 9998 | + lpfc_cq_destroy(phba, qp->io_cq); |
---|
| 9999 | + } |
---|
| 10000 | + /* Loop thru all IRQ vectors */ |
---|
| 10001 | + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
---|
| 10002 | + /* Destroy the EQ corresponding to the IRQ vector */ |
---|
| 10003 | + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
---|
| 10004 | + lpfc_eq_destroy(phba, eq); |
---|
| 10005 | + } |
---|
| 10006 | + } |
---|
9199 | 10007 | |
---|
9200 | | - /* Unset fast-path event queue */ |
---|
9201 | | - if (phba->sli4_hba.hba_eq) |
---|
9202 | | - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) |
---|
9203 | | - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); |
---|
| 10008 | + kfree(phba->sli4_hba.cq_lookup); |
---|
| 10009 | + phba->sli4_hba.cq_lookup = NULL; |
---|
| 10010 | + phba->sli4_hba.cq_max = 0; |
---|
9204 | 10011 | } |
---|
9205 | 10012 | |
---|
9206 | 10013 | /** |
---|
.. | .. |
---|
9346 | 10153 | static void |
---|
9347 | 10154 | lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) |
---|
9348 | 10155 | { |
---|
9349 | | - LIST_HEAD(cqelist); |
---|
9350 | | - struct lpfc_cq_event *cqe; |
---|
| 10156 | + LIST_HEAD(cq_event_list); |
---|
| 10157 | + struct lpfc_cq_event *cq_event; |
---|
9351 | 10158 | unsigned long iflags; |
---|
9352 | 10159 | |
---|
9353 | 10160 | /* Retrieve all the pending WCQEs from pending WCQE lists */ |
---|
9354 | | - spin_lock_irqsave(&phba->hbalock, iflags); |
---|
9355 | | - /* Pending FCP XRI abort events */ |
---|
9356 | | - list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, |
---|
9357 | | - &cqelist); |
---|
9358 | | - /* Pending ELS XRI abort events */ |
---|
9359 | | - list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, |
---|
9360 | | - &cqelist); |
---|
9361 | | - /* Pending asynnc events */ |
---|
9362 | | - list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, |
---|
9363 | | - &cqelist); |
---|
9364 | | - spin_unlock_irqrestore(&phba->hbalock, iflags); |
---|
9365 | 10161 | |
---|
9366 | | - while (!list_empty(&cqelist)) { |
---|
9367 | | - list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); |
---|
9368 | | - lpfc_sli4_cq_event_release(phba, cqe); |
---|
| 10162 | + /* Pending ELS XRI abort events */ |
---|
| 10163 | + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); |
---|
| 10164 | + list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, |
---|
| 10165 | + &cq_event_list); |
---|
| 10166 | + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); |
---|
| 10167 | + |
---|
| 10168 | + /* Pending async events */
---|
| 10169 | + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); |
---|
| 10170 | + list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, |
---|
| 10171 | + &cq_event_list); |
---|
| 10172 | + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); |
---|
| 10173 | + |
---|
| 10174 | + while (!list_empty(&cq_event_list)) { |
---|
| 10175 | + list_remove_head(&cq_event_list, cq_event, |
---|
| 10176 | + struct lpfc_cq_event, list); |
---|
| 10177 | + lpfc_sli4_cq_event_release(phba, cq_event); |
---|
9369 | 10178 | } |
---|
9370 | 10179 | } |
---|
9371 | 10180 | |
---|
.. | .. |
---|
9399 | 10208 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
---|
9400 | 10209 | GFP_KERNEL); |
---|
9401 | 10210 | if (!mboxq) { |
---|
9402 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10211 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9403 | 10212 | "0494 Unable to allocate memory for " |
---|
9404 | 10213 | "issuing SLI_FUNCTION_RESET mailbox " |
---|
9405 | 10214 | "command\n"); |
---|
.. | .. |
---|
9416 | 10225 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
---|
9417 | 10226 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, |
---|
9418 | 10227 | &shdr->response); |
---|
9419 | | - if (rc != MBX_TIMEOUT) |
---|
9420 | | - mempool_free(mboxq, phba->mbox_mem_pool); |
---|
| 10228 | + mempool_free(mboxq, phba->mbox_mem_pool); |
---|
9421 | 10229 | if (shdr_status || shdr_add_status || rc) { |
---|
9422 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10230 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9423 | 10231 | "0495 SLI_FUNCTION_RESET mailbox " |
---|
9424 | 10232 | "failed with status x%x add_status x%x," |
---|
9425 | 10233 | " mbx status x%x\n", |
---|
.. | .. |
---|
9451 | 10259 | phba->sli4_hba.u.if_type2.ERR1regaddr); |
---|
9452 | 10260 | phba->work_status[1] = readl( |
---|
9453 | 10261 | phba->sli4_hba.u.if_type2.ERR2regaddr); |
---|
9454 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10262 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9455 | 10263 | "2890 Port not ready, port status reg " |
---|
9456 | 10264 | "0x%x error 1=0x%x, error 2=0x%x\n", |
---|
9457 | 10265 | reg_data.word0, |
---|
.. | .. |
---|
9493 | 10301 | out: |
---|
9494 | 10302 | /* Catch the not-ready port failure after a port reset. */ |
---|
9495 | 10303 | if (rc) { |
---|
9496 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10304 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9497 | 10305 | "3317 HBA not functional: IP Reset Failed " |
---|
9498 | 10306 | "try: echo fw_reset > board_mode\n"); |
---|
9499 | 10307 | rc = -ENODEV; |
---|
.. | .. |
---|
9516 | 10324 | static int |
---|
9517 | 10325 | lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) |
---|
9518 | 10326 | { |
---|
9519 | | - struct pci_dev *pdev; |
---|
| 10327 | + struct pci_dev *pdev = phba->pcidev; |
---|
9520 | 10328 | unsigned long bar0map_len, bar1map_len, bar2map_len; |
---|
9521 | | - int error = -ENODEV; |
---|
| 10329 | + int error; |
---|
9522 | 10330 | uint32_t if_type; |
---|
9523 | 10331 | |
---|
9524 | | - /* Obtain PCI device reference */ |
---|
9525 | | - if (!phba->pcidev) |
---|
9526 | | - return error; |
---|
9527 | | - else |
---|
9528 | | - pdev = phba->pcidev; |
---|
| 10332 | + if (!pdev) |
---|
| 10333 | + return -ENODEV; |
---|
9529 | 10334 | |
---|
9530 | 10335 | /* Set the device DMA mask size */ |
---|
9531 | | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 |
---|
9532 | | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { |
---|
9533 | | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 |
---|
9534 | | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { |
---|
9535 | | - return error; |
---|
9536 | | - } |
---|
9537 | | - } |
---|
| 10336 | + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
---|
| 10337 | + if (error) |
---|
| 10338 | + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
---|
| 10339 | + if (error) |
---|
| 10340 | + return error; |
---|
9538 | 10341 | |
---|
9539 | 10342 | /* |
---|
9540 | 10343 | * The BARs and register set definitions and offset locations are |
---|
.. | .. |
---|
9542 | 10345 | */ |
---|
9543 | 10346 | if (pci_read_config_dword(pdev, LPFC_SLI_INTF, |
---|
9544 | 10347 | &phba->sli4_hba.sli_intf.word0)) { |
---|
9545 | | - return error; |
---|
| 10348 | + return -ENODEV; |
---|
9546 | 10349 | } |
---|
9547 | 10350 | |
---|
9548 | 10351 | /* There is no SLI3 failback for SLI4 devices. */ |
---|
9549 | 10352 | if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != |
---|
9550 | 10353 | LPFC_SLI_INTF_VALID) { |
---|
9551 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10354 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9552 | 10355 | "2894 SLI_INTF reg contents invalid " |
---|
9553 | 10356 | "sli_intf reg 0x%x\n", |
---|
9554 | 10357 | phba->sli4_hba.sli_intf.word0); |
---|
9555 | | - return error; |
---|
| 10358 | + return -ENODEV; |
---|
9556 | 10359 | } |
---|
9557 | 10360 | |
---|
9558 | 10361 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
---|
.. | .. |
---|
9576 | 10379 | dev_printk(KERN_ERR, &pdev->dev, |
---|
9577 | 10380 | "ioremap failed for SLI4 PCI config " |
---|
9578 | 10381 | "registers.\n"); |
---|
9579 | | - goto out; |
---|
| 10382 | + return -ENODEV; |
---|
9580 | 10383 | } |
---|
9581 | 10384 | phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; |
---|
9582 | 10385 | /* Set up BAR0 PCI config space register memory map */ |
---|
.. | .. |
---|
9587 | 10390 | if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { |
---|
9588 | 10391 | dev_printk(KERN_ERR, &pdev->dev, |
---|
9589 | 10392 | "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); |
---|
9590 | | - goto out; |
---|
| 10393 | + return -ENODEV; |
---|
9591 | 10394 | } |
---|
9592 | 10395 | phba->sli4_hba.conf_regs_memmap_p = |
---|
9593 | 10396 | ioremap(phba->pci_bar0_map, bar0map_len); |
---|
.. | .. |
---|
9595 | 10398 | dev_printk(KERN_ERR, &pdev->dev, |
---|
9596 | 10399 | "ioremap failed for SLI4 PCI config " |
---|
9597 | 10400 | "registers.\n"); |
---|
9598 | | - goto out; |
---|
| 10401 | + return -ENODEV; |
---|
9599 | 10402 | } |
---|
9600 | 10403 | lpfc_sli4_bar0_register_memmap(phba, if_type); |
---|
9601 | 10404 | } |
---|
.. | .. |
---|
9641 | 10444 | if (!phba->sli4_hba.drbl_regs_memmap_p) { |
---|
9642 | 10445 | dev_err(&pdev->dev, |
---|
9643 | 10446 | "ioremap failed for SLI4 HBA doorbell registers.\n"); |
---|
| 10447 | + error = -ENOMEM; |
---|
9644 | 10448 | goto out_iounmap_conf; |
---|
9645 | 10449 | } |
---|
9646 | 10450 | phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; |
---|
.. | .. |
---|
9673 | 10477 | goto out_iounmap_all; |
---|
9674 | 10478 | } else { |
---|
9675 | 10479 | error = -ENOMEM; |
---|
9676 | | - goto out_iounmap_all; |
---|
| 10480 | + goto out_iounmap_ctrl; |
---|
9677 | 10481 | } |
---|
9678 | 10482 | } |
---|
9679 | 10483 | |
---|
.. | .. |
---|
9690 | 10494 | if (!phba->sli4_hba.dpp_regs_memmap_p) { |
---|
9691 | 10495 | dev_err(&pdev->dev, |
---|
9692 | 10496 | "ioremap failed for SLI4 HBA dpp registers.\n"); |
---|
9693 | | - goto out_iounmap_ctrl; |
---|
| 10497 | + error = -ENOMEM; |
---|
| 10498 | + goto out_iounmap_all; |
---|
9694 | 10499 | } |
---|
9695 | 10500 | phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; |
---|
9696 | 10501 | } |
---|
.. | .. |
---|
9700 | 10505 | case LPFC_SLI_INTF_IF_TYPE_0: |
---|
9701 | 10506 | case LPFC_SLI_INTF_IF_TYPE_2: |
---|
9702 | 10507 | phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; |
---|
9703 | | - phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release; |
---|
9704 | | - phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release; |
---|
| 10508 | + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; |
---|
| 10509 | + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; |
---|
9705 | 10510 | break; |
---|
9706 | 10511 | case LPFC_SLI_INTF_IF_TYPE_6: |
---|
9707 | 10512 | phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; |
---|
9708 | | - phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release; |
---|
9709 | | - phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release; |
---|
| 10513 | + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; |
---|
| 10514 | + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; |
---|
9710 | 10515 | break; |
---|
9711 | 10516 | default: |
---|
9712 | 10517 | break; |
---|
.. | .. |
---|
9715 | 10520 | return 0; |
---|
9716 | 10521 | |
---|
9717 | 10522 | out_iounmap_all: |
---|
9718 | | - iounmap(phba->sli4_hba.drbl_regs_memmap_p); |
---|
| 10523 | + if (phba->sli4_hba.drbl_regs_memmap_p) |
---|
| 10524 | + iounmap(phba->sli4_hba.drbl_regs_memmap_p); |
---|
9719 | 10525 | out_iounmap_ctrl: |
---|
9720 | | - iounmap(phba->sli4_hba.ctrl_regs_memmap_p); |
---|
| 10526 | + if (phba->sli4_hba.ctrl_regs_memmap_p) |
---|
| 10527 | + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); |
---|
9721 | 10528 | out_iounmap_conf: |
---|
9722 | 10529 | iounmap(phba->sli4_hba.conf_regs_memmap_p); |
---|
9723 | | -out: |
---|
| 10530 | + |
---|
9724 | 10531 | return error; |
---|
9725 | 10532 | } |
---|
9726 | 10533 | |
---|
.. | .. |
---|
9749 | 10556 | case LPFC_SLI_INTF_IF_TYPE_6: |
---|
9750 | 10557 | iounmap(phba->sli4_hba.drbl_regs_memmap_p); |
---|
9751 | 10558 | iounmap(phba->sli4_hba.conf_regs_memmap_p); |
---|
| 10559 | + if (phba->sli4_hba.dpp_regs_memmap_p) |
---|
| 10560 | + iounmap(phba->sli4_hba.dpp_regs_memmap_p); |
---|
9752 | 10561 | break; |
---|
9753 | 10562 | case LPFC_SLI_INTF_IF_TYPE_1: |
---|
9754 | 10563 | default: |
---|
.. | .. |
---|
9819 | 10628 | |
---|
9820 | 10629 | if (!pmb) { |
---|
9821 | 10630 | rc = -ENOMEM; |
---|
9822 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 10631 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
9823 | 10632 | "0474 Unable to allocate memory for issuing " |
---|
9824 | 10633 | "MBOX_CONFIG_MSI command\n"); |
---|
9825 | 10634 | goto mem_fail_out; |
---|
.. | .. |
---|
9902 | 10711 | /** |
---|
9903 | 10712 | * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. |
---|
9904 | 10713 | * @phba: pointer to lpfc hba data structure. |
---|
| 10714 | + * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). |
---|
9905 | 10715 | * |
---|
9906 | 10716 | * This routine is invoked to enable device interrupt and associate driver's |
---|
9907 | 10717 | * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface |
---|
.. | .. |
---|
9987 | 10797 | } |
---|
9988 | 10798 | |
---|
9989 | 10799 | /** |
---|
| 10800 | + * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue |
---|
| 10801 | + * @phba: pointer to lpfc hba data structure. |
---|
| 10802 | + * @id: EQ vector index or Hardware Queue index |
---|
| 10803 | + * @match: LPFC_FIND_BY_EQ = match by EQ |
---|
| 10804 | + * LPFC_FIND_BY_HDWQ = match by Hardware Queue |
---|
| 10805 | + * Return the CPU that matches the selection criteria |
---|
| 10806 | + */ |
---|
| 10807 | +static uint16_t |
---|
| 10808 | +lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) |
---|
| 10809 | +{ |
---|
| 10810 | + struct lpfc_vector_map_info *cpup; |
---|
| 10811 | + int cpu; |
---|
| 10812 | + |
---|
| 10813 | + /* Loop through all CPUs */ |
---|
| 10814 | + for_each_present_cpu(cpu) { |
---|
| 10815 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 10816 | + |
---|
| 10817 | + /* If we are matching by EQ, there may be multiple CPUs using |
---|
| 10818 | + * using the same vector, so select the one with |
---|
| 10819 | + * LPFC_CPU_FIRST_IRQ set. |
---|
| 10820 | + */ |
---|
| 10821 | + if ((match == LPFC_FIND_BY_EQ) && |
---|
| 10822 | + (cpup->flag & LPFC_CPU_FIRST_IRQ) && |
---|
| 10823 | + (cpup->eq == id)) |
---|
| 10824 | + return cpu; |
---|
| 10825 | + |
---|
| 10826 | + /* If matching by HDWQ, select the first CPU that matches */ |
---|
| 10827 | + if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) |
---|
| 10828 | + return cpu; |
---|
| 10829 | + } |
---|
| 10830 | + return 0; |
---|
| 10831 | +} |
---|
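
Usage is straightforward; a hedged sketch (queue indices illustrative):

	/* first CPU servicing EQ/IRQ vector 0 (its LPFC_CPU_FIRST_IRQ entry) */
	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);

	/* first CPU associated with hardware queue 3 */
	cpu = lpfc_find_cpu_handle(phba, 3, LPFC_FIND_BY_HDWQ);
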
| 10832 | + |
---|
| 10833 | +#ifdef CONFIG_X86 |
---|
| 10834 | +/** |
---|
| 10835 | + * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded |
---|
| 10836 | + * @phba: pointer to lpfc hba data structure. |
---|
| 10837 | + * @cpu: CPU map index |
---|
| 10838 | + * @phys_id: CPU package physical id |
---|
| 10839 | + * @core_id: CPU core id |
---|
| 10840 | + */ |
---|
| 10841 | +static int |
---|
| 10842 | +lpfc_find_hyper(struct lpfc_hba *phba, int cpu, |
---|
| 10843 | + uint16_t phys_id, uint16_t core_id) |
---|
| 10844 | +{ |
---|
| 10845 | + struct lpfc_vector_map_info *cpup; |
---|
| 10846 | + int idx; |
---|
| 10847 | + |
---|
| 10848 | + for_each_present_cpu(idx) { |
---|
| 10849 | + cpup = &phba->sli4_hba.cpu_map[idx]; |
---|
| 10850 | + /* Does the cpup match the one we are looking for */ |
---|
| 10851 | + if ((cpup->phys_id == phys_id) && |
---|
| 10852 | + (cpup->core_id == core_id) && |
---|
| 10853 | + (cpu != idx)) |
---|
| 10854 | + return 1; |
---|
| 10855 | + } |
---|
| 10856 | + return 0; |
---|
| 10857 | +} |
---|
| 10858 | +#endif |
---|
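
An example of what the scan detects (topology values illustrative): on a
two-socket, SMT-2 machine, two present CPUs can report the same package and
core.

	/* CPUs 0 and 32 both report phys_id 0 / core_id 0, so while
	 * building the map entry for CPU 32:
	 */
	if (lpfc_find_hyper(phba, 32, 0, 0))	/* finds sibling CPU 0 */
		cpup->flag |= LPFC_CPU_MAP_HYPER;
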
| 10859 | + |
---|
| 10860 | +/* |
---|
| 10861 | + * lpfc_assign_eq_map_info - Assigns eq for vector_map structure |
---|
| 10862 | + * @phba: pointer to lpfc hba data structure. |
---|
| 10863 | + * @eqidx: index for eq and irq vector |
---|
| 10864 | + * @flag: flags to set for vector_map structure |
---|
| 10865 | + * @cpu: cpu used to index vector_map structure |
---|
| 10866 | + * |
---|
| 10867 | + * The routine assigns eq info into vector_map structure |
---|
| 10868 | + */ |
---|
| 10869 | +static inline void |
---|
| 10870 | +lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, |
---|
| 10871 | + unsigned int cpu) |
---|
| 10872 | +{ |
---|
| 10873 | + struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 10874 | + struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); |
---|
| 10875 | + |
---|
| 10876 | + cpup->eq = eqidx; |
---|
| 10877 | + cpup->flag |= flag; |
---|
| 10878 | + |
---|
| 10879 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 10880 | + "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", |
---|
| 10881 | + cpu, eqhdl->irq, cpup->eq, cpup->flag); |
---|
| 10882 | +} |
---|
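
A sketch of the call site this helper is written for (simplified; the real
vector-setup loop carries more state):

	const struct cpumask *maskp;
	int idx;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/* first CPU in the vector's affinity mask owns the EQ */
		lpfc_assign_eq_map_info(phba, idx, LPFC_CPU_FIRST_IRQ,
					cpumask_first(maskp));
	}
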
| 10883 | + |
---|
| 10884 | +/** |
---|
| 10885 | + * lpfc_cpu_map_array_init - Initialize cpu_map structure |
---|
| 10886 | + * @phba: pointer to lpfc hba data structure. |
---|
| 10887 | + * |
---|
| 10888 | + * The routine initializes the cpu_map array structure |
---|
| 10889 | + */ |
---|
| 10890 | +static void |
---|
| 10891 | +lpfc_cpu_map_array_init(struct lpfc_hba *phba) |
---|
| 10892 | +{ |
---|
| 10893 | + struct lpfc_vector_map_info *cpup; |
---|
| 10894 | + struct lpfc_eq_intr_info *eqi; |
---|
| 10895 | + int cpu; |
---|
| 10896 | + |
---|
| 10897 | + for_each_possible_cpu(cpu) { |
---|
| 10898 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 10899 | + cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10900 | + cpup->core_id = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10901 | + cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10902 | + cpup->eq = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10903 | + cpup->flag = 0; |
---|
| 10904 | + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); |
---|
| 10905 | + INIT_LIST_HEAD(&eqi->list); |
---|
| 10906 | + eqi->icnt = 0; |
---|
| 10907 | + } |
---|
| 10908 | +} |
---|
| 10909 | + |
---|
| 10910 | +/** |
---|
| 10911 | + * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure |
---|
| 10912 | + * @phba: pointer to lpfc hba data structure. |
---|
| 10913 | + * |
---|
| 10914 | + * The routine initializes the hba_eq_hdl array structure |
---|
| 10915 | + */ |
---|
| 10916 | +static void |
---|
| 10917 | +lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) |
---|
| 10918 | +{ |
---|
| 10919 | + struct lpfc_hba_eq_hdl *eqhdl; |
---|
| 10920 | + int i; |
---|
| 10921 | + |
---|
| 10922 | + for (i = 0; i < phba->cfg_irq_chann; i++) { |
---|
| 10923 | + eqhdl = lpfc_get_eq_hdl(i); |
---|
| 10924 | + eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10925 | + eqhdl->phba = phba; |
---|
| 10926 | + } |
---|
| 10927 | +} |
---|
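
Both initializers are intended to run before any interrupt is requested, so
every map entry starts out at LPFC_VECTOR_MAP_EMPTY and is only filled in as
vectors are affinitized. A sketch of the ordering (the cfg_mode argument is
illustrative):

	lpfc_cpu_map_array_init(phba);		/* cpu_map entries -> EMPTY */
	lpfc_hba_eq_hdl_array_init(phba);	/* eq handles -> EMPTY */
	rc = lpfc_sli4_enable_intr(phba, cfg_mode);	/* fills them in */
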
| 10928 | + |
---|
| 10929 | +/** |
---|
9990 | 10930 | * lpfc_cpu_affinity_check - Check vector CPU affinity mappings |
---|
9991 | 10931 | * @phba: pointer to lpfc hba data structure. |
---|
9992 | 10932 | * @vectors: number of msix vectors allocated. |
---|
9993 | 10933 | * |
---|
9994 | 10934 | * The routine will figure out the CPU affinity assignment for every |
---|
9995 | | - * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated |
---|
9996 | | - * with a pointer to the CPU mask that defines ALL the CPUs this vector |
---|
9997 | | - * can be associated with. If the vector can be unquely associated with |
---|
9998 | | - * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. |
---|
| 10935 | + * MSI-X vector allocated for the HBA. |
---|
9999 | 10936 | * In addition, the CPU to IO channel mapping will be calculated |
---|
10000 | 10937 | * and the phba->sli4_hba.cpu_map array will reflect this. |
---|
10001 | 10938 | */ |
---|
10002 | 10939 | static void |
---|
10003 | 10940 | lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) |
---|
10004 | 10941 | { |
---|
| 10942 | + int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; |
---|
| 10943 | + int max_phys_id, min_phys_id; |
---|
| 10944 | + int max_core_id, min_core_id; |
---|
10005 | 10945 | struct lpfc_vector_map_info *cpup; |
---|
10006 | | - int index = 0; |
---|
10007 | | - int vec = 0; |
---|
10008 | | - int cpu; |
---|
| 10946 | + struct lpfc_vector_map_info *new_cpup; |
---|
10009 | 10947 | #ifdef CONFIG_X86 |
---|
10010 | 10948 | struct cpuinfo_x86 *cpuinfo; |
---|
10011 | 10949 | #endif |
---|
| 10950 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 10951 | + struct lpfc_hdwq_stat *c_stat; |
---|
| 10952 | +#endif |
---|
10012 | 10953 | |
---|
10013 | | - /* Init cpu_map array */ |
---|
10014 | | - memset(phba->sli4_hba.cpu_map, 0xff, |
---|
10015 | | - (sizeof(struct lpfc_vector_map_info) * |
---|
10016 | | - phba->sli4_hba.num_present_cpu)); |
---|
| 10954 | + max_phys_id = 0; |
---|
| 10955 | + min_phys_id = LPFC_VECTOR_MAP_EMPTY; |
---|
| 10956 | + max_core_id = 0; |
---|
| 10957 | + min_core_id = LPFC_VECTOR_MAP_EMPTY; |
---|
10017 | 10958 | |
---|
10018 | 10959 | /* Update CPU map with physical id and core id of each CPU */ |
---|
10019 | | - cpup = phba->sli4_hba.cpu_map; |
---|
10020 | | - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { |
---|
| 10960 | + for_each_present_cpu(cpu) { |
---|
| 10961 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
10021 | 10962 | #ifdef CONFIG_X86 |
---|
10022 | 10963 | cpuinfo = &cpu_data(cpu); |
---|
10023 | 10964 | cpup->phys_id = cpuinfo->phys_proc_id; |
---|
10024 | 10965 | cpup->core_id = cpuinfo->cpu_core_id; |
---|
| 10966 | + if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) |
---|
| 10967 | + cpup->flag |= LPFC_CPU_MAP_HYPER; |
---|
10025 | 10968 | #else |
---|
10026 | 10969 | /* No distinction between CPUs for other platforms */ |
---|
10027 | 10970 | cpup->phys_id = 0; |
---|
10028 | | - cpup->core_id = 0; |
---|
| 10971 | + cpup->core_id = cpu; |
---|
10029 | 10972 | #endif |
---|
10030 | | - cpup->channel_id = index; /* For now round robin */ |
---|
10031 | | - cpup->irq = pci_irq_vector(phba->pcidev, vec); |
---|
10032 | | - vec++; |
---|
10033 | | - if (vec >= vectors) |
---|
10034 | | - vec = 0; |
---|
10035 | | - index++; |
---|
10036 | | - if (index >= phba->cfg_fcp_io_channel) |
---|
10037 | | - index = 0; |
---|
10038 | | - cpup++; |
---|
| 10973 | + |
---|
| 10974 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 10975 | + "3328 CPU %d physid %d coreid %d flag x%x\n", |
---|
| 10976 | + cpu, cpup->phys_id, cpup->core_id, cpup->flag); |
---|
| 10977 | + |
---|
| 10978 | + if (cpup->phys_id > max_phys_id) |
---|
| 10979 | + max_phys_id = cpup->phys_id; |
---|
| 10980 | + if (cpup->phys_id < min_phys_id) |
---|
| 10981 | + min_phys_id = cpup->phys_id; |
---|
| 10982 | + |
---|
| 10983 | + if (cpup->core_id > max_core_id) |
---|
| 10984 | + max_core_id = cpup->core_id; |
---|
| 10985 | + if (cpup->core_id < min_core_id) |
---|
| 10986 | + min_core_id = cpup->core_id; |
---|
| 10987 | + } |
---|
| 10988 | + |
---|
| 10989 | + /* After looking at each irq vector assigned to this pcidev, it's
---|
| 10990 | + * possible to see that not ALL CPUs have been accounted for.
---|
| 10991 | + * Next we will set any unassigned (unaffinitized) cpu map
---|
| 10992 | + * entries to an IRQ on the same phys_id.
---|
| 10993 | + */ |
---|
| 10994 | + first_cpu = cpumask_first(cpu_present_mask); |
---|
| 10995 | + start_cpu = first_cpu; |
---|
| 10996 | + |
---|
| 10997 | + for_each_present_cpu(cpu) { |
---|
| 10998 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 10999 | + |
---|
| 11000 | + /* Is this CPU entry unassigned */ |
---|
| 11001 | + if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { |
---|
| 11002 | + /* Mark CPU as IRQ not assigned by the kernel */ |
---|
| 11003 | + cpup->flag |= LPFC_CPU_MAP_UNASSIGN; |
---|
| 11004 | + |
---|
| 11005 | + /* If so, find a new_cpup that's on the SAME
---|
| 11006 | + * phys_id as cpup. start_cpu will start where we
---|
| 11007 | + * left off so all unassigned entries don't get assigned
---|
| 11008 | + * the IRQ of the first entry. |
---|
| 11009 | + */ |
---|
| 11010 | + new_cpu = start_cpu; |
---|
| 11011 | + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 11012 | + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; |
---|
| 11013 | + if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && |
---|
| 11014 | + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && |
---|
| 11015 | + (new_cpup->phys_id == cpup->phys_id)) |
---|
| 11016 | + goto found_same; |
---|
| 11017 | + new_cpu = cpumask_next( |
---|
| 11018 | + new_cpu, cpu_present_mask); |
---|
| 11019 | + if (new_cpu == nr_cpumask_bits) |
---|
| 11020 | + new_cpu = first_cpu; |
---|
| 11021 | + } |
---|
| 11022 | + /* At this point, we leave the CPU as unassigned */ |
---|
| 11023 | + continue; |
---|
| 11024 | +found_same: |
---|
| 11025 | + /* We found a matching phys_id, so copy the IRQ info */ |
---|
| 11026 | + cpup->eq = new_cpup->eq; |
---|
| 11027 | + |
---|
| 11028 | + /* Bump start_cpu to the next slot to minimize the
---|
| 11029 | + * chance of having multiple unassigned CPU entries |
---|
| 11030 | + * selecting the same IRQ. |
---|
| 11031 | + */ |
---|
| 11032 | + start_cpu = cpumask_next(new_cpu, cpu_present_mask); |
---|
| 11033 | + if (start_cpu == nr_cpumask_bits) |
---|
| 11034 | + start_cpu = first_cpu; |
---|
| 11035 | + |
---|
| 11036 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 11037 | + "3337 Set Affinity: CPU %d " |
---|
| 11038 | + "eq %d from peer cpu %d same " |
---|
| 11039 | + "phys_id (%d)\n", |
---|
| 11040 | + cpu, cpup->eq, new_cpu, |
---|
| 11041 | + cpup->phys_id); |
---|
| 11042 | + } |
---|
| 11043 | + } |
---|
| 11044 | + |
---|
| 11045 | + /* Set any unassigned cpu map entries to an IRQ on any phys_id */
---|
| 11046 | + start_cpu = first_cpu; |
---|
| 11047 | + |
---|
| 11048 | + for_each_present_cpu(cpu) { |
---|
| 11049 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11050 | + |
---|
| 11051 | + /* Is this entry unassigned */ |
---|
| 11052 | + if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { |
---|
| 11053 | + /* Mark it as IRQ not assigned by the kernel */ |
---|
| 11054 | + cpup->flag |= LPFC_CPU_MAP_UNASSIGN; |
---|
| 11055 | + |
---|
| 11056 | + /* If so, find a new_cpup that's on ANY phys_id
---|
| 11057 | + * as cpup. start_cpu will start where we
---|
| 11058 | + * left off so all unassigned entries don't get |
---|
| 11059 | + * assigned the IRQ of the first entry. |
---|
| 11060 | + */ |
---|
| 11061 | + new_cpu = start_cpu; |
---|
| 11062 | + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 11063 | + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; |
---|
| 11064 | + if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && |
---|
| 11065 | + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) |
---|
| 11066 | + goto found_any; |
---|
| 11067 | + new_cpu = cpumask_next( |
---|
| 11068 | + new_cpu, cpu_present_mask); |
---|
| 11069 | + if (new_cpu == nr_cpumask_bits) |
---|
| 11070 | + new_cpu = first_cpu; |
---|
| 11071 | + } |
---|
| 11072 | + /* We should never leave an entry unassigned */ |
---|
| 11073 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
---|
| 11074 | + "3339 Set Affinity: CPU %d "
---|
| 11075 | + "eq %d UNASSIGNED\n",
---|
| 11076 | + cpu, cpup->eq);
---|
| 11077 | + continue; |
---|
| 11078 | +found_any: |
---|
| 11079 | + /* We found an available entry, copy the IRQ info */ |
---|
| 11080 | + cpup->eq = new_cpup->eq; |
---|
| 11081 | + |
---|
| 11082 | + /* Bump start_cpu to the next slot to minimize the
---|
| 11083 | + * chance of having multiple unassigned CPU entries |
---|
| 11084 | + * selecting the same IRQ. |
---|
| 11085 | + */ |
---|
| 11086 | + start_cpu = cpumask_next(new_cpu, cpu_present_mask); |
---|
| 11087 | + if (start_cpu == nr_cpumask_bits) |
---|
| 11088 | + start_cpu = first_cpu; |
---|
| 11089 | + |
---|
| 11090 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 11091 | + "3338 Set Affinity: CPU %d " |
---|
| 11092 | + "eq %d from peer cpu %d (%d/%d)\n", |
---|
| 11093 | + cpu, cpup->eq, new_cpu, |
---|
| 11094 | + new_cpup->phys_id, new_cpup->core_id); |
---|
| 11095 | + } |
---|
| 11096 | + } |
---|
| 11097 | + |
---|
| 11098 | + /* Assign hdwq indices that are unique across all cpus in the map |
---|
| 11099 | + * that are also FIRST_CPUs. |
---|
| 11100 | + */ |
---|
| 11101 | + idx = 0; |
---|
| 11102 | + for_each_present_cpu(cpu) { |
---|
| 11103 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11104 | + |
---|
| 11105 | + /* Only FIRST IRQs get a hdwq index assignment. */ |
---|
| 11106 | + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) |
---|
| 11107 | + continue; |
---|
| 11108 | + |
---|
| 11109 | + /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ |
---|
| 11110 | + cpup->hdwq = idx; |
---|
| 11111 | + idx++; |
---|
| 11112 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 11113 | + "3333 Set Affinity: CPU %d (phys %d core %d): " |
---|
| 11114 | + "hdwq %d eq %d flg x%x\n", |
---|
| 11115 | + cpu, cpup->phys_id, cpup->core_id, |
---|
| 11116 | + cpup->hdwq, cpup->eq, cpup->flag); |
---|
| 11117 | + } |
---|
| 11118 | + /* Associate a hdwq with each cpu_map entry |
---|
| 11119 | + * This will be 1 to 1 - hdwq to cpu, unless there are fewer
---|
| 11120 | + * hardware queues than CPUs. In that case we will just round-robin
---|
| 11121 | + * the available hardware queues as they get assigned to CPUs. |
---|
| 11122 | + * The next_idx is the idx from the FIRST_CPU loop above to account |
---|
| 11123 | + * for irq_chann < hdwq. The idx is used for round-robin assignments |
---|
| 11124 | + * and needs to start at 0. |
---|
| 11125 | + */ |
---|
| 11126 | + next_idx = idx; |
---|
| 11127 | + start_cpu = 0; |
---|
| 11128 | + idx = 0; |
---|
| 11129 | + for_each_present_cpu(cpu) { |
---|
| 11130 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11131 | + |
---|
| 11132 | + /* FIRST cpus are already mapped. */ |
---|
| 11133 | + if (cpup->flag & LPFC_CPU_FIRST_IRQ) |
---|
| 11134 | + continue; |
---|
| 11135 | + |
---|
| 11136 | + /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq |
---|
| 11137 | + * of the unassigned cpus to the next idx so that all |
---|
| 11138 | + * hdw queues are fully utilized. |
---|
| 11139 | + */ |
---|
| 11140 | + if (next_idx < phba->cfg_hdw_queue) { |
---|
| 11141 | + cpup->hdwq = next_idx; |
---|
| 11142 | + next_idx++; |
---|
| 11143 | + continue; |
---|
| 11144 | + } |
---|
| 11145 | + |
---|
| 11146 | + /* Not a First CPU and all hdw_queues are used. Reuse a |
---|
| 11147 | + * Hardware Queue for another CPU, so be smart about it |
---|
| 11148 | + * and pick one that has its IRQ/EQ mapped to the same phys_id |
---|
| 11149 | + * (CPU package) and core_id. |
---|
| 11150 | + */ |
---|
| 11151 | + new_cpu = start_cpu; |
---|
| 11152 | + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 11153 | + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; |
---|
| 11154 | + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && |
---|
| 11155 | + new_cpup->phys_id == cpup->phys_id && |
---|
| 11156 | + new_cpup->core_id == cpup->core_id) { |
---|
| 11157 | + goto found_hdwq; |
---|
| 11158 | + } |
---|
| 11159 | + new_cpu = cpumask_next(new_cpu, cpu_present_mask); |
---|
| 11160 | + if (new_cpu == nr_cpumask_bits) |
---|
| 11161 | + new_cpu = first_cpu; |
---|
| 11162 | + } |
---|
| 11163 | + |
---|
| 11164 | + /* If we can't match both phys_id and core_id, |
---|
| 11165 | + * settle for just a phys_id match. |
---|
| 11166 | + */ |
---|
| 11167 | + new_cpu = start_cpu; |
---|
| 11168 | + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { |
---|
| 11169 | + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; |
---|
| 11170 | + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && |
---|
| 11171 | + new_cpup->phys_id == cpup->phys_id) |
---|
| 11172 | + goto found_hdwq; |
---|
| 11173 | + |
---|
| 11174 | + new_cpu = cpumask_next(new_cpu, cpu_present_mask); |
---|
| 11175 | + if (new_cpu == nr_cpumask_bits) |
---|
| 11176 | + new_cpu = first_cpu; |
---|
| 11177 | + } |
---|
| 11178 | + |
---|
| 11179 | + /* Otherwise just round robin on cfg_hdw_queue */ |
---|
| 11180 | + cpup->hdwq = idx % phba->cfg_hdw_queue; |
---|
| 11181 | + idx++; |
---|
| 11182 | + goto logit; |
---|
| 11183 | + found_hdwq: |
---|
| 11184 | + /* We found an available entry, copy the hdwq info */
---|
| 11185 | + start_cpu = cpumask_next(new_cpu, cpu_present_mask); |
---|
| 11186 | + if (start_cpu == nr_cpumask_bits) |
---|
| 11187 | + start_cpu = first_cpu; |
---|
| 11188 | + cpup->hdwq = new_cpup->hdwq; |
---|
| 11189 | + logit: |
---|
| 11190 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 11191 | + "3335 Set Affinity: CPU %d (phys %d core %d): " |
---|
| 11192 | + "hdwq %d eq %d flg x%x\n", |
---|
| 11193 | + cpu, cpup->phys_id, cpup->core_id, |
---|
| 11194 | + cpup->hdwq, cpup->eq, cpup->flag); |
---|
| 11195 | + } |
---|
| 11196 | + |
---|
| 11197 | + /* |
---|
| 11198 | + * Initialize the cpu_map slots for not-present cpus in case |
---|
| 11199 | + * a cpu is hot-added. Perform a simple hdwq round robin assignment. |
---|
| 11200 | + */ |
---|
| 11201 | + idx = 0; |
---|
| 11202 | + for_each_possible_cpu(cpu) { |
---|
| 11203 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11204 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 11205 | + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); |
---|
| 11206 | + c_stat->hdwq_no = cpup->hdwq; |
---|
| 11207 | +#endif |
---|
| 11208 | + if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) |
---|
| 11209 | + continue; |
---|
| 11210 | + |
---|
| 11211 | + cpup->hdwq = idx++ % phba->cfg_hdw_queue; |
---|
| 11212 | +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
---|
| 11213 | + c_stat->hdwq_no = cpup->hdwq; |
---|
| 11214 | +#endif |
---|
| 11215 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
| 11216 | + "3340 Set Affinity: not present " |
---|
| 11217 | + "CPU %d hdwq %d\n", |
---|
| 11218 | + cpu, cpup->hdwq); |
---|
| 11219 | + } |
---|
| 11220 | + |
---|
| 11221 | + /* The cpu_map array will be used later during initialization |
---|
| 11222 | + * when EQ / CQ / WQs are allocated and configured. |
---|
| 11223 | + */ |
---|
| 11224 | + return; |
---|
| 11225 | +} |
---|
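
The net effect of the passes above is easiest to see on a small example
(topology and config values illustrative):

	/* 4 present CPUs, cfg_irq_chann = 2, cfg_hdw_queue = 4:
	 *
	 *   cpu 0: eq 0  hdwq 0  flag LPFC_CPU_FIRST_IRQ
	 *   cpu 1: eq 1  hdwq 1  flag LPFC_CPU_FIRST_IRQ
	 *   cpu 2: eq 0  hdwq 2   (spare hdwq handed out via next_idx)
	 *   cpu 3: eq 1  hdwq 3
	 */
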
| 11226 | + |
---|
| 11227 | +/** |
---|
| 11228 | + * lpfc_cpuhp_get_eq - Get the eqs to poll for a cpu going offline
---|
| 11229 | + * |
---|
| 11230 | + * @phba: pointer to lpfc hba data structure. |
---|
| 11231 | + * @cpu: cpu going offline |
---|
| 11232 | + * @eqlist: eq list to append to |
---|
| 11233 | + */ |
---|
| 11234 | +static int |
---|
| 11235 | +lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, |
---|
| 11236 | + struct list_head *eqlist) |
---|
| 11237 | +{ |
---|
| 11238 | + const struct cpumask *maskp; |
---|
| 11239 | + struct lpfc_queue *eq; |
---|
| 11240 | + struct cpumask *tmp; |
---|
| 11241 | + u16 idx; |
---|
| 11242 | + |
---|
| 11243 | + tmp = kzalloc(cpumask_size(), GFP_KERNEL); |
---|
| 11244 | + if (!tmp) |
---|
| 11245 | + return -ENOMEM; |
---|
| 11246 | + |
---|
| 11247 | + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { |
---|
| 11248 | + maskp = pci_irq_get_affinity(phba->pcidev, idx); |
---|
| 11249 | + if (!maskp) |
---|
| 11250 | + continue; |
---|
| 11251 | + /* |
---|
| 11252 | + * if irq is not affinitized to the cpu going offline,
---|
| 11253 | + * then we don't need to poll the eq attached |
---|
| 11254 | + * to it. |
---|
| 11255 | + */ |
---|
| 11256 | + if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) |
---|
| 11257 | + continue; |
---|
| 11258 | + /* Get the cpus that are online and are
---|
| 11259 | + * affinitized to this irq vector. If the
---|
| 11260 | + * count is more than 1 then cpuhp is not
---|
| 11261 | + * going to shut down this vector. Since
---|
| 11262 | + * this cpu has not gone offline yet, we need >1.
---|
| 11263 | + */ |
---|
| 11264 | + cpumask_and(tmp, maskp, cpu_online_mask); |
---|
| 11265 | + if (cpumask_weight(tmp) > 1) |
---|
| 11266 | + continue; |
---|
| 11267 | + |
---|
| 11268 | + /* Now that we have an irq to shutdown, get the eq |
---|
| 11269 | + * mapped to this irq. Note: multiple hdwq's in |
---|
| 11270 | + * the software can share an eq, but eventually |
---|
| 11271 | + * only one eq will be mapped to this vector.
---|
| 11272 | + */ |
---|
| 11273 | + eq = phba->sli4_hba.hba_eq_hdl[idx].eq; |
---|
| 11274 | + list_add(&eq->_poll_list, eqlist); |
---|
| 11275 | + } |
---|
| 11276 | + kfree(tmp); |
---|
| 11277 | + return 0; |
---|
| 11278 | +} |
---|
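
A concrete walk-through of the weight test (vector and CPU numbers
illustrative):

	/* Vector idx is affinitized to CPUs {4,5} and CPU 5 is already
	 * offline. When CPU 4 goes down:
	 *   cpumask_and(tmp, maskp, cpumask_of(4))   -> {4}, vector hits cpu
	 *   cpumask_and(tmp, maskp, cpu_online_mask) -> {4}, weight == 1
	 * so cpuhp will shut the vector down and its EQ is appended to
	 * eqlist for driver polling. Were CPU 5 still online, the weight
	 * would be 2 and the kernel would migrate the vector instead.
	 */
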
| 11279 | + |
---|
| 11280 | +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) |
---|
| 11281 | +{ |
---|
| 11282 | + if (phba->sli_rev != LPFC_SLI_REV4) |
---|
| 11283 | + return; |
---|
| 11284 | + |
---|
| 11285 | + cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, |
---|
| 11286 | + &phba->cpuhp); |
---|
| 11287 | + /* |
---|
| 11288 | + * unregistering the instance doesn't stop the polling |
---|
| 11289 | + * timer. Wait for the poll timer to retire. |
---|
| 11290 | + */ |
---|
| 11291 | + synchronize_rcu(); |
---|
| 11292 | + del_timer_sync(&phba->cpuhp_poll_timer); |
---|
| 11293 | +} |
---|
| 11294 | + |
---|
| 11295 | +static void lpfc_cpuhp_remove(struct lpfc_hba *phba) |
---|
| 11296 | +{ |
---|
| 11297 | + if (phba->pport->fc_flag & FC_OFFLINE_MODE) |
---|
| 11298 | + return; |
---|
| 11299 | + |
---|
| 11300 | + __lpfc_cpuhp_remove(phba); |
---|
| 11301 | +} |
---|
| 11302 | + |
---|
| 11303 | +static void lpfc_cpuhp_add(struct lpfc_hba *phba) |
---|
| 11304 | +{ |
---|
| 11305 | + if (phba->sli_rev != LPFC_SLI_REV4) |
---|
| 11306 | + return; |
---|
| 11307 | + |
---|
| 11308 | + rcu_read_lock(); |
---|
| 11309 | + |
---|
| 11310 | + if (!list_empty(&phba->poll_list)) |
---|
| 11311 | + mod_timer(&phba->cpuhp_poll_timer, |
---|
| 11312 | + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); |
---|
| 11313 | + |
---|
| 11314 | + rcu_read_unlock(); |
---|
| 11315 | + |
---|
| 11316 | + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, |
---|
| 11317 | + &phba->cpuhp); |
---|
| 11318 | +} |
---|
| 11319 | + |
---|
| 11320 | +static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) |
---|
| 11321 | +{ |
---|
| 11322 | + if (phba->pport->load_flag & FC_UNLOADING) { |
---|
| 11323 | + *retval = -EAGAIN; |
---|
| 11324 | + return true; |
---|
| 11325 | + } |
---|
| 11326 | + |
---|
| 11327 | + if (phba->sli_rev != LPFC_SLI_REV4) { |
---|
| 11328 | + *retval = 0; |
---|
| 11329 | + return true; |
---|
| 11330 | + } |
---|
| 11331 | + |
---|
| 11332 | + /* proceed with the hotplug */ |
---|
| 11333 | + return false; |
---|
| 11334 | +} |
---|
| 11335 | + |
---|
| 11336 | +/** |
---|
| 11337 | + * lpfc_irq_set_aff - set IRQ affinity |
---|
| 11338 | + * @eqhdl: EQ handle |
---|
| 11339 | + * @cpu: cpu to set the IRQ affinity to
---|
| 11340 | + * |
---|
| 11341 | + **/ |
---|
| 11342 | +static inline void |
---|
| 11343 | +lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) |
---|
| 11344 | +{ |
---|
| 11345 | + cpumask_clear(&eqhdl->aff_mask); |
---|
| 11346 | + cpumask_set_cpu(cpu, &eqhdl->aff_mask); |
---|
| 11347 | + irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); |
---|
| 11348 | + irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); |
---|
| 11349 | +} |
---|
| 11350 | + |
---|
| 11351 | +/** |
---|
| 11352 | + * lpfc_irq_clear_aff - clear IRQ affinity |
---|
| 11353 | + * @eqhdl: EQ handle |
---|
| 11354 | + * |
---|
| 11355 | + **/ |
---|
| 11356 | +static inline void |
---|
| 11357 | +lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) |
---|
| 11358 | +{ |
---|
| 11359 | + cpumask_clear(&eqhdl->aff_mask); |
---|
| 11360 | + irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); |
---|
| 11361 | +} |
---|
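A pair of helpers like these typically brackets an IRQ's lifetime: pin after request_irq() succeeds, then unpin and drop the affinity hint before free_irq(). A sketch of that lifecycle on the same genirq API (the function and string names are illustrative):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/cpumask.h>

static int example_request_pinned_irq(unsigned int irq, irq_handler_t fn,
				      void *data, unsigned int cpu)
{
	int rc;

	rc = request_irq(irq, fn, 0, "example-eq", data);
	if (rc)
		return rc;
	/* Pin the vector and tell irqbalance to leave it alone. */
	irq_set_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(irq, cpumask_of(cpu));
	return 0;
}

static void example_free_pinned_irq(unsigned int irq, void *data)
{
	irq_clear_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(irq, NULL);	/* clear hint before free */
	free_irq(irq, data);
}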
| 11362 | + |
---|
| 11363 | +/** |
---|
| 11364 | + * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event |
---|
| 11365 | + * @phba: pointer to HBA context object. |
---|
| 11366 | + * @cpu: cpu going offline/online |
---|
| 11367 | + * @offline: true, cpu is going offline. false, cpu is coming online. |
---|
| 11368 | + * |
---|
| 11369 | + * If the cpu is going offline, we make a best-effort attempt to find
---|
| 11370 | + * the next online cpu on the phba's original_mask and migrate all
---|
| 11371 | + * offlining IRQ affinities to it.
---|
| 11372 | + * |
---|
| 11373 | + * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. |
---|
| 11374 | + * |
---|
| 11375 | + * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on |
---|
| 11376 | + * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. |
---|
| 11377 | + * |
---|
| 11378 | + **/ |
---|
| 11379 | +static void |
---|
| 11380 | +lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) |
---|
| 11381 | +{ |
---|
| 11382 | + struct lpfc_vector_map_info *cpup; |
---|
| 11383 | + struct cpumask *aff_mask; |
---|
| 11384 | + unsigned int cpu_select, cpu_next, idx; |
---|
| 11385 | + const struct cpumask *orig_mask; |
---|
| 11386 | + |
---|
| 11387 | + if (phba->irq_chann_mode == NORMAL_MODE) |
---|
| 11388 | + return; |
---|
| 11389 | + |
---|
| 11390 | + orig_mask = &phba->sli4_hba.irq_aff_mask; |
---|
| 11391 | + |
---|
| 11392 | + if (!cpumask_test_cpu(cpu, orig_mask)) |
---|
| 11393 | + return; |
---|
| 11394 | + |
---|
| 11395 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11396 | + |
---|
| 11397 | + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) |
---|
| 11398 | + return; |
---|
| 11399 | + |
---|
| 11400 | + if (offline) { |
---|
| 11401 | + /* Find next online CPU on original mask */ |
---|
| 11402 | + cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); |
---|
| 11403 | + cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); |
---|
| 11404 | + |
---|
| 11405 | + /* Found a valid CPU */ |
---|
| 11406 | + if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { |
---|
| 11407 | + /* Go through each eqhdl and ensure offlining |
---|
| 11408 | + * cpu aff_mask is migrated |
---|
| 11409 | + */ |
---|
| 11410 | + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { |
---|
| 11411 | + aff_mask = lpfc_get_aff_mask(idx); |
---|
| 11412 | + |
---|
| 11413 | + /* Migrate affinity */ |
---|
| 11414 | + if (cpumask_test_cpu(cpu, aff_mask)) |
---|
| 11415 | + lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), |
---|
| 11416 | + cpu_select); |
---|
| 11417 | + } |
---|
| 11418 | + } else { |
---|
| 11419 | + /* Rely on irqbalance if no online CPUs left on NUMA */ |
---|
| 11420 | + for (idx = 0; idx < phba->cfg_irq_chann; idx++) |
---|
| 11421 | + lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); |
---|
| 11422 | + } |
---|
| 11423 | + } else { |
---|
| 11424 | + /* Migrate affinity back to this CPU */ |
---|
| 11425 | + lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); |
---|
10039 | 11426 | } |
---|
10040 | 11427 | } |
---|
10041 | 11428 | |
---|
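cpumask_next_wrap() plus lpfc_next_online_cpu() implement "next online CPU after cpu, wrapping, restricted to the original mask", which is what the offline branch above relies on. A self-contained user-space sketch of the same search (names are illustrative):

#define _GNU_SOURCE
#include <sched.h>

/* Return the first CPU after 'cpu' (wrapping within 'ncpus') that is in
 * both the original affinity mask and the online mask, or -1 when 'cpu'
 * was the last such CPU.
 */
static int next_online_cpu_wrap(cpu_set_t *orig, cpu_set_t *online,
				int cpu, int ncpus)
{
	int i, n;

	for (i = 1; i < ncpus; i++) {
		n = (cpu + i) % ncpus;	/* wrap past the top */
		if (CPU_ISSET(n, orig) && CPU_ISSET(n, online))
			return n;
	}
	return -1;
}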
| 11429 | +static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) |
---|
| 11430 | +{ |
---|
| 11431 | + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); |
---|
| 11432 | + struct lpfc_queue *eq, *next; |
---|
| 11433 | + LIST_HEAD(eqlist); |
---|
| 11434 | + int retval; |
---|
| 11435 | + |
---|
| 11436 | + if (!phba) { |
---|
| 11437 | + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); |
---|
| 11438 | + return 0; |
---|
| 11439 | + } |
---|
| 11440 | + |
---|
| 11441 | + if (__lpfc_cpuhp_checks(phba, &retval)) |
---|
| 11442 | + return retval; |
---|
| 11443 | + |
---|
| 11444 | + lpfc_irq_rebalance(phba, cpu, true); |
---|
| 11445 | + |
---|
| 11446 | + retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); |
---|
| 11447 | + if (retval) |
---|
| 11448 | + return retval; |
---|
| 11449 | + |
---|
| 11450 | + /* start polling on these eqs */
---|
| 11451 | + list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { |
---|
| 11452 | + list_del_init(&eq->_poll_list); |
---|
| 11453 | + lpfc_sli4_start_polling(eq); |
---|
| 11454 | + } |
---|
| 11455 | + |
---|
| 11456 | + return 0; |
---|
| 11457 | +} |
---|
| 11458 | + |
---|
| 11459 | +static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) |
---|
| 11460 | +{ |
---|
| 11461 | + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); |
---|
| 11462 | + struct lpfc_queue *eq, *next; |
---|
| 11463 | + unsigned int n; |
---|
| 11464 | + int retval; |
---|
| 11465 | + |
---|
| 11466 | + if (!phba) { |
---|
| 11467 | + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); |
---|
| 11468 | + return 0; |
---|
| 11469 | + } |
---|
| 11470 | + |
---|
| 11471 | + if (__lpfc_cpuhp_checks(phba, &retval)) |
---|
| 11472 | + return retval; |
---|
| 11473 | + |
---|
| 11474 | + lpfc_irq_rebalance(phba, cpu, false); |
---|
| 11475 | + |
---|
| 11476 | + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { |
---|
| 11477 | + n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); |
---|
| 11478 | + if (n == cpu) |
---|
| 11479 | + lpfc_sli4_stop_polling(eq); |
---|
| 11480 | + } |
---|
| 11481 | + |
---|
| 11482 | + return 0; |
---|
| 11483 | +} |
---|
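These two callbacks plug into the multi-instance CPU-hotplug state held in lpfc_cpuhp_state; each HBA attaches itself as an instance via cpuhp_state_add_instance_nocalls(), as lpfc_cpuhp_add() shows. The registration itself happens at module init elsewhere in the driver; a sketch of the usual pattern (the state name string is an assumption):

#include <linux/cpuhotplug.h>

static int __init example_register_cpuhp(void)
{
	int rc;

	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				     "lpfc/sli4:online",
				     lpfc_cpu_online, lpfc_cpu_offline);
	if (rc < 0)
		return rc;
	lpfc_cpuhp_state = rc;	/* dynamic states return the state id */
	return 0;
}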
10042 | 11484 | |
---|
10043 | 11485 | /** |
---|
10044 | 11486 | * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device |
---|
10045 | 11487 | * @phba: pointer to lpfc hba data structure. |
---|
10046 | 11488 | * |
---|
10047 | 11489 | * This routine is invoked to enable the MSI-X interrupt vectors to device |
---|
10048 | | - * with SLI-4 interface spec. |
---|
| 11490 | + * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them |
---|
| 11491 | + * to cpus on the system. |
---|
| 11492 | + * |
---|
| 11493 | + * When cfg_irq_numa is enabled, the adapter will only allocate vectors for |
---|
| 11494 | + * the number of cpus on the same numa node as this adapter. The vectors are |
---|
| 11495 | + * allocated without requesting OS affinity mapping. A vector will be |
---|
| 11496 | + * allocated and assigned to each online and offline cpu. If the cpu is |
---|
| 11497 | + * online, then affinity will be set to that cpu. If the cpu is offline, then |
---|
| 11498 | + * affinity will be set to the nearest peer cpu within the numa node that is |
---|
| 11499 | + * online. If there are no online cpus within the numa node, affinity is not |
---|
| 11500 | + * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping |
---|
| 11501 | + * is consistent with the way cpu online/offline is handled when cfg_irq_numa is |
---|
| 11502 | + * configured. |
---|
| 11503 | + * |
---|
| 11504 | + * If numa mode is not enabled and more than 1 vector is allocated, then
---|
| 11505 | + * the driver relies on the managed irq interface, where the OS assigns
---|
| 11506 | + * vector-to-cpu affinity. The driver then uses that affinity mapping to
---|
| 11507 | + * set up its cpu mapping table.
---|
10049 | 11508 | * |
---|
10050 | 11509 | * Return codes |
---|
10051 | 11510 | * 0 - successful |
---|
.. | .. |
---|
10056 | 11515 | { |
---|
10057 | 11516 | int vectors, rc, index; |
---|
10058 | 11517 | char *name; |
---|
| 11518 | + const struct cpumask *aff_mask = NULL; |
---|
| 11519 | + unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; |
---|
| 11520 | + struct lpfc_vector_map_info *cpup; |
---|
| 11521 | + struct lpfc_hba_eq_hdl *eqhdl; |
---|
| 11522 | + const struct cpumask *maskp; |
---|
| 11523 | + unsigned int flags = PCI_IRQ_MSIX; |
---|
10059 | 11524 | |
---|
10060 | 11525 | /* Set up MSI-X multi-message vectors */ |
---|
10061 | | - vectors = phba->io_channel_irqs; |
---|
10062 | | - if (phba->cfg_fof) |
---|
10063 | | - vectors++; |
---|
| 11526 | + vectors = phba->cfg_irq_chann; |
---|
10064 | 11527 | |
---|
10065 | | - rc = pci_alloc_irq_vectors(phba->pcidev, |
---|
10066 | | - (phba->nvmet_support) ? 1 : 2, |
---|
10067 | | - vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); |
---|
| 11528 | + if (phba->irq_chann_mode != NORMAL_MODE) |
---|
| 11529 | + aff_mask = &phba->sli4_hba.irq_aff_mask; |
---|
| 11530 | + |
---|
| 11531 | + if (aff_mask) { |
---|
| 11532 | + cpu_cnt = cpumask_weight(aff_mask); |
---|
| 11533 | + vectors = min(phba->cfg_irq_chann, cpu_cnt); |
---|
| 11534 | + |
---|
| 11535 | + /* cpu: iterates over aff_mask including offline or online |
---|
| 11536 | + * cpu_select: iterates over online aff_mask to set affinity |
---|
| 11537 | + */ |
---|
| 11538 | + cpu = cpumask_first(aff_mask); |
---|
| 11539 | + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); |
---|
| 11540 | + } else { |
---|
| 11541 | + flags |= PCI_IRQ_AFFINITY; |
---|
| 11542 | + } |
---|
| 11543 | + |
---|
| 11544 | + rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); |
---|
10068 | 11545 | if (rc < 0) { |
---|
10069 | 11546 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
10070 | 11547 | "0484 PCI enable MSI-X failed (%d)\n", rc); |
---|
.. | .. |
---|
10074 | 11551 | |
---|
10075 | 11552 | /* Assign MSI-X vectors to interrupt handlers */ |
---|
10076 | 11553 | for (index = 0; index < vectors; index++) { |
---|
10077 | | - name = phba->sli4_hba.hba_eq_hdl[index].handler_name; |
---|
| 11554 | + eqhdl = lpfc_get_eq_hdl(index); |
---|
| 11555 | + name = eqhdl->handler_name; |
---|
10078 | 11556 | memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); |
---|
10079 | 11557 | snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, |
---|
10080 | 11558 | LPFC_DRIVER_HANDLER_NAME"%d", index); |
---|
10081 | 11559 | |
---|
10082 | | - phba->sli4_hba.hba_eq_hdl[index].idx = index; |
---|
10083 | | - phba->sli4_hba.hba_eq_hdl[index].phba = phba; |
---|
10084 | | - atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); |
---|
10085 | | - if (phba->cfg_fof && (index == (vectors - 1))) |
---|
10086 | | - rc = request_irq(pci_irq_vector(phba->pcidev, index), |
---|
10087 | | - &lpfc_sli4_fof_intr_handler, 0, |
---|
10088 | | - name, |
---|
10089 | | - &phba->sli4_hba.hba_eq_hdl[index]); |
---|
10090 | | - else |
---|
10091 | | - rc = request_irq(pci_irq_vector(phba->pcidev, index), |
---|
10092 | | - &lpfc_sli4_hba_intr_handler, 0, |
---|
10093 | | - name, |
---|
10094 | | - &phba->sli4_hba.hba_eq_hdl[index]); |
---|
| 11560 | + eqhdl->idx = index; |
---|
| 11561 | + rc = request_irq(pci_irq_vector(phba->pcidev, index), |
---|
| 11562 | + &lpfc_sli4_hba_intr_handler, 0, |
---|
| 11563 | + name, eqhdl); |
---|
10095 | 11564 | if (rc) { |
---|
10096 | 11565 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
---|
10097 | 11566 | "0486 MSI-X fast-path (%d) " |
---|
10098 | 11567 | "request_irq failed (%d)\n", index, rc); |
---|
10099 | 11568 | goto cfg_fail_out; |
---|
10100 | 11569 | } |
---|
| 11570 | + |
---|
| 11571 | + eqhdl->irq = pci_irq_vector(phba->pcidev, index); |
---|
| 11572 | + |
---|
| 11573 | + if (aff_mask) { |
---|
| 11574 | + /* If found a neighboring online cpu, set affinity */ |
---|
| 11575 | + if (cpu_select < nr_cpu_ids) |
---|
| 11576 | + lpfc_irq_set_aff(eqhdl, cpu_select); |
---|
| 11577 | + |
---|
| 11578 | + /* Assign EQ to cpu_map */ |
---|
| 11579 | + lpfc_assign_eq_map_info(phba, index, |
---|
| 11580 | + LPFC_CPU_FIRST_IRQ, |
---|
| 11581 | + cpu); |
---|
| 11582 | + |
---|
| 11583 | + /* Iterate to next offline or online cpu in aff_mask */ |
---|
| 11584 | + cpu = cpumask_next(cpu, aff_mask); |
---|
| 11585 | + |
---|
| 11586 | + /* Find next online cpu in aff_mask to set affinity */ |
---|
| 11587 | + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); |
---|
| 11588 | + } else if (vectors == 1) { |
---|
| 11589 | + cpu = cpumask_first(cpu_present_mask); |
---|
| 11590 | + lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, |
---|
| 11591 | + cpu); |
---|
| 11592 | + } else { |
---|
| 11593 | + maskp = pci_irq_get_affinity(phba->pcidev, index); |
---|
| 11594 | + |
---|
| 11595 | + /* Loop through all CPUs associated with vector index */ |
---|
| 11596 | + for_each_cpu_and(cpu, maskp, cpu_present_mask) { |
---|
| 11597 | + cpup = &phba->sli4_hba.cpu_map[cpu]; |
---|
| 11598 | + |
---|
| 11599 | + /* If this is the first CPU that's assigned to
---|
| 11600 | + * this vector, set LPFC_CPU_FIRST_IRQ.
---|
| 11601 | + *
---|
| 11602 | + * With certain platforms it's possible that irq
---|
| 11603 | + * vectors are affinitized to all the cpus.
---|
| 11604 | + * This can result in each cpu_map.eq being set
---|
| 11605 | + * to the last vector, overwriting all the
---|
| 11606 | + * previous cpu_map.eq entries. Ensure that
---|
| 11607 | + * each vector receives a place in cpu_map.
---|
| 11608 | + * A later call to lpfc_cpu_affinity_check()
---|
| 11609 | + * will leave things nicely balanced.
---|
| 11610 | + */ |
---|
| 11611 | + if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) |
---|
| 11612 | + continue; |
---|
| 11613 | + lpfc_assign_eq_map_info(phba, index, |
---|
| 11614 | + LPFC_CPU_FIRST_IRQ, |
---|
| 11615 | + cpu); |
---|
| 11616 | + break; |
---|
| 11617 | + } |
---|
| 11618 | + } |
---|
10101 | 11619 | } |
---|
10102 | 11620 | |
---|
10103 | | - if (phba->cfg_fof) |
---|
10104 | | - vectors--; |
---|
10105 | | - |
---|
10106 | | - if (vectors != phba->io_channel_irqs) { |
---|
10107 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 11621 | + if (vectors != phba->cfg_irq_chann) { |
---|
| 11622 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
10108 | 11623 | "3238 Reducing IO channels to match number of " |
---|
10109 | 11624 | "MSI-X vectors, requested %d got %d\n", |
---|
10110 | | - phba->io_channel_irqs, vectors); |
---|
10111 | | - if (phba->cfg_fcp_io_channel > vectors) |
---|
10112 | | - phba->cfg_fcp_io_channel = vectors; |
---|
10113 | | - if (phba->cfg_nvme_io_channel > vectors) |
---|
10114 | | - phba->cfg_nvme_io_channel = vectors; |
---|
10115 | | - if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) |
---|
10116 | | - phba->io_channel_irqs = phba->cfg_fcp_io_channel; |
---|
10117 | | - else |
---|
10118 | | - phba->io_channel_irqs = phba->cfg_nvme_io_channel; |
---|
| 11625 | + phba->cfg_irq_chann, vectors); |
---|
| 11626 | + if (phba->cfg_irq_chann > vectors) |
---|
| 11627 | + phba->cfg_irq_chann = vectors; |
---|
10119 | 11628 | } |
---|
10120 | | - lpfc_cpu_affinity_check(phba, vectors); |
---|
10121 | 11629 | |
---|
10122 | 11630 | return rc; |
---|
10123 | 11631 | |
---|
10124 | 11632 | cfg_fail_out: |
---|
10125 | 11633 | /* free the irq already requested */ |
---|
10126 | | - for (--index; index >= 0; index--) |
---|
10127 | | - free_irq(pci_irq_vector(phba->pcidev, index), |
---|
10128 | | - &phba->sli4_hba.hba_eq_hdl[index]); |
---|
| 11634 | + for (--index; index >= 0; index--) { |
---|
| 11635 | + eqhdl = lpfc_get_eq_hdl(index); |
---|
| 11636 | + lpfc_irq_clear_aff(eqhdl); |
---|
| 11637 | + irq_set_affinity_hint(eqhdl->irq, NULL); |
---|
| 11638 | + free_irq(eqhdl->irq, eqhdl); |
---|
| 11639 | + } |
---|
10129 | 11640 | |
---|
10130 | 11641 | /* Unconfigure MSI-X capability structure */ |
---|
10131 | 11642 | pci_free_irq_vectors(phba->pcidev); |
---|
.. | .. |
---|
10139 | 11650 | * @phba: pointer to lpfc hba data structure. |
---|
10140 | 11651 | * |
---|
10141 | 11652 | * This routine is invoked to enable the MSI interrupt mode to device with |
---|
10142 | | - * SLI-4 interface spec. The kernel function pci_enable_msi() is called |
---|
10143 | | - * to enable the MSI vector. The device driver is responsible for calling |
---|
10144 | | - * the request_irq() to register MSI vector with a interrupt the handler, |
---|
10145 | | - * which is done in this function. |
---|
| 11653 | + * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is |
---|
| 11654 | + * called to enable the MSI vector. The device driver is responsible for |
---|
| 11655 | + * calling request_irq() to register the MSI vector with an interrupt
---|
| 11656 | + * handler, which is done in this function.
---|
10146 | 11657 | * |
---|
10147 | 11658 | * Return codes |
---|
10148 | 11659 | * 0 - successful |
---|
.. | .. |
---|
10152 | 11663 | lpfc_sli4_enable_msi(struct lpfc_hba *phba) |
---|
10153 | 11664 | { |
---|
10154 | 11665 | int rc, index; |
---|
| 11666 | + unsigned int cpu; |
---|
| 11667 | + struct lpfc_hba_eq_hdl *eqhdl; |
---|
10155 | 11668 | |
---|
10156 | | - rc = pci_enable_msi(phba->pcidev); |
---|
10157 | | - if (!rc) |
---|
| 11669 | + rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, |
---|
| 11670 | + PCI_IRQ_MSI | PCI_IRQ_AFFINITY); |
---|
| 11671 | + if (rc > 0) |
---|
10158 | 11672 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
10159 | 11673 | "0487 PCI enable MSI mode success.\n"); |
---|
10160 | 11674 | else { |
---|
10161 | 11675 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
---|
10162 | 11676 | "0488 PCI enable MSI mode failed (%d)\n", rc); |
---|
10163 | | - return rc; |
---|
| 11677 | + return rc ? rc : -1; |
---|
10164 | 11678 | } |
---|
10165 | 11679 | |
---|
10166 | 11680 | rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, |
---|
10167 | 11681 | 0, LPFC_DRIVER_NAME, phba); |
---|
10168 | 11682 | if (rc) { |
---|
10169 | | - pci_disable_msi(phba->pcidev); |
---|
| 11683 | + pci_free_irq_vectors(phba->pcidev); |
---|
10170 | 11684 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
---|
10171 | 11685 | "0490 MSI request_irq failed (%d)\n", rc); |
---|
10172 | 11686 | return rc; |
---|
10173 | 11687 | } |
---|
10174 | 11688 | |
---|
10175 | | - for (index = 0; index < phba->io_channel_irqs; index++) { |
---|
10176 | | - phba->sli4_hba.hba_eq_hdl[index].idx = index; |
---|
10177 | | - phba->sli4_hba.hba_eq_hdl[index].phba = phba; |
---|
| 11689 | + eqhdl = lpfc_get_eq_hdl(0); |
---|
| 11690 | + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); |
---|
| 11691 | + |
---|
| 11692 | + cpu = cpumask_first(cpu_present_mask); |
---|
| 11693 | + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); |
---|
| 11694 | + |
---|
| 11695 | + for (index = 0; index < phba->cfg_irq_chann; index++) { |
---|
| 11696 | + eqhdl = lpfc_get_eq_hdl(index); |
---|
| 11697 | + eqhdl->idx = index; |
---|
10178 | 11698 | } |
---|
10179 | 11699 | |
---|
10180 | | - if (phba->cfg_fof) { |
---|
10181 | | - phba->sli4_hba.hba_eq_hdl[index].idx = index; |
---|
10182 | | - phba->sli4_hba.hba_eq_hdl[index].phba = phba; |
---|
10183 | | - } |
---|
10184 | 11700 | return 0; |
---|
10185 | 11701 | } |
---|
10186 | 11702 | |
---|
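Both interrupt paths above end up in pci_alloc_irq_vectors(); only the flags and the vector counts differ. A condensed sketch of that fallback order (the wrapper function is illustrative; the PCI API is real):

#include <linux/pci.h>

/* Ask for up to 'want' MSI-X vectors with OS-managed affinity; if MSI-X
 * is unavailable, settle for a single MSI vector. Returns the number of
 * vectors granted, or a negative errno.
 */
static int example_alloc_vectors(struct pci_dev *pdev, unsigned int want)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	return nvec;
}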
10187 | 11703 | /** |
---|
10188 | 11704 | * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device |
---|
10189 | 11705 | * @phba: pointer to lpfc hba data structure. |
---|
| 11706 | + * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). |
---|
10190 | 11707 | * |
---|
10191 | 11708 | * This routine is invoked to enable device interrupt and associate driver's |
---|
10192 | 11709 | * interrupt handler(s) to interrupt vector(s) to device with SLI-4 |
---|
.. | .. |
---|
10236 | 11753 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); |
---|
10237 | 11754 | if (!retval) { |
---|
10238 | 11755 | struct lpfc_hba_eq_hdl *eqhdl; |
---|
| 11756 | + unsigned int cpu; |
---|
10239 | 11757 | |
---|
10240 | 11758 | /* Indicate initialization to INTx mode */ |
---|
10241 | 11759 | phba->intr_type = INTx; |
---|
10242 | 11760 | intr_mode = 0; |
---|
10243 | 11761 | |
---|
10244 | | - for (idx = 0; idx < phba->io_channel_irqs; idx++) { |
---|
10245 | | - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; |
---|
| 11762 | + eqhdl = lpfc_get_eq_hdl(0); |
---|
| 11763 | + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); |
---|
| 11764 | + |
---|
| 11765 | + cpu = cpumask_first(cpu_present_mask); |
---|
| 11766 | + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, |
---|
| 11767 | + cpu); |
---|
| 11768 | + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { |
---|
| 11769 | + eqhdl = lpfc_get_eq_hdl(idx); |
---|
10246 | 11770 | eqhdl->idx = idx; |
---|
10247 | | - eqhdl->phba = phba; |
---|
10248 | | - atomic_set(&eqhdl->hba_eq_in_use, 1); |
---|
10249 | | - } |
---|
10250 | | - if (phba->cfg_fof) { |
---|
10251 | | - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; |
---|
10252 | | - eqhdl->idx = idx; |
---|
10253 | | - eqhdl->phba = phba; |
---|
10254 | | - atomic_set(&eqhdl->hba_eq_in_use, 1); |
---|
10255 | 11771 | } |
---|
10256 | 11772 | } |
---|
10257 | 11773 | } |
---|
.. | .. |
---|
10273 | 11789 | /* Disable the currently initialized interrupt mode */ |
---|
10274 | 11790 | if (phba->intr_type == MSIX) { |
---|
10275 | 11791 | int index; |
---|
| 11792 | + struct lpfc_hba_eq_hdl *eqhdl; |
---|
10276 | 11793 | |
---|
10277 | 11794 | /* Free up MSI-X multi-message vectors */ |
---|
10278 | | - for (index = 0; index < phba->io_channel_irqs; index++) |
---|
10279 | | - free_irq(pci_irq_vector(phba->pcidev, index), |
---|
10280 | | - &phba->sli4_hba.hba_eq_hdl[index]); |
---|
10281 | | - |
---|
10282 | | - if (phba->cfg_fof) |
---|
10283 | | - free_irq(pci_irq_vector(phba->pcidev, index), |
---|
10284 | | - &phba->sli4_hba.hba_eq_hdl[index]); |
---|
| 11795 | + for (index = 0; index < phba->cfg_irq_chann; index++) { |
---|
| 11796 | + eqhdl = lpfc_get_eq_hdl(index); |
---|
| 11797 | + lpfc_irq_clear_aff(eqhdl); |
---|
| 11798 | + irq_set_affinity_hint(eqhdl->irq, NULL); |
---|
| 11799 | + free_irq(eqhdl->irq, eqhdl); |
---|
| 11800 | + } |
---|
10285 | 11801 | } else { |
---|
10286 | 11802 | free_irq(phba->pcidev->irq, phba); |
---|
10287 | 11803 | } |
---|
.. | .. |
---|
10342 | 11858 | static void |
---|
10343 | 11859 | lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) |
---|
10344 | 11860 | { |
---|
| 11861 | + struct lpfc_sli4_hdw_queue *qp; |
---|
| 11862 | + int idx, ccnt; |
---|
10345 | 11863 | int wait_time = 0; |
---|
10346 | | - int nvme_xri_cmpl = 1; |
---|
| 11864 | + int io_xri_cmpl = 1; |
---|
10347 | 11865 | int nvmet_xri_cmpl = 1; |
---|
10348 | | - int fcp_xri_cmpl = 1; |
---|
10349 | 11866 | int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); |
---|
10350 | 11867 | |
---|
10351 | 11868 | /* Driver just aborted IOs during the hba_unset process. Pause |
---|
.. | .. |
---|
10358 | 11875 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
10359 | 11876 | lpfc_nvme_wait_for_io_drain(phba); |
---|
10360 | 11877 | |
---|
10361 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) |
---|
10362 | | - fcp_xri_cmpl = |
---|
10363 | | - list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); |
---|
| 11878 | + ccnt = 0; |
---|
| 11879 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 11880 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 11881 | + io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); |
---|
| 11882 | + if (!io_xri_cmpl) /* if list is NOT empty */ |
---|
| 11883 | + ccnt++; |
---|
| 11884 | + } |
---|
| 11885 | + if (ccnt) |
---|
| 11886 | + io_xri_cmpl = 0; |
---|
| 11887 | + |
---|
10364 | 11888 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
10365 | | - nvme_xri_cmpl = |
---|
10366 | | - list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); |
---|
10367 | 11889 | nvmet_xri_cmpl = |
---|
10368 | 11890 | list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); |
---|
10369 | 11891 | } |
---|
10370 | 11892 | |
---|
10371 | | - while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || |
---|
10372 | | - !nvmet_xri_cmpl) { |
---|
| 11893 | + while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { |
---|
10373 | 11894 | if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { |
---|
10374 | 11895 | if (!nvmet_xri_cmpl) |
---|
10375 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 11896 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
10376 | 11897 | "6424 NVMET XRI exchange busy " |
---|
10377 | 11898 | "wait time: %d seconds.\n", |
---|
10378 | 11899 | wait_time/1000); |
---|
10379 | | - if (!nvme_xri_cmpl) |
---|
10380 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
10381 | | - "6100 NVME XRI exchange busy " |
---|
10382 | | - "wait time: %d seconds.\n", |
---|
10383 | | - wait_time/1000); |
---|
10384 | | - if (!fcp_xri_cmpl) |
---|
10385 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
10386 | | - "2877 FCP XRI exchange busy " |
---|
| 11900 | + if (!io_xri_cmpl) |
---|
| 11901 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 11902 | + "6100 IO XRI exchange busy " |
---|
10387 | 11903 | "wait time: %d seconds.\n", |
---|
10388 | 11904 | wait_time/1000); |
---|
10389 | 11905 | if (!els_xri_cmpl) |
---|
10390 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 11906 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
10391 | 11907 | "2878 ELS XRI exchange busy " |
---|
10392 | 11908 | "wait time: %d seconds.\n", |
---|
10393 | 11909 | wait_time/1000); |
---|
.. | .. |
---|
10397 | 11913 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); |
---|
10398 | 11914 | wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; |
---|
10399 | 11915 | } |
---|
| 11916 | + |
---|
| 11917 | + ccnt = 0; |
---|
| 11918 | + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { |
---|
| 11919 | + qp = &phba->sli4_hba.hdwq[idx]; |
---|
| 11920 | + io_xri_cmpl = list_empty( |
---|
| 11921 | + &qp->lpfc_abts_io_buf_list); |
---|
| 11922 | + if (!io_xri_cmpl) /* if list is NOT empty */ |
---|
| 11923 | + ccnt++; |
---|
| 11924 | + } |
---|
| 11925 | + if (ccnt) |
---|
| 11926 | + io_xri_cmpl = 0; |
---|
| 11927 | + |
---|
10400 | 11928 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
10401 | | - nvme_xri_cmpl = list_empty( |
---|
10402 | | - &phba->sli4_hba.lpfc_abts_nvme_buf_list); |
---|
10403 | 11929 | nvmet_xri_cmpl = list_empty( |
---|
10404 | 11930 | &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); |
---|
10405 | 11931 | } |
---|
10406 | | - |
---|
10407 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) |
---|
10408 | | - fcp_xri_cmpl = list_empty( |
---|
10409 | | - &phba->sli4_hba.lpfc_abts_scsi_buf_list); |
---|
10410 | | - |
---|
10411 | 11932 | els_xri_cmpl = |
---|
10412 | 11933 | list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); |
---|
10413 | 11934 | |
---|
.. | .. |
---|
10432 | 11953 | struct pci_dev *pdev = phba->pcidev; |
---|
10433 | 11954 | |
---|
10434 | 11955 | lpfc_stop_hba_timers(phba); |
---|
10435 | | - phba->sli4_hba.intr_enable = 0; |
---|
| 11956 | + if (phba->pport) |
---|
| 11957 | + phba->sli4_hba.intr_enable = 0; |
---|
10436 | 11958 | |
---|
10437 | 11959 | /* |
---|
10438 | 11960 | * Gracefully wait out the potential current outstanding asynchronous |
---|
.. | .. |
---|
10466 | 11988 | /* Wait for completion of device XRI exchange busy */ |
---|
10467 | 11989 | lpfc_sli4_xri_exchange_busy_wait(phba); |
---|
10468 | 11990 | |
---|
| 11991 | + /* per-phba callback de-registration for hotplug event */ |
---|
| 11992 | + if (phba->pport) |
---|
| 11993 | + lpfc_cpuhp_remove(phba); |
---|
| 11994 | + |
---|
10469 | 11995 | /* Disable PCI subsystem interrupt */ |
---|
10470 | 11996 | lpfc_sli4_disable_intr(phba); |
---|
10471 | 11997 | |
---|
.. | .. |
---|
10476 | 12002 | /* Stop kthread signal shall trigger work_done one more time */ |
---|
10477 | 12003 | kthread_stop(phba->worker_thread); |
---|
10478 | 12004 | |
---|
| 12005 | + /* Disable FW logging to host memory */ |
---|
| 12006 | + lpfc_ras_stop_fwlog(phba); |
---|
| 12007 | + |
---|
10479 | 12008 | /* Unset the queues shared with the hardware then release all |
---|
10480 | 12009 | * allocated resources. |
---|
10481 | 12010 | */ |
---|
.. | .. |
---|
10485 | 12014 | /* Reset SLI4 HBA FCoE function */ |
---|
10486 | 12015 | lpfc_pci_function_reset(phba); |
---|
10487 | 12016 | |
---|
| 12017 | + /* Free RAS DMA memory */ |
---|
| 12018 | + if (phba->ras_fwlog.ras_enabled) |
---|
| 12019 | + lpfc_sli4_ras_dma_free(phba); |
---|
| 12020 | + |
---|
10488 | 12021 | /* Stop the SLI4 device port */ |
---|
10489 | | - phba->pport->work_port_events = 0; |
---|
| 12022 | + if (phba->pport) |
---|
| 12023 | + phba->pport->work_port_events = 0; |
---|
10490 | 12024 | } |
---|
10491 | 12025 | |
---|
10492 | 12026 | /** |
---|
.. | .. |
---|
10558 | 12092 | sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); |
---|
10559 | 12093 | sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); |
---|
10560 | 12094 | sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); |
---|
| 12095 | + sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); |
---|
10561 | 12096 | sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, |
---|
10562 | 12097 | mbx_sli4_parameters); |
---|
10563 | 12098 | sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); |
---|
.. | .. |
---|
10565 | 12100 | mbx_sli4_parameters); |
---|
10566 | 12101 | phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); |
---|
10567 | 12102 | phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); |
---|
10568 | | - phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) && |
---|
10569 | | - bf_get(cfg_xib, mbx_sli4_parameters)); |
---|
10570 | 12103 | |
---|
10571 | | - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) || |
---|
10572 | | - !phba->nvme_support) { |
---|
10573 | | - phba->nvme_support = 0; |
---|
10574 | | - phba->nvmet_support = 0; |
---|
10575 | | - phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; |
---|
10576 | | - phba->cfg_nvme_io_channel = 0; |
---|
10577 | | - phba->io_channel_irqs = phba->cfg_fcp_io_channel; |
---|
10578 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, |
---|
10579 | | - "6101 Disabling NVME support: " |
---|
10580 | | - "Not supported by firmware: %d %d\n", |
---|
10581 | | - bf_get(cfg_nvme, mbx_sli4_parameters), |
---|
10582 | | - bf_get(cfg_xib, mbx_sli4_parameters)); |
---|
| 12104 | + /* Check for Extended Pre-Registered SGL support */ |
---|
| 12105 | + phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); |
---|
10583 | 12106 | |
---|
10584 | | - /* If firmware doesn't support NVME, just use SCSI support */ |
---|
10585 | | - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
---|
10586 | | - return -ENODEV; |
---|
10587 | | - phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; |
---|
| 12107 | + /* Check for firmware nvme support */ |
---|
| 12108 | + rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && |
---|
| 12109 | + bf_get(cfg_xib, mbx_sli4_parameters)); |
---|
| 12110 | + |
---|
| 12111 | + if (rc) { |
---|
| 12112 | + /* Save this to indicate the Firmware supports NVME */ |
---|
| 12113 | + sli4_params->nvme = 1; |
---|
| 12114 | + |
---|
| 12115 | + /* Firmware NVME support, check driver FC4 NVME support */ |
---|
| 12116 | + if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { |
---|
| 12117 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, |
---|
| 12118 | + "6133 Disabling NVME support: " |
---|
| 12119 | + "FC4 type not supported: x%x\n", |
---|
| 12120 | + phba->cfg_enable_fc4_type); |
---|
| 12121 | + goto fcponly; |
---|
| 12122 | + } |
---|
| 12123 | + } else { |
---|
| 12124 | + /* No firmware NVME support, check driver FC4 NVME support */ |
---|
| 12125 | + sli4_params->nvme = 0; |
---|
| 12126 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
| 12127 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, |
---|
| 12128 | + "6101 Disabling NVME support: Not " |
---|
| 12129 | + "supported by firmware (%d %d) x%x\n", |
---|
| 12130 | + bf_get(cfg_nvme, mbx_sli4_parameters), |
---|
| 12131 | + bf_get(cfg_xib, mbx_sli4_parameters), |
---|
| 12132 | + phba->cfg_enable_fc4_type); |
---|
| 12133 | +fcponly: |
---|
| 12134 | + phba->nvme_support = 0; |
---|
| 12135 | + phba->nvmet_support = 0; |
---|
| 12136 | + phba->cfg_nvmet_mrq = 0; |
---|
| 12137 | + phba->cfg_nvme_seg_cnt = 0; |
---|
| 12138 | + |
---|
| 12139 | + /* If no FC4 type support, move to just SCSI support */ |
---|
| 12140 | + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) |
---|
| 12141 | + return -ENODEV; |
---|
| 12142 | + phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; |
---|
| 12143 | + } |
---|
10588 | 12144 | } |
---|
| 12145 | + |
---|
| 12146 | + /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to |
---|
| 12147 | + * accommodate 512K and 1M IOs in a single nvme buf. |
---|
| 12148 | + */ |
---|
| 12149 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
| 12150 | + phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; |
---|
10589 | 12151 | |
---|
10590 | 12152 | /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ |
---|
10591 | 12153 | if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != |
---|
.. | .. |
---|
10650 | 12212 | phba->mds_diags_support = 1; |
---|
10651 | 12213 | else |
---|
10652 | 12214 | phba->mds_diags_support = 0; |
---|
| 12215 | + |
---|
| 12216 | + /* |
---|
| 12217 | + * Check if the SLI port supports NSLER |
---|
| 12218 | + */ |
---|
| 12219 | + if (bf_get(cfg_nsler, mbx_sli4_parameters)) |
---|
| 12220 | + phba->nsler = 1; |
---|
| 12221 | + else |
---|
| 12222 | + phba->nsler = 0; |
---|
| 12223 | + |
---|
10653 | 12224 | return 0; |
---|
10654 | 12225 | } |
---|
10655 | 12226 | |
---|
.. | .. |
---|
10756 | 12327 | /* Configure and enable interrupt */ |
---|
10757 | 12328 | intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); |
---|
10758 | 12329 | if (intr_mode == LPFC_INTR_ERROR) { |
---|
10759 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12330 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
10760 | 12331 | "0431 Failed to enable interrupt.\n"); |
---|
10761 | 12332 | error = -ENODEV; |
---|
10762 | 12333 | goto out_free_sysfs_attr; |
---|
10763 | 12334 | } |
---|
10764 | 12335 | /* SLI-3 HBA setup */ |
---|
10765 | 12336 | if (lpfc_sli_hba_setup(phba)) { |
---|
10766 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12337 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
10767 | 12338 | "1477 Failed to set up hba\n"); |
---|
10768 | 12339 | error = -ENODEV; |
---|
10769 | 12340 | goto out_remove_device; |
---|
.. | .. |
---|
10878 | 12449 | kfree(phba->vpi_ids); |
---|
10879 | 12450 | |
---|
10880 | 12451 | lpfc_stop_hba_timers(phba); |
---|
10881 | | - spin_lock_irq(&phba->hbalock); |
---|
| 12452 | + spin_lock_irq(&phba->port_list_lock); |
---|
10882 | 12453 | list_del_init(&vport->listentry); |
---|
10883 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 12454 | + spin_unlock_irq(&phba->port_list_lock); |
---|
10884 | 12455 | |
---|
10885 | 12456 | lpfc_debugfs_terminate(vport); |
---|
10886 | 12457 | |
---|
.. | .. |
---|
10898 | 12469 | * corresponding pools here. |
---|
10899 | 12470 | */ |
---|
10900 | 12471 | lpfc_scsi_free(phba); |
---|
| 12472 | + lpfc_free_iocb_list(phba); |
---|
| 12473 | + |
---|
10901 | 12474 | lpfc_mem_free_all(phba); |
---|
10902 | 12475 | |
---|
10903 | 12476 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), |
---|
.. | .. |
---|
11019 | 12592 | /* Configure and enable interrupt */ |
---|
11020 | 12593 | intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); |
---|
11021 | 12594 | if (intr_mode == LPFC_INTR_ERROR) { |
---|
11022 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12595 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11023 | 12596 | "0430 PM resume Failed to enable interrupt\n"); |
---|
11024 | 12597 | return -EIO; |
---|
11025 | 12598 | } else |
---|
.. | .. |
---|
11045 | 12618 | static void |
---|
11046 | 12619 | lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) |
---|
11047 | 12620 | { |
---|
11048 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12621 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11049 | 12622 | "2723 PCI channel I/O abort preparing for recovery\n"); |
---|
11050 | 12623 | |
---|
11051 | 12624 | /* |
---|
.. | .. |
---|
11066 | 12639 | static void |
---|
11067 | 12640 | lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) |
---|
11068 | 12641 | { |
---|
11069 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12642 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11070 | 12643 | "2710 PCI channel disable preparing for reset\n"); |
---|
11071 | 12644 | |
---|
11072 | 12645 | /* Block any management I/Os to the device */ |
---|
.. | .. |
---|
11076 | 12649 | lpfc_scsi_dev_block(phba); |
---|
11077 | 12650 | |
---|
11078 | 12651 | /* Flush all driver's outstanding SCSI I/Os as we are to reset */ |
---|
11079 | | - lpfc_sli_flush_fcp_rings(phba); |
---|
| 12652 | + lpfc_sli_flush_io_rings(phba); |
---|
11080 | 12653 | |
---|
11081 | 12654 | /* stop all timers */ |
---|
11082 | 12655 | lpfc_stop_hba_timers(phba); |
---|
.. | .. |
---|
11097 | 12670 | static void |
---|
11098 | 12671 | lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) |
---|
11099 | 12672 | { |
---|
11100 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12673 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11101 | 12674 | "2711 PCI channel permanent disable for failure\n"); |
---|
11102 | 12675 | /* Block all SCSI devices' I/Os on the host */ |
---|
11103 | 12676 | lpfc_scsi_dev_block(phba); |
---|
.. | .. |
---|
11106 | 12679 | lpfc_stop_hba_timers(phba); |
---|
11107 | 12680 | |
---|
11108 | 12681 | /* Clean up all driver's outstanding SCSI I/Os */ |
---|
11109 | | - lpfc_sli_flush_fcp_rings(phba); |
---|
| 12682 | + lpfc_sli_flush_io_rings(phba); |
---|
11110 | 12683 | } |
---|
11111 | 12684 | |
---|
11112 | 12685 | /** |
---|
.. | .. |
---|
11148 | 12721 | return PCI_ERS_RESULT_DISCONNECT; |
---|
11149 | 12722 | default: |
---|
11150 | 12723 | /* Unknown state, prepare and request slot reset */ |
---|
11151 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12724 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11152 | 12725 | "0472 Unknown PCI error state: x%x\n", state); |
---|
11153 | 12726 | lpfc_sli_prep_dev_for_reset(phba); |
---|
11154 | 12727 | return PCI_ERS_RESULT_NEED_RESET; |
---|
.. | .. |
---|
11206 | 12779 | /* Configure and enable interrupt */ |
---|
11207 | 12780 | intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); |
---|
11208 | 12781 | if (intr_mode == LPFC_INTR_ERROR) { |
---|
11209 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12782 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11210 | 12783 | "0427 Cannot re-enable interrupt after " |
---|
11211 | 12784 | "slot reset.\n"); |
---|
11212 | 12785 | return PCI_ERS_RESULT_DISCONNECT; |
---|
.. | .. |
---|
11242 | 12815 | |
---|
11243 | 12816 | /* Bring device online, it will be no-op for non-fatal error resume */ |
---|
11244 | 12817 | lpfc_online(phba); |
---|
11245 | | - |
---|
11246 | | - /* Clean up Advanced Error Reporting (AER) if needed */ |
---|
11247 | | - if (phba->hba_flag & HBA_AER_ENABLED) |
---|
11248 | | - pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
11249 | 12818 | } |
---|
11250 | 12819 | |
---|
11251 | 12820 | /** |
---|
.. | .. |
---|
11295 | 12864 | } |
---|
11296 | 12865 | |
---|
11297 | 12866 | |
---|
11298 | | -static void |
---|
| 12867 | +static int |
---|
11299 | 12868 | lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, |
---|
11300 | 12869 | uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, |
---|
11301 | 12870 | const struct firmware *fw) |
---|
11302 | 12871 | { |
---|
11303 | | - if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || |
---|
11304 | | - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && |
---|
11305 | | - magic_number != MAGIC_NUMER_G6) || |
---|
11306 | | - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && |
---|
11307 | | - magic_number != MAGIC_NUMER_G7)) |
---|
11308 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11309 | | - "3030 This firmware version is not supported on " |
---|
11310 | | - "this HBA model. Device:%x Magic:%x Type:%x " |
---|
11311 | | - "ID:%x Size %d %zd\n", |
---|
11312 | | - phba->pcidev->device, magic_number, ftype, fid, |
---|
11313 | | - fsize, fw->size); |
---|
11314 | | - else |
---|
11315 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11316 | | - "3022 FW Download failed. Device:%x Magic:%x Type:%x " |
---|
11317 | | - "ID:%x Size %d %zd\n", |
---|
11318 | | - phba->pcidev->device, magic_number, ftype, fid, |
---|
11319 | | - fsize, fw->size); |
---|
11320 | | -} |
---|
| 12872 | + int rc; |
---|
11321 | 12873 | |
---|
| 12874 | + /* Three cases: (1) FW was not supported on the detected adapter. |
---|
| 12875 | + * (2) FW update has been locked out administratively. |
---|
| 12876 | + * (3) Some other error during FW update. |
---|
| 12877 | + * In each case, an unmaskable message is written to the console |
---|
| 12878 | + * for admin diagnosis. |
---|
| 12879 | + */ |
---|
| 12880 | + if (offset == ADD_STATUS_FW_NOT_SUPPORTED || |
---|
| 12881 | + (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && |
---|
| 12882 | + magic_number != MAGIC_NUMBER_G6) || |
---|
| 12883 | + (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && |
---|
| 12884 | + magic_number != MAGIC_NUMBER_G7)) { |
---|
| 12885 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 12886 | + "3030 This firmware version is not supported on" |
---|
| 12887 | + " this HBA model. Device:%x Magic:%x Type:%x " |
---|
| 12888 | + "ID:%x Size %d %zd\n", |
---|
| 12889 | + phba->pcidev->device, magic_number, ftype, fid, |
---|
| 12890 | + fsize, fw->size); |
---|
| 12891 | + rc = -EINVAL; |
---|
| 12892 | + } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { |
---|
| 12893 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 12894 | + "3021 Firmware downloads have been prohibited " |
---|
| 12895 | + "by a system configuration setting on " |
---|
| 12896 | + "Device:%x Magic:%x Type:%x ID:%x Size %d " |
---|
| 12897 | + "%zd\n", |
---|
| 12898 | + phba->pcidev->device, magic_number, ftype, fid, |
---|
| 12899 | + fsize, fw->size); |
---|
| 12900 | + rc = -EACCES; |
---|
| 12901 | + } else { |
---|
| 12902 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 12903 | + "3022 FW Download failed. Add Status x%x " |
---|
| 12904 | + "Device:%x Magic:%x Type:%x ID:%x Size %d " |
---|
| 12905 | + "%zd\n", |
---|
| 12906 | + offset, phba->pcidev->device, magic_number, |
---|
| 12907 | + ftype, fid, fsize, fw->size); |
---|
| 12908 | + rc = -EIO; |
---|
| 12909 | + } |
---|
| 12910 | + return rc; |
---|
| 12911 | +} |
---|
11322 | 12912 | |
---|
11323 | 12913 | /** |
---|
11324 | 12914 | * lpfc_write_firmware - attempt to write a firmware image to the port |
---|
11325 | 12915 | * @fw: pointer to firmware image returned from request_firmware. |
---|
11326 | | - * @phba: pointer to lpfc hba data structure. |
---|
| 12916 | + * @context: pointer to the lpfc hba data structure (opaque context).
---|
11327 | 12917 | * |
---|
11328 | 12918 | **/ |
---|
11329 | 12919 | static void |
---|
.. | .. |
---|
11353 | 12943 | INIT_LIST_HEAD(&dma_buffer_list); |
---|
11354 | 12944 | lpfc_decode_firmware_rev(phba, fwrev, 1); |
---|
11355 | 12945 | if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { |
---|
11356 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12946 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11357 | 12947 | "3023 Updating Firmware, Current Version:%s " |
---|
11358 | 12948 | "New Version:%s\n", |
---|
11359 | 12949 | fwrev, image->revision); |
---|
.. | .. |
---|
11392 | 12982 | rc = lpfc_wr_object(phba, &dma_buffer_list, |
---|
11393 | 12983 | (fw->size - offset), &offset); |
---|
11394 | 12984 | if (rc) { |
---|
11395 | | - lpfc_log_write_firmware_error(phba, offset, |
---|
11396 | | - magic_number, ftype, fid, fsize, fw); |
---|
| 12985 | + rc = lpfc_log_write_firmware_error(phba, offset, |
---|
| 12986 | + magic_number, |
---|
| 12987 | + ftype, |
---|
| 12988 | + fid, |
---|
| 12989 | + fsize, |
---|
| 12990 | + fw); |
---|
11397 | 12991 | goto release_out; |
---|
11398 | 12992 | } |
---|
11399 | 12993 | } |
---|
11400 | 12994 | rc = offset; |
---|
11401 | 12995 | } else |
---|
11402 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 12996 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11403 | 12997 | "3029 Skipped Firmware update, Current " |
---|
11404 | 12998 | "Version:%s New Version:%s\n", |
---|
11405 | 12999 | fwrev, image->revision); |
---|
.. | .. |
---|
11413 | 13007 | } |
---|
11414 | 13008 | release_firmware(fw); |
---|
11415 | 13009 | out: |
---|
11416 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11417 | | - "3024 Firmware update done: %d.\n", rc); |
---|
11418 | | - return; |
---|
| 13010 | + if (rc < 0) |
---|
| 13011 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 13012 | + "3062 Firmware update error, status %d.\n", rc); |
---|
| 13013 | + else |
---|
| 13014 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 13015 | + "3024 Firmware update success: size %d.\n", rc); |
---|
11419 | 13016 | } |
---|
11420 | 13017 | |
---|
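lpfc_write_firmware() runs as the completion callback of an asynchronous firmware request. A sketch of how such a callback is typically scheduled; the image file name here is a placeholder, not the driver's actual naming scheme:

#include <linux/firmware.h>

static int example_request_fw(struct lpfc_hba *phba)
{
	/* On completion the firmware core calls
	 * lpfc_write_firmware(fw, phba), with fw == NULL if no image
	 * was found on the filesystem.
	 */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example_fw.grp",
				       &phba->pcidev->dev, GFP_KERNEL,
				       phba, lpfc_write_firmware);
}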
11421 | 13018 | /** |
---|
11422 | 13019 | * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade |
---|
11423 | 13020 | * @phba: pointer to lpfc hba data structure. |
---|
| 13021 | + * @fw_upgrade: which firmware to update. |
---|
11424 | 13022 | * |
---|
11425 | 13023 | * This routine is called to perform Linux generic firmware upgrade on device |
---|
11426 | 13024 | * that supports such feature. |
---|
.. | .. |
---|
11487 | 13085 | if (!phba) |
---|
11488 | 13086 | return -ENOMEM; |
---|
11489 | 13087 | |
---|
| 13088 | + INIT_LIST_HEAD(&phba->poll_list); |
---|
| 13089 | + |
---|
11490 | 13090 | /* Perform generic PCI device enabling operation */ |
---|
11491 | 13091 | error = lpfc_enable_pci_dev(phba); |
---|
11492 | 13092 | if (error) |
---|
.. | .. |
---|
11527 | 13127 | /* Get the default values for Model Name and Description */ |
---|
11528 | 13128 | lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); |
---|
11529 | 13129 | |
---|
| 13130 | + /* Now, try to enable interrupts and bring up the device */
---|
| 13131 | + cfg_mode = phba->cfg_use_msi; |
---|
| 13132 | + |
---|
| 13133 | + /* Put device to a known state before enabling interrupt */ |
---|
| 13134 | + phba->pport = NULL; |
---|
| 13135 | + lpfc_stop_port(phba); |
---|
| 13136 | + |
---|
| 13137 | + /* Init cpu_map array */ |
---|
| 13138 | + lpfc_cpu_map_array_init(phba); |
---|
| 13139 | + |
---|
| 13140 | + /* Init hba_eq_hdl array */ |
---|
| 13141 | + lpfc_hba_eq_hdl_array_init(phba); |
---|
| 13142 | + |
---|
| 13143 | + /* Configure and enable interrupt */ |
---|
| 13144 | + intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); |
---|
| 13145 | + if (intr_mode == LPFC_INTR_ERROR) { |
---|
| 13146 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 13147 | + "0426 Failed to enable interrupt.\n"); |
---|
| 13148 | + error = -ENODEV; |
---|
| 13149 | + goto out_unset_driver_resource; |
---|
| 13150 | + } |
---|
| 13151 | + /* Default to single EQ for non-MSI-X */ |
---|
| 13152 | + if (phba->intr_type != MSIX) { |
---|
| 13153 | + phba->cfg_irq_chann = 1; |
---|
| 13154 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
| 13155 | + if (phba->nvmet_support) |
---|
| 13156 | + phba->cfg_nvmet_mrq = 1; |
---|
| 13157 | + } |
---|
| 13158 | + } |
---|
| 13159 | + lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); |
---|
| 13160 | + |
---|
11530 | 13161 | /* Create SCSI host to the physical port */ |
---|
11531 | 13162 | error = lpfc_create_shost(phba); |
---|
11532 | 13163 | if (error) { |
---|
11533 | 13164 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11534 | 13165 | "1415 Failed to create scsi host.\n"); |
---|
11535 | | - goto out_unset_driver_resource; |
---|
| 13166 | + goto out_disable_intr; |
---|
11536 | 13167 | } |
---|
| 13168 | + vport = phba->pport; |
---|
| 13169 | + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ |
---|
11537 | 13170 | |
---|
11538 | 13171 | /* Configure sysfs attributes */ |
---|
11539 | | - vport = phba->pport; |
---|
11540 | 13172 | error = lpfc_alloc_sysfs_attr(vport); |
---|
11541 | 13173 | if (error) { |
---|
11542 | 13174 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
.. | .. |
---|
11544 | 13176 | goto out_destroy_shost; |
---|
11545 | 13177 | } |
---|
11546 | 13178 | |
---|
11547 | | - shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ |
---|
11548 | | - /* Now, trying to enable interrupt and bring up the device */ |
---|
11549 | | - cfg_mode = phba->cfg_use_msi; |
---|
11550 | | - |
---|
11551 | | - /* Put device to a known state before enabling interrupt */ |
---|
11552 | | - lpfc_stop_port(phba); |
---|
11553 | | - |
---|
11554 | | - /* Configure and enable interrupt */ |
---|
11555 | | - intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); |
---|
11556 | | - if (intr_mode == LPFC_INTR_ERROR) { |
---|
11557 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11558 | | - "0426 Failed to enable interrupt.\n"); |
---|
11559 | | - error = -ENODEV; |
---|
11560 | | - goto out_free_sysfs_attr; |
---|
11561 | | - } |
---|
11562 | | - /* Default to single EQ for non-MSI-X */ |
---|
11563 | | - if (phba->intr_type != MSIX) { |
---|
11564 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) |
---|
11565 | | - phba->cfg_fcp_io_channel = 1; |
---|
11566 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
11567 | | - phba->cfg_nvme_io_channel = 1; |
---|
11568 | | - if (phba->nvmet_support) |
---|
11569 | | - phba->cfg_nvmet_mrq = 1; |
---|
11570 | | - } |
---|
11571 | | - phba->io_channel_irqs = 1; |
---|
11572 | | - } |
---|
11573 | | - |
---|
11574 | 13179 | /* Set up SLI-4 HBA */ |
---|
11575 | 13180 | if (lpfc_sli4_hba_setup(phba)) { |
---|
11576 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13181 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11577 | 13182 | "1421 Failed to set up hba\n"); |
---|
11578 | 13183 | error = -ENODEV; |
---|
11579 | | - goto out_disable_intr; |
---|
| 13184 | + goto out_free_sysfs_attr; |
---|
11580 | 13185 | } |
---|
11581 | 13186 | |
---|
11582 | 13187 | /* Log the current active interrupt mode */ |
---|
.. | .. |
---|
11589 | 13194 | /* NVME support in FW earlier in the driver load corrects the |
---|
11590 | 13195 | * FC4 type making a check for nvme_support unnecessary. |
---|
11591 | 13196 | */ |
---|
11592 | | - if ((phba->nvmet_support == 0) && |
---|
11593 | | - (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
---|
11594 | | - /* Create NVME binding with nvme_fc_transport. This |
---|
11595 | | - * ensures the vport is initialized. If the localport |
---|
11596 | | - * create fails, it should not unload the driver to |
---|
11597 | | - * support field issues. |
---|
11598 | | - */ |
---|
11599 | | - error = lpfc_nvme_create_localport(vport); |
---|
11600 | | - if (error) { |
---|
11601 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
11602 | | - "6004 NVME registration failed, " |
---|
11603 | | - "error x%x\n", |
---|
11604 | | - error); |
---|
| 13197 | + if (phba->nvmet_support == 0) { |
---|
| 13198 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
---|
| 13199 | + /* Create NVME binding with nvme_fc_transport. This |
---|
| 13200 | + * ensures the vport is initialized. If the localport |
---|
| 13201 | + * create fails, it should not unload the driver to |
---|
| 13202 | + * support field issues. |
---|
| 13203 | + */ |
---|
| 13204 | + error = lpfc_nvme_create_localport(vport); |
---|
| 13205 | + if (error) { |
---|
| 13206 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
| 13207 | + "6004 NVME registration " |
---|
| 13208 | + "failed, error x%x\n", |
---|
| 13209 | + error); |
---|
| 13210 | + } |
---|
11605 | 13211 | } |
---|
11606 | 13212 | } |
---|
11607 | 13213 | |
---|
.. | .. |
---|
11611 | 13217 | |
---|
11612 | 13218 | /* Check if there are static vports to be created. */ |
---|
11613 | 13219 | lpfc_create_static_vport(phba); |
---|
| 13220 | + |
---|
| 13221 | + /* Enable RAS FW log support */ |
---|
| 13222 | + lpfc_sli4_ras_setup(phba); |
---|
| 13223 | + |
---|
| 13224 | + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); |
---|
| 13225 | + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); |
---|
| 13226 | + |
---|
11614 | 13227 | return 0; |
---|
11615 | 13228 | |
---|
11616 | | -out_disable_intr: |
---|
11617 | | - lpfc_sli4_disable_intr(phba); |
---|
11618 | 13229 | out_free_sysfs_attr: |
---|
11619 | 13230 | lpfc_free_sysfs_attr(vport); |
---|
11620 | 13231 | out_destroy_shost: |
---|
11621 | 13232 | lpfc_destroy_shost(phba); |
---|
| 13233 | +out_disable_intr: |
---|
| 13234 | + lpfc_sli4_disable_intr(phba); |
---|
11622 | 13235 | out_unset_driver_resource: |
---|
11623 | 13236 | lpfc_unset_driver_resource_phase2(phba); |
---|
11624 | 13237 | out_unset_driver_resource_s4: |
---|
.. | .. |
---|
11681 | 13294 | lpfc_nvmet_destroy_targetport(phba); |
---|
11682 | 13295 | lpfc_nvme_destroy_localport(vport); |
---|
11683 | 13296 | |
---|
| 13297 | + /* De-allocate multi-XRI pools */ |
---|
| 13298 | + if (phba->cfg_xri_rebalancing) |
---|
| 13299 | + lpfc_destroy_multixri_pools(phba); |
---|
| 13300 | + |
---|
11684 | 13301 | /* |
---|
11685 | 13302 | * Bring down the SLI Layer. This step disables all interrupts, |
---|
11686 | 13303 | * clears the rings, discards all mailbox commands, and resets |
---|
11687 | 13304 | * the HBA FCoE function. |
---|
11688 | 13305 | */ |
---|
11689 | 13306 | lpfc_debugfs_terminate(vport); |
---|
11690 | | - lpfc_sli4_hba_unset(phba); |
---|
11691 | 13307 | |
---|
11692 | 13308 | lpfc_stop_hba_timers(phba); |
---|
11693 | | - spin_lock_irq(&phba->hbalock); |
---|
| 13309 | + spin_lock_irq(&phba->port_list_lock); |
---|
11694 | 13310 | list_del_init(&vport->listentry); |
---|
11695 | | - spin_unlock_irq(&phba->hbalock); |
---|
| 13311 | + spin_unlock_irq(&phba->port_list_lock); |
---|
11696 | 13312 | |
---|
11697 | 13313 | /* Perform scsi free before driver resource_unset since scsi |
---|
11698 | 13314 | * buffers are released to their corresponding pools here. |
---|
11699 | 13315 | */ |
---|
11700 | | - lpfc_scsi_free(phba); |
---|
11701 | | - lpfc_nvme_free(phba); |
---|
| 13316 | + lpfc_io_free(phba); |
---|
11702 | 13317 | lpfc_free_iocb_list(phba); |
---|
| 13318 | + lpfc_sli4_hba_unset(phba); |
---|
11703 | 13319 | |
---|
11704 | 13320 | lpfc_unset_driver_resource_phase2(phba); |
---|
11705 | 13321 | lpfc_sli4_driver_resource_unset(phba); |
---|
.. | .. |
---|
11820 | 13436 | /* Configure and enable interrupt */ |
---|
11821 | 13437 | intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); |
---|
11822 | 13438 | if (intr_mode == LPFC_INTR_ERROR) { |
---|
11823 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13439 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11824 | 13440 | "0294 PM resume Failed to enable interrupt\n"); |
---|
11825 | 13441 | return -EIO; |
---|
11826 | 13442 | } else |
---|
.. | .. |
---|
11846 | 13462 | static void |
---|
11847 | 13463 | lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) |
---|
11848 | 13464 | { |
---|
11849 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13465 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11850 | 13466 | "2828 PCI channel I/O abort preparing for recovery\n"); |
---|
11851 | 13467 | /* |
---|
11852 | 13468 | * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq |
---|
.. | .. |
---|
11866 | 13482 | static void |
---|
11867 | 13483 | lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) |
---|
11868 | 13484 | { |
---|
11869 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13485 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11870 | 13486 | "2826 PCI channel disable preparing for reset\n"); |
---|
11871 | 13487 | |
---|
11872 | 13488 | /* Block any management I/Os to the device */ |
---|
.. | .. |
---|
11875 | 13491 | /* Block all SCSI devices' I/Os on the host */ |
---|
11876 | 13492 | lpfc_scsi_dev_block(phba); |
---|
11877 | 13493 | |
---|
11878 | | - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ |
---|
11879 | | - lpfc_sli_flush_fcp_rings(phba); |
---|
11880 | | - |
---|
11881 | | - /* Flush the outstanding NVME IOs if fc4 type enabled. */ |
---|
11882 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
11883 | | - lpfc_sli_flush_nvme_rings(phba); |
---|
| 13494 | + /* Flush all the driver's outstanding I/Os as we are about to reset */ |
---|
| 13495 | + lpfc_sli_flush_io_rings(phba); |
---|
11884 | 13496 | |
---|
11885 | 13497 | /* stop all timers */ |
---|
11886 | 13498 | lpfc_stop_hba_timers(phba); |
---|
.. | .. |
---|
11902 | 13514 | static void |
---|
11903 | 13515 | lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) |
---|
11904 | 13516 | { |
---|
11905 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13517 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11906 | 13518 | "2827 PCI channel permanent disable for failure\n"); |
---|
11907 | 13519 | |
---|
11908 | 13520 | /* Block all SCSI devices' I/Os on the host */ |
---|
.. | .. |
---|
11911 | 13523 | /* stop all timers */ |
---|
11912 | 13524 | lpfc_stop_hba_timers(phba); |
---|
11913 | 13525 | |
---|
11914 | | - /* Clean up all driver's outstanding SCSI I/Os */ |
---|
11915 | | - lpfc_sli_flush_fcp_rings(phba); |
---|
11916 | | - |
---|
11917 | | - /* Flush the outstanding NVME IOs if fc4 type enabled. */ |
---|
11918 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) |
---|
11919 | | - lpfc_sli_flush_nvme_rings(phba); |
---|
| 13526 | + /* Clean up all the driver's outstanding I/Os */ |
---|
| 13527 | + lpfc_sli_flush_io_rings(phba); |
---|
11920 | 13528 | } |
---|
11921 | 13529 | |
---|
11922 | 13530 | /** |
---|
.. | .. |
---|
11956 | 13564 | return PCI_ERS_RESULT_DISCONNECT; |
---|
11957 | 13565 | default: |
---|
11958 | 13566 | /* Unknown state, prepare and request slot reset */ |
---|
11959 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13567 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
11960 | 13568 | "2825 Unknown PCI error state: x%x\n", state); |
---|
11961 | 13569 | lpfc_sli4_prep_dev_for_reset(phba); |
---|
11962 | 13570 | return PCI_ERS_RESULT_NEED_RESET; |
---|
.. | .. |
---|
12011 | 13619 | psli->sli_flag &= ~LPFC_SLI_ACTIVE; |
---|
12012 | 13620 | spin_unlock_irq(&phba->hbalock); |
---|
12013 | 13621 | |
---|
| 13622 | + /* Init cpu_map array */ |
---|
| 13623 | + lpfc_cpu_map_array_init(phba); |
---|
12014 | 13624 | /* Configure and enable interrupt */ |
---|
12015 | 13625 | intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); |
---|
12016 | 13626 | if (intr_mode == LPFC_INTR_ERROR) { |
---|
12017 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13627 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12018 | 13628 | "2824 Cannot re-enable interrupt after " |
---|
12019 | 13629 | "slot reset.\n"); |
---|
12020 | 13630 | return PCI_ERS_RESULT_DISCONNECT; |
---|
.. | .. |
---|
12057 | 13667 | /* Bring the device back online */ |
---|
12058 | 13668 | lpfc_online(phba); |
---|
12059 | 13669 | } |
---|
12060 | | - |
---|
12061 | | - /* Clean up Advanced Error Reporting (AER) if needed */ |
---|
12062 | | - if (phba->hba_flag & HBA_AER_ENABLED) |
---|
12063 | | - pci_cleanup_aer_uncorrect_error_status(pdev); |
---|
12064 | 13670 | } |
---|
12065 | 13671 | |
---|
12066 | 13672 | /** |
---|
.. | .. |
---|
12123 | 13729 | lpfc_pci_remove_one_s4(pdev); |
---|
12124 | 13730 | break; |
---|
12125 | 13731 | default: |
---|
12126 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13732 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12127 | 13733 | "1424 Invalid PCI device group: 0x%x\n", |
---|
12128 | 13734 | phba->pci_dev_grp); |
---|
12129 | 13735 | break; |
---|
.. | .. |
---|
12160 | 13766 | rc = lpfc_pci_suspend_one_s4(pdev, msg); |
---|
12161 | 13767 | break; |
---|
12162 | 13768 | default: |
---|
12163 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13769 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12164 | 13770 | "1425 Invalid PCI device group: 0x%x\n", |
---|
12165 | 13771 | phba->pci_dev_grp); |
---|
12166 | 13772 | break; |
---|
.. | .. |
---|
12196 | 13802 | rc = lpfc_pci_resume_one_s4(pdev); |
---|
12197 | 13803 | break; |
---|
12198 | 13804 | default: |
---|
12199 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13805 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12200 | 13806 | "1426 Invalid PCI device group: 0x%x\n", |
---|
12201 | 13807 | phba->pci_dev_grp); |
---|
12202 | 13808 | break; |
---|
.. | .. |
---|
12234 | 13840 | rc = lpfc_io_error_detected_s4(pdev, state); |
---|
12235 | 13841 | break; |
---|
12236 | 13842 | default: |
---|
12237 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13843 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12238 | 13844 | "1427 Invalid PCI device group: 0x%x\n", |
---|
12239 | 13845 | phba->pci_dev_grp); |
---|
12240 | 13846 | break; |
---|
.. | .. |
---|
12271 | 13877 | rc = lpfc_io_slot_reset_s4(pdev); |
---|
12272 | 13878 | break; |
---|
12273 | 13879 | default: |
---|
12274 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13880 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12275 | 13881 | "1428 Invalid PCI device group: 0x%x\n", |
---|
12276 | 13882 | phba->pci_dev_grp); |
---|
12277 | 13883 | break; |
---|
.. | .. |
---|
12303 | 13909 | lpfc_io_resume_s4(pdev); |
---|
12304 | 13910 | break; |
---|
12305 | 13911 | default: |
---|
12306 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
---|
| 13912 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
---|
12307 | 13913 | "1429 Invalid PCI device group: 0x%x\n", |
---|
12308 | 13914 | phba->pci_dev_grp); |
---|
12309 | 13915 | break; |
---|
.. | .. |
---|
12321 | 13927 | * is destroyed. |
---|
12322 | 13928 | * |
---|
12323 | 13929 | **/ |
---|
12324 | | -void |
---|
| 13930 | +static void |
---|
12325 | 13931 | lpfc_sli4_oas_verify(struct lpfc_hba *phba) |
---|
12326 | 13932 | { |
---|
12327 | 13933 | |
---|
.. | .. |
---|
12332 | 13938 | phba->cfg_fof = 1; |
---|
12333 | 13939 | } else { |
---|
12334 | 13940 | phba->cfg_fof = 0; |
---|
12335 | | - if (phba->device_data_mem_pool) |
---|
12336 | | - mempool_destroy(phba->device_data_mem_pool); |
---|
| 13941 | + mempool_destroy(phba->device_data_mem_pool); |
---|
12337 | 13942 | phba->device_data_mem_pool = NULL; |
---|
12338 | 13943 | } |
---|
12339 | 13944 | |
---|
.. | .. |
---|
12341 | 13946 | } |
---|
12342 | 13947 | |
---|
12343 | 13948 | /** |
---|
12344 | | - * lpfc_fof_queue_setup - Set up all the fof queues |
---|
| 13949 | + * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter |
---|
12345 | 13950 | * @phba: pointer to lpfc hba data structure. |
---|
12346 | 13951 | * |
---|
12347 | | - * This routine is invoked to set up all the fof queues for the FC HBA |
---|
12348 | | - * operation. |
---|
12349 | | - * |
---|
12350 | | - * Return codes |
---|
12351 | | - * 0 - successful |
---|
12352 | | - * -ENOMEM - No available memory |
---|
| 13952 | + * This routine checks whether the adapter supports RAS FW logging and |
---|
| 13953 | + * whether logging has been enabled on the PCI function chosen for it. |
---|
12353 | 13954 | **/ |
---|
12354 | | -int |
---|
12355 | | -lpfc_fof_queue_setup(struct lpfc_hba *phba) |
---|
| 13955 | +void |
---|
| 13956 | +lpfc_sli4_ras_init(struct lpfc_hba *phba) |
---|
12356 | 13957 | { |
---|
12357 | | - struct lpfc_sli_ring *pring; |
---|
12358 | | - int rc; |
---|
12359 | | - |
---|
12360 | | - rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); |
---|
12361 | | - if (rc) |
---|
12362 | | - return -ENOMEM; |
---|
12363 | | - |
---|
12364 | | - if (phba->cfg_fof) { |
---|
12365 | | - |
---|
12366 | | - rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, |
---|
12367 | | - phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); |
---|
12368 | | - if (rc) |
---|
12369 | | - goto out_oas_cq; |
---|
12370 | | - |
---|
12371 | | - rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, |
---|
12372 | | - phba->sli4_hba.oas_cq, LPFC_FCP); |
---|
12373 | | - if (rc) |
---|
12374 | | - goto out_oas_wq; |
---|
12375 | | - |
---|
12376 | | - /* Bind this CQ/WQ to the NVME ring */ |
---|
12377 | | - pring = phba->sli4_hba.oas_wq->pring; |
---|
12378 | | - pring->sli.sli4.wqp = |
---|
12379 | | - (void *)phba->sli4_hba.oas_wq; |
---|
12380 | | - phba->sli4_hba.oas_cq->pring = pring; |
---|
12381 | | - } |
---|
12382 | | - |
---|
12383 | | - return 0; |
---|
12384 | | - |
---|
12385 | | -out_oas_wq: |
---|
12386 | | - lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); |
---|
12387 | | -out_oas_cq: |
---|
12388 | | - lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); |
---|
12389 | | - return rc; |
---|
12390 | | - |
---|
12391 | | -} |
---|
12392 | | - |
---|
12393 | | -/** |
---|
12394 | | - * lpfc_fof_queue_create - Create all the fof queues |
---|
12395 | | - * @phba: pointer to lpfc hba data structure. |
---|
12396 | | - * |
---|
12397 | | - * This routine is invoked to allocate all the fof queues for the FC HBA |
---|
12398 | | - * operation. For each SLI4 queue type, the parameters such as queue entry |
---|
12399 | | - * count (queue depth) shall be taken from the module parameter. For now, |
---|
12400 | | - * we just use some constant number as place holder. |
---|
12401 | | - * |
---|
12402 | | - * Return codes |
---|
12403 | | - * 0 - successful |
---|
12404 | | - * -ENOMEM - No available memory |
---|
12405 | | - * -EIO - The mailbox failed to complete successfully. |
---|
12406 | | - **/ |
---|
12407 | | -int |
---|
12408 | | -lpfc_fof_queue_create(struct lpfc_hba *phba) |
---|
12409 | | -{ |
---|
12410 | | - struct lpfc_queue *qdesc; |
---|
12411 | | - uint32_t wqesize; |
---|
12412 | | - |
---|
12413 | | - /* Create FOF EQ */ |
---|
12414 | | - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, |
---|
12415 | | - phba->sli4_hba.eq_esize, |
---|
12416 | | - phba->sli4_hba.eq_ecount); |
---|
12417 | | - if (!qdesc) |
---|
12418 | | - goto out_error; |
---|
12419 | | - |
---|
12420 | | - qdesc->qe_valid = 1; |
---|
12421 | | - phba->sli4_hba.fof_eq = qdesc; |
---|
12422 | | - |
---|
12423 | | - if (phba->cfg_fof) { |
---|
12424 | | - |
---|
12425 | | - /* Create OAS CQ */ |
---|
12426 | | - if (phba->enab_exp_wqcq_pages) |
---|
12427 | | - qdesc = lpfc_sli4_queue_alloc(phba, |
---|
12428 | | - LPFC_EXPANDED_PAGE_SIZE, |
---|
12429 | | - phba->sli4_hba.cq_esize, |
---|
12430 | | - LPFC_CQE_EXP_COUNT); |
---|
| 13958 | + switch (phba->pcidev->device) { |
---|
| 13959 | + case PCI_DEVICE_ID_LANCER_G6_FC: |
---|
| 13960 | + case PCI_DEVICE_ID_LANCER_G7_FC: |
---|
| 13961 | + phba->ras_fwlog.ras_hwsupport = true; |
---|
| 13962 | + if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && |
---|
| 13963 | + phba->cfg_ras_fwlog_buffsize) |
---|
| 13964 | + phba->ras_fwlog.ras_enabled = true; |
---|
12431 | 13965 | else |
---|
12432 | | - qdesc = lpfc_sli4_queue_alloc(phba, |
---|
12433 | | - LPFC_DEFAULT_PAGE_SIZE, |
---|
12434 | | - phba->sli4_hba.cq_esize, |
---|
12435 | | - phba->sli4_hba.cq_ecount); |
---|
12436 | | - if (!qdesc) |
---|
12437 | | - goto out_error; |
---|
12438 | | - |
---|
12439 | | - qdesc->qe_valid = 1; |
---|
12440 | | - phba->sli4_hba.oas_cq = qdesc; |
---|
12441 | | - |
---|
12442 | | - /* Create OAS WQ */ |
---|
12443 | | - if (phba->enab_exp_wqcq_pages) { |
---|
12444 | | - wqesize = (phba->fcp_embed_io) ? |
---|
12445 | | - LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; |
---|
12446 | | - qdesc = lpfc_sli4_queue_alloc(phba, |
---|
12447 | | - LPFC_EXPANDED_PAGE_SIZE, |
---|
12448 | | - wqesize, |
---|
12449 | | - LPFC_WQE_EXP_COUNT); |
---|
12450 | | - } else |
---|
12451 | | - qdesc = lpfc_sli4_queue_alloc(phba, |
---|
12452 | | - LPFC_DEFAULT_PAGE_SIZE, |
---|
12453 | | - phba->sli4_hba.wq_esize, |
---|
12454 | | - phba->sli4_hba.wq_ecount); |
---|
12455 | | - |
---|
12456 | | - if (!qdesc) |
---|
12457 | | - goto out_error; |
---|
12458 | | - |
---|
12459 | | - phba->sli4_hba.oas_wq = qdesc; |
---|
12460 | | - list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); |
---|
12461 | | - |
---|
| 13966 | + phba->ras_fwlog.ras_enabled = false; |
---|
| 13967 | + break; |
---|
| 13968 | + default: |
---|
| 13969 | + phba->ras_fwlog.ras_hwsupport = false; |
---|
12462 | 13970 | } |
---|
12463 | | - return 0; |
---|
12464 | | - |
---|
12465 | | -out_error: |
---|
12466 | | - lpfc_fof_queue_destroy(phba); |
---|
12467 | | - return -ENOMEM; |
---|
12468 | 13971 | } |
---|
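lpfc_sli4_ras_init() only classifies support; buffer allocation and FW-side enablement happen later in lpfc_sli4_ras_setup(), which the probe path invokes unconditionally (see the hunk near the top of this section) and which can be presumed to re-check these flags itself. A small sketch that makes the gating explicit, reusing the ras_fwlog fields from the diff; the wrapper name is illustrative:

	/* ras_hwsupport: the ASIC (Lancer G6/G7) can do RAS FW logging.
	 * ras_enabled:   this PCI function was selected for logging and a
	 *                nonzero log buffer size was configured. */
	static void lpfc_ras_bringup_sketch(struct lpfc_hba *phba)
	{
		lpfc_sli4_ras_init(phba);		/* classify first */

		if (phba->ras_fwlog.ras_enabled)
			lpfc_sli4_ras_setup(phba);	/* then enable */
	}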
12469 | 13972 | |
---|
12470 | | -/** |
---|
12471 | | - * lpfc_fof_queue_destroy - Destroy all the fof queues |
---|
12472 | | - * @phba: pointer to lpfc hba data structure. |
---|
12473 | | - * |
---|
12474 | | - * This routine is invoked to release all the SLI4 queues with the FC HBA |
---|
12475 | | - * operation. |
---|
12476 | | - * |
---|
12477 | | - * Return codes |
---|
12478 | | - * 0 - successful |
---|
12479 | | - **/ |
---|
12480 | | -int |
---|
12481 | | -lpfc_fof_queue_destroy(struct lpfc_hba *phba) |
---|
12482 | | -{ |
---|
12483 | | - /* Release FOF Event queue */ |
---|
12484 | | - if (phba->sli4_hba.fof_eq != NULL) { |
---|
12485 | | - lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); |
---|
12486 | | - phba->sli4_hba.fof_eq = NULL; |
---|
12487 | | - } |
---|
12488 | | - |
---|
12489 | | - /* Release OAS Completion queue */ |
---|
12490 | | - if (phba->sli4_hba.oas_cq != NULL) { |
---|
12491 | | - lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); |
---|
12492 | | - phba->sli4_hba.oas_cq = NULL; |
---|
12493 | | - } |
---|
12494 | | - |
---|
12495 | | - /* Release OAS Work queue */ |
---|
12496 | | - if (phba->sli4_hba.oas_wq != NULL) { |
---|
12497 | | - lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); |
---|
12498 | | - phba->sli4_hba.oas_wq = NULL; |
---|
12499 | | - } |
---|
12500 | | - return 0; |
---|
12501 | | -} |
---|
12502 | 13973 | |
---|
12503 | 13974 | MODULE_DEVICE_TABLE(pci, lpfc_id_table); |
---|
12504 | 13975 | |
---|
.. | .. |
---|
12546 | 14017 | { |
---|
12547 | 14018 | int error = 0; |
---|
12548 | 14019 | |
---|
12549 | | - printk(LPFC_MODULE_DESC "\n"); |
---|
12550 | | - printk(LPFC_COPYRIGHT "\n"); |
---|
| 14020 | + pr_info(LPFC_MODULE_DESC "\n"); |
---|
| 14021 | + pr_info(LPFC_COPYRIGHT "\n"); |
---|
12551 | 14022 | |
---|
12552 | 14023 | error = misc_register(&lpfc_mgmt_dev); |
---|
12553 | 14024 | if (error) |
---|
12554 | 14025 | printk(KERN_ERR "Could not register lpfcmgmt device, " |
---|
12555 | 14026 | "misc_register returned with status %d", error); |
---|
12556 | 14027 | |
---|
| 14028 | + error = -ENOMEM; |
---|
12557 | 14029 | lpfc_transport_functions.vport_create = lpfc_vport_create; |
---|
12558 | 14030 | lpfc_transport_functions.vport_delete = lpfc_vport_delete; |
---|
12559 | 14031 | lpfc_transport_template = |
---|
12560 | 14032 | fc_attach_transport(&lpfc_transport_functions); |
---|
12561 | 14033 | if (lpfc_transport_template == NULL) |
---|
12562 | | - return -ENOMEM; |
---|
| 14034 | + goto unregister; |
---|
12563 | 14035 | lpfc_vport_transport_template = |
---|
12564 | 14036 | fc_attach_transport(&lpfc_vport_transport_functions); |
---|
12565 | 14037 | if (lpfc_vport_transport_template == NULL) { |
---|
12566 | 14038 | fc_release_transport(lpfc_transport_template); |
---|
12567 | | - return -ENOMEM; |
---|
| 14039 | + goto unregister; |
---|
12568 | 14040 | } |
---|
12569 | 14041 | lpfc_nvme_cmd_template(); |
---|
12570 | 14042 | lpfc_nvmet_cmd_template(); |
---|
12571 | 14043 | |
---|
12572 | 14044 | /* Initialize in case vector mapping is needed */ |
---|
12573 | | - lpfc_used_cpu = NULL; |
---|
12574 | 14045 | lpfc_present_cpu = num_present_cpus(); |
---|
12575 | 14046 | |
---|
| 14047 | + error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, |
---|
| 14048 | + "lpfc/sli4:online", |
---|
| 14049 | + lpfc_cpu_online, lpfc_cpu_offline); |
---|
| 14050 | + if (error < 0) |
---|
| 14051 | + goto cpuhp_failure; |
---|
| 14052 | + lpfc_cpuhp_state = error; |
---|
| 14053 | + |
---|
12576 | 14054 | error = pci_register_driver(&lpfc_driver); |
---|
12577 | | - if (error) { |
---|
12578 | | - fc_release_transport(lpfc_transport_template); |
---|
12579 | | - fc_release_transport(lpfc_vport_transport_template); |
---|
12580 | | - } |
---|
| 14055 | + if (error) |
---|
| 14056 | + goto unwind; |
---|
12581 | 14057 | |
---|
12582 | 14058 | return error; |
---|
| 14059 | + |
---|
| 14060 | +unwind: |
---|
| 14061 | + cpuhp_remove_multi_state(lpfc_cpuhp_state); |
---|
| 14062 | +cpuhp_failure: |
---|
| 14063 | + fc_release_transport(lpfc_transport_template); |
---|
| 14064 | + fc_release_transport(lpfc_vport_transport_template); |
---|
| 14065 | +unregister: |
---|
| 14066 | + misc_deregister(&lpfc_mgmt_dev); |
---|
| 14067 | + |
---|
| 14068 | + return error; |
---|
| 14069 | +} |
---|
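Because the hotplug state is registered with CPUHP_AP_ONLINE_DYN, the core picks the slot, and a successful cpuhp_setup_state_multi() returns that state number (>= 0) rather than 0; hence the `error < 0` test and the save into lpfc_cpuhp_state above. A standalone sketch of the same lifecycle, with everything outside the cpuhp API illustrative:

	static enum cpuhp_state sketch_state;

	static int sketch_cpu_online(unsigned int cpu, struct hlist_node *node);
	static int sketch_cpu_offline(unsigned int cpu, struct hlist_node *node);

	static int __init sketch_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "sketch:online",
					      sketch_cpu_online,
					      sketch_cpu_offline);
		if (ret < 0)
			return ret;	/* negative errno on failure */
		sketch_state = ret;	/* the dynamically chosen slot */
		return 0;
	}

	static void __exit sketch_exit(void)
	{
		/* all instances must be removed before retiring the state */
		cpuhp_remove_multi_state(sketch_state);
	}

This mirrors lpfc_exit() below, which removes the multi state only after pci_unregister_driver() has torn down every HBA instance.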
| 14070 | + |
---|
| 14071 | +void lpfc_dmp_dbg(struct lpfc_hba *phba) |
---|
| 14072 | +{ |
---|
| 14073 | + unsigned int start_idx; |
---|
| 14074 | + unsigned int dbg_cnt; |
---|
| 14075 | + unsigned int temp_idx; |
---|
| 14076 | + int i; |
---|
| 14077 | + int j = 0; |
---|
| 14078 | + unsigned long rem_nsec; |
---|
| 14079 | + |
---|
| 14080 | + if (phba->cfg_log_verbose) |
---|
| 14081 | + return; |
---|
| 14082 | + |
---|
| 14083 | + if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) |
---|
| 14084 | + return; |
---|
| 14085 | + |
---|
| 14086 | + start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; |
---|
| 14087 | + dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); |
---|
| 14088 | + temp_idx = start_idx; |
---|
| 14089 | + if (dbg_cnt >= DBG_LOG_SZ) { |
---|
| 14090 | + dbg_cnt = DBG_LOG_SZ; |
---|
| 14091 | + temp_idx -= 1; |
---|
| 14092 | + } else { |
---|
| 14093 | + if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { |
---|
| 14094 | + temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; |
---|
| 14095 | + } else { |
---|
| 14096 | + if (start_idx < dbg_cnt) |
---|
| 14097 | + start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); |
---|
| 14098 | + else |
---|
| 14099 | + start_idx -= dbg_cnt; |
---|
| 14100 | + } |
---|
| 14101 | + } |
---|
| 14102 | + dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", |
---|
| 14103 | + start_idx, temp_idx, dbg_cnt); |
---|
| 14104 | + |
---|
| 14105 | + for (i = 0; i < dbg_cnt; i++) { |
---|
| 14106 | + if ((start_idx + i) < DBG_LOG_SZ) |
---|
| 14107 | + temp_idx = (start_idx + i) % DBG_LOG_SZ; |
---|
| 14108 | + else |
---|
| 14109 | + temp_idx = j++; |
---|
| 14110 | + rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); |
---|
| 14111 | + dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", |
---|
| 14112 | + temp_idx, |
---|
| 14113 | + (unsigned long)phba->dbg_log[temp_idx].t_ns, |
---|
| 14114 | + rem_nsec / 1000, |
---|
| 14115 | + phba->dbg_log[temp_idx].log); |
---|
| 14116 | + } |
---|
| 14117 | + atomic_set(&phba->dbg_log_cnt, 0); |
---|
| 14118 | + atomic_set(&phba->dbg_log_dmping, 0); |
---|
| 14119 | +} |
---|
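The index arithmetic in lpfc_dmp_dbg() recovers the dump window from a free-running write index and a saturating entry count: once the ring has wrapped, the oldest entry sits at the current head; otherwise it lies cnt slots behind it, with care taken not to underflow the unsigned index. A compact sketch of that computation (DBG_LOG_SZ as in the driver, the helper name is illustrative):

	/* Slot holding the oldest valid message. */
	static unsigned int lpfc_dbg_oldest_slot(unsigned int write_idx,
						 unsigned int cnt)
	{
		unsigned int head = write_idx % DBG_LOG_SZ;

		if (cnt >= DBG_LOG_SZ)	/* wrapped: head is the oldest */
			return head;
		/* partial fill: step back cnt slots without underflowing */
		return (head + DBG_LOG_SZ - cnt) % DBG_LOG_SZ;
	}

One side effect worth knowing: do_div() divides in place, so after a dump each entry's t_ns holds whole seconds rather than the original nanosecond timestamp.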
| 14120 | + |
---|
| 14121 | +__printf(2, 3) |
---|
| 14122 | +void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) |
---|
| 14123 | +{ |
---|
| 14124 | + unsigned int idx; |
---|
| 14125 | + va_list args; |
---|
| 14126 | + int dbg_dmping = atomic_read(&phba->dbg_log_dmping); |
---|
| 14127 | + struct va_format vaf; |
---|
| 14128 | + |
---|
| 14129 | + |
---|
| 14130 | + va_start(args, fmt); |
---|
| 14131 | + if (unlikely(dbg_dmping)) { |
---|
| 14132 | + vaf.fmt = fmt; |
---|
| 14133 | + vaf.va = &args; |
---|
| 14134 | + dev_info(&phba->pcidev->dev, "%pV", &vaf); |
---|
| 14135 | + va_end(args); |
---|
| 14136 | + return; |
---|
| 14137 | + } |
---|
| 14138 | + idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % |
---|
| 14139 | + DBG_LOG_SZ; |
---|
| 14140 | + |
---|
| 14141 | + atomic_inc(&phba->dbg_log_cnt); |
---|
| 14142 | + |
---|
| 14143 | + vscnprintf(phba->dbg_log[idx].log, |
---|
| 14144 | + sizeof(phba->dbg_log[idx].log), fmt, args); |
---|
| 14145 | + va_end(args); |
---|
| 14146 | + |
---|
| 14147 | + phba->dbg_log[idx].t_ns = local_clock(); |
---|
12583 | 14148 | } |
---|
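The writer side in lpfc_dbg_print() stays lock-free: a single atomic fetch-and-add claims a unique slot before any formatting, so concurrent callers never share an entry, and output is diverted straight to the console only while a dump is in flight. The claim step in isolation (illustrative wrapper, not a driver API):

	/* The ever-growing counter maps onto the fixed ring by modulo;
	 * wraparound of the atomic is seamless when DBG_LOG_SZ is a power
	 * of two, since the modulo stays continuous across the wrap. */
	static unsigned int lpfc_dbg_claim_slot(atomic_t *write_idx)
	{
		return (unsigned int)atomic_fetch_add(1, write_idx) % DBG_LOG_SZ;
	}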
12584 | 14149 | |
---|
12585 | 14150 | /** |
---|
.. | .. |
---|
12594 | 14159 | { |
---|
12595 | 14160 | misc_deregister(&lpfc_mgmt_dev); |
---|
12596 | 14161 | pci_unregister_driver(&lpfc_driver); |
---|
| 14162 | + cpuhp_remove_multi_state(lpfc_cpuhp_state); |
---|
12597 | 14163 | fc_release_transport(lpfc_transport_template); |
---|
12598 | 14164 | fc_release_transport(lpfc_vport_transport_template); |
---|
12599 | | - if (_dump_buf_data) { |
---|
12600 | | - printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " |
---|
12601 | | - "_dump_buf_data at 0x%p\n", |
---|
12602 | | - (1L << _dump_buf_data_order), _dump_buf_data); |
---|
12603 | | - free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); |
---|
12604 | | - } |
---|
12605 | | - |
---|
12606 | | - if (_dump_buf_dif) { |
---|
12607 | | - printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " |
---|
12608 | | - "_dump_buf_dif at 0x%p\n", |
---|
12609 | | - (1L << _dump_buf_dif_order), _dump_buf_dif); |
---|
12610 | | - free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); |
---|
12611 | | - } |
---|
12612 | | - kfree(lpfc_used_cpu); |
---|
12613 | 14165 | idr_destroy(&lpfc_hba_index); |
---|
12614 | 14166 | } |
---|
12615 | 14167 | |
---|