forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,6 +20,13 @@
  * included with this package.                                     *
  *******************************************************************/

+#include <linux/irq_poll.h>
+#include <linux/cpufreq.h>
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
 #define LPFC_ACTIVE_MBOX_WAIT_CNT       100
 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO     10000
 #define LPFC_XRI_EXCH_BUSY_WAIT_T1      10
@@ -36,14 +43,19 @@
 #define LPFC_NEMBED_MBOX_SGL_CNT        254

 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
-#define LPFC_HBA_IO_CHAN_MIN    0
-#define LPFC_HBA_IO_CHAN_MAX    32
-#define LPFC_FCP_IO_CHAN_DEF    4
-#define LPFC_NVME_IO_CHAN_DEF   0
+#define LPFC_HBA_HDWQ_MIN       0
+#define LPFC_HBA_HDWQ_MAX       256
+#define LPFC_HBA_HDWQ_DEF       LPFC_HBA_HDWQ_MIN

-/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN      0
+#define LPFC_IRQ_CHANN_MAX      256
+#define LPFC_IRQ_CHANN_DEF      LPFC_IRQ_CHANN_MIN

-#define LPFC_FOF_IO_CHAN_NUM    1
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN       0
+#define LPFC_FCP_MQ_THRESHOLD_MAX       256
+#define LPFC_FCP_MQ_THRESHOLD_DEF       8

 /*
  * Provide the default FCF Record attributes used by the driver
@@ -107,27 +119,11 @@
 enum lpfc_sli4_queue_subtype {
         LPFC_NONE,
         LPFC_MBOX,
-        LPFC_FCP,
+        LPFC_IO,
         LPFC_ELS,
-        LPFC_NVME,
         LPFC_NVMET,
         LPFC_NVME_LS,
         LPFC_USOL
-};
-
-union sli4_qe {
-        void *address;
-        struct lpfc_eqe *eqe;
-        struct lpfc_cqe *cqe;
-        struct lpfc_mcqe *mcqe;
-        struct lpfc_wcqe_complete *wcqe_complete;
-        struct lpfc_wcqe_release *wcqe_release;
-        struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
-        struct lpfc_rcqe_complete *rcqe_complete;
-        struct lpfc_mqe *mqe;
-        union lpfc_wqe *wqe;
-        union lpfc_wqe128 *wqe128;
-        struct lpfc_rqe *rqe;
 };

 /* RQ buffer list */
@@ -142,9 +138,36 @@
                             struct rqb_dmabuf *);
 };

+enum lpfc_poll_mode {
+        LPFC_QUEUE_WORK,
+        LPFC_IRQ_POLL
+};
+
+struct lpfc_idle_stat {
+        u64 prev_idle;
+        u64 prev_wall;
+};
+
 struct lpfc_queue {
         struct list_head list;
         struct list_head wq_list;
+
+        /*
+         * If interrupts are in effect on _all_ the eq's the footprint
+         * of polling code is zero (except mode). This memory is chec-
+         * ked for every io to see if the io needs to be polled and
+         * while completion to check if the eq's needs to be rearmed.
+         * Keep in same cacheline as the queue ptr to avoid cpu fetch
+         * stalls. Using 1B memory will leave us with 7B hole. Fill
+         * it with other frequently used members.
+         */
+        uint16_t last_cpu;      /* most recent cpu */
+        uint16_t hdwq;
+        uint8_t qe_valid;
+        uint8_t mode;   /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT       0
+#define LPFC_EQ_POLL            1
+
         struct list_head wqfull_list;
         enum lpfc_sli4_queue_type type;
         enum lpfc_sli4_queue_subtype subtype;
@@ -152,33 +175,67 @@
         struct list_head child_list;
         struct list_head page_list;
         struct list_head sgl_list;
+        struct list_head cpu_list;
         uint32_t entry_count;   /* Number of entries to support on the queue */
         uint32_t entry_size;    /* Size of each queue entry. */
-        uint32_t entry_repost;  /* Count of entries before doorbell is rung */
-#define LPFC_EQ_REPOST          8
-#define LPFC_MQ_REPOST          8
-#define LPFC_CQ_REPOST          64
-#define LPFC_RQ_REPOST          64
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL      32  /* For WQs */
+        uint32_t entry_cnt_per_pg;
+        uint32_t notify_interval; /* Queue Notification Interval
+                                   * For chip->host queues (EQ, CQ, RQ):
+                                   *  specifies the interval (number of
+                                   *  entries) where the doorbell is rung to
+                                   *  notify the chip of entry consumption.
+                                   * For host->chip queues (WQ):
+                                   *  specifies the interval (number of
+                                   *  entries) where consumption CQE is
+                                   *  requested to indicate WQ entries
+                                   *  consumed by the chip.
+                                   * Not used on an MQ.
+                                   */
+#define LPFC_EQ_NOTIFY_INTRVL   16
+#define LPFC_CQ_NOTIFY_INTRVL   16
+#define LPFC_WQ_NOTIFY_INTRVL   16
+#define LPFC_RQ_NOTIFY_INTRVL   16
+        uint32_t max_proc_limit; /* Queue Processing Limit
+                                  * For chip->host queues (EQ, CQ):
+                                  *  specifies the maximum number of
+                                  *  entries to be consumed in one
+                                  *  processing iteration sequence. Queue
+                                  *  will be rearmed after each iteration.
+                                  * Not used on an MQ, RQ or WQ.
+                                  */
+#define LPFC_EQ_MAX_PROC_LIMIT          256
+#define LPFC_CQ_MIN_PROC_LIMIT          64
+#define LPFC_CQ_MAX_PROC_LIMIT          LPFC_CQE_EXP_COUNT      // 4096
+#define LPFC_CQ_DEF_MAX_PROC_LIMIT      LPFC_CQE_DEF_COUNT      // 1024
+#define LPFC_CQ_MIN_THRESHOLD_TO_POLL   64
+#define LPFC_CQ_MAX_THRESHOLD_TO_POLL   LPFC_CQ_DEF_MAX_PROC_LIMIT
+#define LPFC_CQ_DEF_THRESHOLD_TO_POLL   LPFC_CQ_DEF_MAX_PROC_LIMIT
+        uint32_t queue_claimed; /* indicates queue is being processed */
         uint32_t queue_id;      /* Queue ID assigned by the hardware */
         uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
         uint32_t host_index;    /* The host's index for putting or getting */
         uint32_t hba_index;     /* The last known hba index for get or put */
+        uint32_t q_mode;

         struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
         struct lpfc_rqb *rqbp;  /* ptr to RQ buffers */

-        uint32_t q_mode;
         uint16_t page_count;    /* Number of pages allocated for this queue */
         uint16_t page_size;     /* size of page allocated for this queue */
#define LPFC_EXPANDED_PAGE_SIZE 16384
#define LPFC_DEFAULT_PAGE_SIZE  4096
-        uint16_t chann;         /* IO channel this queue is associated with */
+        uint16_t chann;         /* Hardware Queue association WQ/CQ */
+                                /* CPU affinity for EQ */
+#define LPFC_FIND_BY_EQ         0
+#define LPFC_FIND_BY_HDWQ       1
         uint8_t db_format;
#define LPFC_DB_RING_FORMAT     0x01
#define LPFC_DB_LIST_FORMAT     0x02
         uint8_t q_flag;
#define HBA_NVMET_WQFULL        0x1 /* We hit WQ Full condition for NVMET */
+#define HBA_NVMET_CQ_NOTIFY     0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK        0x2 /* EQ is a candidate for coalescing */
+#define LPFC_NVMET_CQ_NOTIFY    4
         void __iomem *db_regaddr;
         uint16_t dpp_enable;
         uint16_t dpp_id;
@@ -212,23 +269,29 @@
#define RQ_buf_posted           q_cnt_3
#define RQ_rcv_buf              q_cnt_4

-        struct work_struct irqwork;
-        struct work_struct spwork;
+        struct work_struct      irqwork;
+        struct work_struct      spwork;
+        struct delayed_work     sched_irqwork;
+        struct delayed_work     sched_spwork;

         uint64_t isr_timestamp;
-        uint8_t qe_valid;
         struct lpfc_queue *assoc_qp;
-        union sli4_qe qe[1];    /* array to index entries (must be last) */
+        struct list_head _poll_list;
+        void **q_pgs;   /* array to index entries per page */
+
+#define LPFC_IRQ_POLL_WEIGHT 256
+        struct irq_poll iop;
+        enum lpfc_poll_mode poll_mode;
 };

 struct lpfc_sli4_link {
-        uint16_t speed;
+        uint32_t speed;
         uint8_t duplex;
         uint8_t status;
         uint8_t type;
         uint8_t number;
         uint8_t fault;
-        uint16_t logical_speed;
+        uint32_t logical_speed;
         uint16_t topology;
 };

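The new iop/poll_mode members (together with the <linux/irq_poll.h> include added at the top of the file) let a completion queue be serviced either from the usual workqueue path or from softirq-context polling. Below is a minimal sketch of how such wiring typically looks; demo_process_cqes() and the demo_* functions are hypothetical stand-ins, not the driver's actual code:

/* Sketch only: plugging a CQ into the kernel's generic irq_poll facility. */
static int demo_process_cqes(struct lpfc_queue *cq, int budget);

static int demo_cq_poll_handler(struct irq_poll *iop, int budget)
{
        struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
        int done = demo_process_cqes(cq, budget);       /* hypothetical */

        if (done < budget)
                irq_poll_complete(iop); /* under budget: leave poll mode */
        return done;
}

static void demo_cq_init_poll(struct lpfc_queue *cq)
{
        /* LPFC_IRQ_POLL_WEIGHT (256) bounds the work done per poll pass */
        irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, demo_cq_poll_handler);
        cq->poll_mode = LPFC_QUEUE_WORK;        /* default: workqueue path */
}

static void demo_cq_completion(struct lpfc_queue *cq)
{
        if (cq->poll_mode == LPFC_IRQ_POLL)
                irq_poll_sched(&cq->iop);       /* poll from softirq */
        /* else: queue cq->irqwork as before */
}

irq_poll_init(), irq_poll_sched() and irq_poll_complete() are the standard <linux/irq_poll.h> entry points; the decision logic shown is only an illustration of how poll_mode is meant to be consulted.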
@@ -426,14 +489,16 @@
#define LPFC_SLI4_HANDLER_NAME_SZ       16
 struct lpfc_hba_eq_hdl {
         uint32_t idx;
+        uint16_t irq;
         char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
         struct lpfc_hba *phba;
-        atomic_t hba_eq_in_use;
-        struct cpumask *cpumask;
-        /* CPU affinitsed to or 0xffffffff if multiple */
-        uint32_t cpu;
-#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
+        struct lpfc_queue *eq;
+        struct cpumask aff_mask;
 };
+
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)

 /*BB Credit recovery value*/
 struct lpfc_bbscn_params {
@@ -492,9 +557,11 @@
         uint8_t cqav;
         uint8_t wqsize;
         uint8_t bv1s;
+        uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT    1
#define LPFC_WQ_SZ128_SUPPORT   2
         uint8_t wqpcnt;
+        uint8_t nvme;
 };

#define LPFC_CQ_4K_PAGE_SZ      0x1
@@ -512,8 +579,9 @@
#define LPFC_LNK_DAT_INVAL      0
#define LPFC_LNK_DAT_VAL        1
         uint8_t lnk_tp;
-#define LPFC_LNK_GE     0x0 /* FCoE */
-#define LPFC_LNK_FC     0x1 /* FC */
+#define LPFC_LNK_GE             0x0 /* FCoE */
+#define LPFC_LNK_FC             0x1 /* FC */
+#define LPFC_LNK_FC_TRUNKED     0x2 /* FC_Trunked */
         uint8_t lnk_no;
         uint8_t optic_state;
 };
@@ -521,16 +589,176 @@
#define LPFC_SLI4_HANDLER_CNT           (LPFC_HBA_IO_CHAN_MAX+ \
                                         LPFC_FOF_IO_CHAN_NUM)

-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
 struct lpfc_vector_map_info {
         uint16_t phys_id;
         uint16_t core_id;
-        uint16_t irq;
-        uint16_t channel_id;
+        uint16_t eq;
+        uint16_t hdwq;
+        uint16_t flag;
+#define LPFC_CPU_MAP_HYPER      0x1
+#define LPFC_CPU_MAP_UNASSIGN   0x2
+#define LPFC_CPU_FIRST_IRQ      0x4
 };
#define LPFC_VECTOR_MAP_EMPTY   0xffff

+/* Multi-XRI pool */
+#define XRI_BATCH               8
+
+struct lpfc_pbl_pool {
+        struct list_head list;
+        u32 count;
+        spinlock_t lock;        /* lock for pbl_pool */
+};
+
+struct lpfc_pvt_pool {
+        u32 low_watermark;
+        u32 high_watermark;
+
+        struct list_head list;
+        u32 count;
+        spinlock_t lock;        /* lock for pvt_pool */
+};
+
+struct lpfc_multixri_pool {
+        u32 xri_limit;
+
+        /* Starting point when searching a pbl_pool with round-robin method */
+        u32 rrb_next_hwqid;
+
+        /* Used by lpfc_adjust_pvt_pool_count.
+         * io_req_count is incremented by 1 during IO submission. The heartbeat
+         * handler uses these two variables to determine if pvt_pool is idle or
+         * busy.
+         */
+        u32 prev_io_req_count;
+        u32 io_req_count;
+
+        /* statistics */
+        u32 pbl_empty_count;
+#ifdef LPFC_MXP_STAT
+        u32 above_limit_count;
+        u32 below_limit_count;
+        u32 local_pbl_hit_count;
+        u32 other_pbl_hit_count;
+        u32 stat_max_hwm;
+
+#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at 3rd heartbeats */
+        u32 stat_pbl_count;
+        u32 stat_pvt_count;
+        u32 stat_busy_count;
+        u32 stat_snapshot_taken;
+#endif
+
+        /* TODO: Separate pvt_pool into get and put list */
+        struct lpfc_pbl_pool pbl_pool;   /* Public free XRI pool */
+        struct lpfc_pvt_pool pvt_pool;   /* Private free XRI pool */
+};
+
+struct lpfc_fc4_ctrl_stat {
+        u32 input_requests;
+        u32 output_requests;
+        u32 control_requests;
+        u32 io_cmpls;
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+struct lpfc_lock_stat {
+        uint32_t alloc_xri_get;
+        uint32_t alloc_xri_put;
+        uint32_t free_xri;
+        uint32_t wq_access;
+        uint32_t alloc_pvt_pool;
+        uint32_t mv_from_pvt_pool;
+        uint32_t mv_to_pub_pool;
+        uint32_t mv_to_pvt_pool;
+        uint32_t free_pub_pool;
+        uint32_t free_pvt_pool;
+};
+#endif
+
+struct lpfc_eq_intr_info {
+        struct list_head list;
+        uint32_t icnt;
+};
+
 /* SLI4 HBA data structure entries */
+struct lpfc_sli4_hdw_queue {
+        /* Pointers to the constructed SLI4 queues */
+        struct lpfc_queue *hba_eq;  /* Event queues for HBA */
+        struct lpfc_queue *io_cq;   /* Fast-path FCP & NVME compl queue */
+        struct lpfc_queue *io_wq;   /* Fast-path FCP & NVME work queue */
+        uint16_t io_cq_map;
+
+        /* Keep track of IO buffers for this hardware queue */
+        spinlock_t io_buf_list_get_lock;  /* Common buf alloc list lock */
+        struct list_head lpfc_io_buf_list_get;
+        spinlock_t io_buf_list_put_lock;  /* Common buf free list lock */
+        struct list_head lpfc_io_buf_list_put;
+        spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+        struct list_head lpfc_abts_io_buf_list;
+        uint32_t total_io_bufs;
+        uint32_t get_io_bufs;
+        uint32_t put_io_bufs;
+        uint32_t empty_io_bufs;
+        uint32_t abts_scsi_io_bufs;
+        uint32_t abts_nvme_io_bufs;
+
+        /* Multi-XRI pool per HWQ */
+        struct lpfc_multixri_pool *p_multixri_pool;
+
+        /* FC-4 Stats counters */
+        struct lpfc_fc4_ctrl_stat nvme_cstat;
+        struct lpfc_fc4_ctrl_stat scsi_cstat;
+#ifdef LPFC_HDWQ_LOCK_STAT
+        struct lpfc_lock_stat lock_conflict;
+#endif
+
+        /* Per HDWQ pool resources */
+        struct list_head sgl_list;
+        struct list_head cmd_rsp_buf_list;
+
+        /* Lock for syncing Per HDWQ pool resources */
+        spinlock_t hdwq_lock;
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+/* compile time trylock stats */
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+        { \
+        int only_once = 1; \
+        while (spin_trylock_irqsave(lock, flag) == 0) { \
+                if (only_once) { \
+                        only_once = 0; \
+                        qp->lock_conflict.lstat++; \
+                } \
+        } \
+        }
+#define lpfc_qp_spin_lock(lock, qp, lstat) \
+        { \
+        int only_once = 1; \
+        while (spin_trylock(lock) == 0) { \
+                if (only_once) { \
+                        only_once = 0; \
+                        qp->lock_conflict.lstat++; \
+                } \
+        } \
+        }
+#else
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+        spin_lock_irqsave(lock, flag)
+#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
+#endif
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+struct lpfc_hdwq_stat {
+        u32 hdwq_no;
+        u32 rcv_io;
+        u32 xmt_io;
+        u32 cmpl_io;
+};
+#endif
+
 struct lpfc_sli4_hba {
         void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
                                            * config space registers
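The LPFC_HDWQ_LOCK_STAT wrappers added above turn selected lock acquisitions into trylock loops: on the first failed spin_trylock*() the named lock_conflict counter in the hardware queue is bumped once, then the caller keeps retrying until it wins the lock. A hedged usage sketch follows; the caller is hypothetical, but the field names come from the structures in this hunk:

/* Illustrative caller of the trylock-stat wrappers, not driver code. */
static void demo_get_io_buf(struct lpfc_sli4_hdw_queue *qp)
{
        unsigned long iflag;

        /* On contention, qp->lock_conflict.alloc_xri_get is incremented
         * exactly once per acquisition attempt that had to spin.
         */
        lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
                                  qp, alloc_xri_get);
        /* ... take a buffer off qp->lpfc_io_buf_list_get ... */
        spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
}

When LPFC_HDWQ_LOCK_STAT is not defined the wrapper collapses to a plain spin_lock_irqsave(), so the extra qp/lstat arguments cost nothing.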
@@ -599,21 +827,19 @@
         struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */

         void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
-        uint32_t (*sli4_eq_release)(struct lpfc_queue *q, bool arm);
-        uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);
+        void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq,
+                                uint32_t count, bool arm);
+        void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
+                                uint32_t count, bool arm);

         /* Pointers to the constructed SLI4 queues */
-        struct lpfc_queue **hba_eq;  /* Event queues for HBA */
-        struct lpfc_queue **fcp_cq;  /* Fast-path FCP compl queue */
-        struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+        struct lpfc_sli4_hdw_queue *hdwq;
+        struct list_head lpfc_wq_list;
+
+        /* Pointers to the constructed SLI4 queues for NVMET */
         struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
         struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
         struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
-        struct lpfc_queue **fcp_wq;  /* Fast-path FCP work queue */
-        struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
-        uint16_t *fcp_cq_map;
-        uint16_t *nvme_cq_map;
-        struct list_head lpfc_wq_list;

         struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
         struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
@@ -631,13 +857,7 @@
         uint32_t ulp0_mode;     /* ULP0 protocol mode */
         uint32_t ulp1_mode;     /* ULP1 protocol mode */

-        struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
-
         /* Optimized Access Storage specific queues/structures */
-
-        struct lpfc_queue *oas_cq; /* OAS completion queue */
-        struct lpfc_queue *oas_wq; /* OAS Work queue */
-        struct lpfc_sli_ring *oas_ring;
         uint64_t oas_next_lun;
         uint8_t oas_next_tgt_wwpn[8];
         uint8_t oas_next_vpt_wwpn[8];
@@ -663,22 +883,24 @@
         uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
         uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
         uint16_t next_rpi;
-        uint16_t nvme_xri_max;
-        uint16_t nvme_xri_cnt;
-        uint16_t nvme_xri_start;
-        uint16_t scsi_xri_max;
-        uint16_t scsi_xri_cnt;
-        uint16_t scsi_xri_start;
+        uint16_t io_xri_max;
+        uint16_t io_xri_cnt;
+        uint16_t io_xri_start;
         uint16_t els_xri_cnt;
         uint16_t nvmet_xri_cnt;
         uint16_t nvmet_io_wait_cnt;
         uint16_t nvmet_io_wait_total;
+        uint16_t cq_max;
+        struct lpfc_queue **cq_lookup;
         struct list_head lpfc_els_sgl_list;
         struct list_head lpfc_abts_els_sgl_list;
+        spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
+        struct list_head lpfc_abts_io_buf_list;
         struct list_head lpfc_nvmet_sgl_list;
+        spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
         struct list_head lpfc_abts_nvmet_ctx_list;
-        struct list_head lpfc_abts_scsi_buf_list;
-        struct list_head lpfc_abts_nvme_buf_list;
+        spinlock_t t_active_list_lock; /* list of active NVMET IOs */
+        struct list_head t_active_ctx_list;
         struct list_head lpfc_nvmet_io_wait_list;
         struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
         struct lpfc_sglq **lpfc_sglq_active_list;
@@ -698,8 +920,9 @@
         struct list_head sp_queue_event;
         struct list_head sp_cqe_event_pool;
         struct list_head sp_asynce_work_queue;
-        struct list_head sp_fcp_xri_aborted_work_queue;
+        spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
         struct list_head sp_els_xri_aborted_work_queue;
+        spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
         struct list_head sp_unsol_work_queue;
         struct lpfc_sli4_link link_state;
         struct lpfc_sli4_lnk_info lnk_info;
@@ -707,17 +930,46 @@
#define LPFC_SLI4_PPNAME_NON    0
#define LPFC_SLI4_PPNAME_GET    1
         struct lpfc_iov iov;
-        spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
-        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
         spinlock_t sgl_list_lock; /* list of aborted els IOs */
         spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
         uint32_t physical_port;

         /* CPU to vector mapping information */
         struct lpfc_vector_map_info *cpu_map;
-        uint16_t num_online_cpu;
+        uint16_t num_possible_cpu;
         uint16_t num_present_cpu;
+        struct cpumask irq_aff_mask;
         uint16_t curr_disp_cpu;
+        struct lpfc_eq_intr_info __percpu *eq_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+        struct lpfc_hdwq_stat __percpu *c_stat;
+#endif
+        struct lpfc_idle_stat *idle_stat;
+        uint32_t conf_trunk;
+#define lpfc_conf_trunk_port0_WORD      conf_trunk
+#define lpfc_conf_trunk_port0_SHIFT     0
+#define lpfc_conf_trunk_port0_MASK      0x1
+#define lpfc_conf_trunk_port1_WORD      conf_trunk
+#define lpfc_conf_trunk_port1_SHIFT     1
+#define lpfc_conf_trunk_port1_MASK      0x1
+#define lpfc_conf_trunk_port2_WORD      conf_trunk
+#define lpfc_conf_trunk_port2_SHIFT     2
+#define lpfc_conf_trunk_port2_MASK      0x1
+#define lpfc_conf_trunk_port3_WORD      conf_trunk
+#define lpfc_conf_trunk_port3_SHIFT     3
+#define lpfc_conf_trunk_port3_MASK      0x1
+#define lpfc_conf_trunk_port0_nd_WORD   conf_trunk
+#define lpfc_conf_trunk_port0_nd_SHIFT  4
+#define lpfc_conf_trunk_port0_nd_MASK   0x1
+#define lpfc_conf_trunk_port1_nd_WORD   conf_trunk
+#define lpfc_conf_trunk_port1_nd_SHIFT  5
+#define lpfc_conf_trunk_port1_nd_MASK   0x1
+#define lpfc_conf_trunk_port2_nd_WORD   conf_trunk
+#define lpfc_conf_trunk_port2_nd_SHIFT  6
+#define lpfc_conf_trunk_port2_nd_MASK   0x1
+#define lpfc_conf_trunk_port3_nd_WORD   conf_trunk
+#define lpfc_conf_trunk_port3_nd_SHIFT  7
+#define lpfc_conf_trunk_port3_nd_MASK   0x1
 };

 enum lpfc_sge_type {
8011053 uint16_t);
8021054
8031055 void lpfc_sli4_hba_reset(struct lpfc_hba *);
804
-struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
805
- uint32_t, uint32_t);
1056
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
1057
+ uint32_t page_size,
1058
+ uint32_t entry_size,
1059
+ uint32_t entry_count, int cpu);
8061060 void lpfc_sli4_queue_free(struct lpfc_queue *);
8071061 int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
808
-int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
809
- uint32_t numq, uint32_t imax);
1062
+void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
1063
+ uint32_t numq, uint32_t usdelay);
8101064 int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
8111065 struct lpfc_queue *, uint32_t, uint32_t);
8121066 int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
813
- struct lpfc_queue **eqp, uint32_t type,
1067
+ struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
8141068 uint32_t subtype);
8151069 int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
8161070 struct lpfc_queue *, uint32_t);
....@@ -830,12 +1084,10 @@
8301084 int lpfc_sli4_queue_setup(struct lpfc_hba *);
8311085 void lpfc_sli4_queue_unset(struct lpfc_hba *);
8321086 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
833
-int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
834
-int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
1087
+int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
8351088 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
8361089 void lpfc_sli4_free_xri(struct lpfc_hba *, int);
8371090 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
838
-int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
8391091 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
8401092 struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
8411093 void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
@@ -852,12 +1104,12 @@
 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
                         void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
-                               struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
 void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
-                                struct sli4_wcqe_xri_aborted *axri);
+                                struct sli4_wcqe_xri_aborted *axri,
+                                struct lpfc_io_buf *lpfc_ncmd);
+void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+                              struct sli4_wcqe_xri_aborted *axri, int idx);
 void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                                  struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -870,12 +1122,16 @@
 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
 int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
 int lpfc_sli4_init_vpi(struct lpfc_vport *);
-inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
-uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
-uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
-inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
-uint32_t lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm);
-uint32_t lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm);
+void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
+void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+                           uint32_t count, bool arm);
+void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+                           uint32_t count, bool arm);
+void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
+void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+                               uint32_t count, bool arm);
+void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+                               uint32_t count, bool arm);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
 int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
 int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
@@ -887,3 +1143,20 @@
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
+                                              struct lpfc_io_buf *buf);
+struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+                                                      struct lpfc_io_buf *buf);
+int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
+int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+                                  struct lpfc_io_buf *buf);
+void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+                            struct lpfc_sli4_hdw_queue *hdwq);
+void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+                                    struct lpfc_sli4_hdw_queue *hdwq);
+static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
+{
+        return q->q_pgs[idx / q->entry_cnt_per_pg] +
+                (q->entry_size * (idx % q->entry_cnt_per_pg));
+}