forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/scsi/lpfc/lpfc_nvme.h
....@@ -1,7 +1,7 @@
11 /*******************************************************************
22 * This file is part of the Emulex Linux Device Driver for *
33 * Fibre Channel Host Bus Adapters. *
4
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
4
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
55 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
66 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
77 * EMULEX and SLI are trademarks of Emulex. *
....@@ -21,6 +21,10 @@
2121 * included with this package. *
2222 ********************************************************************/
2323
24
+#include <linux/nvme.h>
25
+#include <linux/nvme-fc-driver.h>
26
+#include <linux/nvme-fc.h>
27
+
2428 #define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
2529
2630 #define LPFC_NVME_ERSP_LEN 0x20
....@@ -29,6 +33,9 @@
2933 #define LPFC_NVME_EXPEDITE_XRICNT 8
3034 #define LPFC_NVME_FB_SHIFT 9
3135 #define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
36
+
37
+#define LPFC_MAX_NVME_INFO_TMP_LEN 100
38
+#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
3239
3340 #define lpfc_ndlp_get_nrport(ndlp) \
3441 ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
....@@ -40,19 +47,11 @@
4047 uint32_t cpu_id; /* current cpu id at time of create */
4148 };
4249
43
-struct lpfc_nvme_ctrl_stat {
44
- atomic_t fc4NvmeInputRequests;
45
- atomic_t fc4NvmeOutputRequests;
46
- atomic_t fc4NvmeControlRequests;
47
- atomic_t fc4NvmeIoCmpls;
48
-};
49
-
5050 /* Declare nvme-based local and remote port definitions. */
5151 struct lpfc_nvme_lport {
5252 struct lpfc_vport *vport;
5353 struct completion *lport_unreg_cmp;
5454 /* Add stats counters here */
55
- struct lpfc_nvme_ctrl_stat *cstat;
5655 atomic_t fc4NvmeLsRequests;
5756 atomic_t fc4NvmeLsCmpls;
5857 atomic_t xmt_fcp_noxri;
....@@ -76,57 +75,182 @@
7675 struct completion rport_unreg_done;
7776 };
7877
79
-struct lpfc_nvme_buf {
78
+struct lpfc_nvme_fcpreq_priv {
79
+ struct lpfc_io_buf *nvme_buf;
80
+};
81
+
82
+/*
83
+ * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV
84
+ * set by the spec, which appears to have issues with some devices.
85
+ */
86
+#define LPFC_NVME_LS_TIMEOUT 30
87
+
88
+
89
+#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
90
+#define LPFC_NVMET_RQE_MIN_POST 128
91
+#define LPFC_NVMET_RQE_DEF_POST 512
92
+#define LPFC_NVMET_RQE_DEF_COUNT 2048
93
+#define LPFC_NVMET_SUCCESS_LEN 12
94
+
95
+#define LPFC_NVMET_MRQ_AUTO 0
96
+#define LPFC_NVMET_MRQ_MAX 16
97
+
98
+#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
99
+
100
+/* Used for NVME Target */
101
+#define LPFC_NVMET_INV_HOST_ACTIVE 1
102
+
103
+struct lpfc_nvmet_tgtport {
104
+ struct lpfc_hba *phba;
105
+ struct completion *tport_unreg_cmp;
106
+ atomic_t state; /* tracks nvmet hosthandle invalidation */
107
+
108
+ /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
109
+ atomic_t rcv_ls_req_in;
110
+ atomic_t rcv_ls_req_out;
111
+ atomic_t rcv_ls_req_drop;
112
+ atomic_t xmt_ls_abort;
113
+ atomic_t xmt_ls_abort_cmpl;
114
+
115
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
116
+ atomic_t xmt_ls_rsp;
117
+ atomic_t xmt_ls_drop;
118
+
119
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
120
+ atomic_t xmt_ls_rsp_error;
121
+ atomic_t xmt_ls_rsp_aborted;
122
+ atomic_t xmt_ls_rsp_xb_set;
123
+ atomic_t xmt_ls_rsp_cmpl;
124
+
125
+ /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
126
+ atomic_t rcv_fcp_cmd_in;
127
+ atomic_t rcv_fcp_cmd_out;
128
+ atomic_t rcv_fcp_cmd_drop;
129
+ atomic_t rcv_fcp_cmd_defer;
130
+ atomic_t xmt_fcp_release;
131
+
132
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op */
133
+ atomic_t xmt_fcp_drop;
134
+ atomic_t xmt_fcp_read_rsp;
135
+ atomic_t xmt_fcp_read;
136
+ atomic_t xmt_fcp_write;
137
+ atomic_t xmt_fcp_rsp;
138
+
139
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
140
+ atomic_t xmt_fcp_rsp_xb_set;
141
+ atomic_t xmt_fcp_rsp_cmpl;
142
+ atomic_t xmt_fcp_rsp_error;
143
+ atomic_t xmt_fcp_rsp_aborted;
144
+ atomic_t xmt_fcp_rsp_drop;
145
+
146
+ /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
147
+ atomic_t xmt_fcp_xri_abort_cqe;
148
+ atomic_t xmt_fcp_abort;
149
+ atomic_t xmt_fcp_abort_cmpl;
150
+ atomic_t xmt_abort_sol;
151
+ atomic_t xmt_abort_unsol;
152
+ atomic_t xmt_abort_rsp;
153
+ atomic_t xmt_abort_rsp_error;
154
+
155
+ /* Stats counters - defer IO */
156
+ atomic_t defer_ctx;
157
+ atomic_t defer_fod;
158
+ atomic_t defer_wqfull;
159
+};
160
+
161
+struct lpfc_nvmet_ctx_info {
162
+ struct list_head nvmet_ctx_list;
163
+ spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
164
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
165
+ struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
166
+ uint16_t nvmet_ctx_list_cnt;
167
+ char pad[16]; /* pad to a cache-line */
168
+};
169
+
170
+/* This retrieves the context info associated with the specified cpu / mrq */
171
+#define lpfc_get_ctx_list(phba, cpu, mrq) \
172
+ (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
173
+
174
+/* Values for state field of struct lpfc_async_xchg_ctx */
175
+#define LPFC_NVME_STE_LS_RCV 1
176
+#define LPFC_NVME_STE_LS_ABORT 2
177
+#define LPFC_NVME_STE_LS_RSP 3
178
+#define LPFC_NVME_STE_RCV 4
179
+#define LPFC_NVME_STE_DATA 5
180
+#define LPFC_NVME_STE_ABORT 6
181
+#define LPFC_NVME_STE_DONE 7
182
+#define LPFC_NVME_STE_FREE 0xff
183
+
184
+/* Values for flag field of struct lpfc_async_xchg_ctx */
185
+#define LPFC_NVME_IO_INP 0x1 /* IO is in progress on exchange */
186
+#define LPFC_NVME_ABORT_OP 0x2 /* Abort WQE issued on exchange */
187
+#define LPFC_NVME_XBUSY 0x4 /* XB bit set on IO cmpl */
188
+#define LPFC_NVME_CTX_RLS 0x8 /* ctx free requested */
189
+#define LPFC_NVME_ABTS_RCV 0x10 /* ABTS received on exchange */
190
+#define LPFC_NVME_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
191
+#define LPFC_NVME_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
192
+#define LPFC_NVME_TNOTIFY 0x80 /* notify transport of abts */
193
+
194
+struct lpfc_async_xchg_ctx {
195
+ union {
196
+ struct nvmefc_tgt_fcp_req fcp_req;
197
+ } hdlrctx;
80198 struct list_head list;
81
- struct nvmefc_fcp_req *nvmeCmd;
82
- struct lpfc_nvme_rport *nrport;
199
+ struct lpfc_hba *phba;
83200 struct lpfc_nodelist *ndlp;
84
-
85
- uint32_t timeout;
86
-
87
- uint16_t flags; /* TBD convert exch_busy to flags */
88
-#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
89
-#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
90
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
91
- uint16_t status; /* From IOCB Word 7- ulpStatus */
201
+ struct nvmefc_ls_req *ls_req;
202
+ struct nvmefc_ls_rsp ls_rsp;
203
+ struct lpfc_iocbq *wqeq;
204
+ struct lpfc_iocbq *abort_wqeq;
205
+ spinlock_t ctxlock; /* protect flag access */
206
+ uint32_t sid;
207
+ uint32_t offset;
208
+ uint16_t oxid;
209
+ uint16_t size;
210
+ uint16_t entry_cnt;
92211 uint16_t cpu;
93
- uint16_t qidx;
94
- uint16_t sqid;
95
- uint32_t result; /* From IOCB Word 4. */
212
+ uint16_t idx;
213
+ uint16_t state;
214
+ uint16_t flag;
215
+ void *payload;
216
+ struct rqb_dmabuf *rqb_buffer;
217
+ struct lpfc_nvmet_ctxbuf *ctxbuf;
218
+ struct lpfc_sli4_hdw_queue *hdwq;
96219
97
- uint32_t seg_cnt; /* Number of scatter-gather segments returned by
98
- * dma_map_sg. The driver needs this for calls
99
- * to dma_unmap_sg.
100
- */
101
- dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
102
-
103
- /*
104
- * data and dma_handle are the kernel virtual and bus address of the
105
- * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
106
- * gather bde list that supports the sg_tablesize value.
107
- */
108
- void *data;
109
- dma_addr_t dma_handle;
110
-
111
- struct sli4_sge *nvme_sgl;
112
- dma_addr_t dma_phys_sgl;
113
-
114
- /* cur_iocbq has phys of the dma-able buffer.
115
- * Iotag is in here
116
- */
117
- struct lpfc_iocbq cur_iocbq;
118
-
119
- wait_queue_head_t *waitq;
120
- unsigned long start_time;
121220 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
122
- uint64_t ts_cmd_start;
123
- uint64_t ts_last_cmd;
124
- uint64_t ts_cmd_wqput;
125
- uint64_t ts_isr_cmpl;
221
+ uint64_t ts_isr_cmd;
222
+ uint64_t ts_cmd_nvme;
223
+ uint64_t ts_nvme_data;
224
+ uint64_t ts_data_wqput;
225
+ uint64_t ts_isr_data;
126226 uint64_t ts_data_nvme;
227
+ uint64_t ts_nvme_status;
228
+ uint64_t ts_status_wqput;
229
+ uint64_t ts_isr_status;
230
+ uint64_t ts_status_nvme;
127231 #endif
128232 };
129233
130
-struct lpfc_nvme_fcpreq_priv {
131
- struct lpfc_nvme_buf *nvme_buf;
132
-};
234
+
235
+/* routines found in lpfc_nvme.c */
236
+int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
237
+ struct nvmefc_ls_req *pnvme_lsreq,
238
+ void (*gen_req_cmp)(struct lpfc_hba *phba,
239
+ struct lpfc_iocbq *cmdwqe,
240
+ struct lpfc_wcqe_complete *wcqe));
241
+void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
242
+ struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
243
+int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
244
+ struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
245
+
246
+/* routines found in lpfc_nvmet.c */
247
+int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
248
+ struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
249
+ uint16_t xri);
250
+int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
251
+ struct nvmefc_ls_rsp *ls_rsp,
252
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
253
+ struct lpfc_iocbq *cmdwqe,
254
+ struct lpfc_wcqe_complete *wcqe));
255
+void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
256
+ struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);