hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/scsi/lpfc/lpfc_nvmet.c
....@@ -1,7 +1,7 @@
11 /*******************************************************************
22 * This file is part of the Emulex Linux Device Driver for *
33 * Fibre Channel Host Bus Adapters. *
4
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
4
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
55 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
66 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
77 * EMULEX and SLI are trademarks of Emulex. *
....@@ -36,10 +36,6 @@
3636 #include <scsi/scsi_transport_fc.h>
3737 #include <scsi/fc/fc_fs.h>
3838
39
-#include <linux/nvme.h>
40
-#include <linux/nvme-fc-driver.h>
41
-#include <linux/nvme-fc.h>
42
-
4339 #include "lpfc_version.h"
4440 #include "lpfc_hw4.h"
4541 #include "lpfc_hw.h"
....@@ -50,29 +46,28 @@
5046 #include "lpfc.h"
5147 #include "lpfc_scsi.h"
5248 #include "lpfc_nvme.h"
53
-#include "lpfc_nvmet.h"
5449 #include "lpfc_logmsg.h"
5550 #include "lpfc_crtn.h"
5651 #include "lpfc_vport.h"
5752 #include "lpfc_debugfs.h"
5853
5954 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60
- struct lpfc_nvmet_rcv_ctx *,
55
+ struct lpfc_async_xchg_ctx *,
6156 dma_addr_t rspbuf,
6257 uint16_t rspsize);
6358 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64
- struct lpfc_nvmet_rcv_ctx *);
59
+ struct lpfc_async_xchg_ctx *);
6560 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66
- struct lpfc_nvmet_rcv_ctx *,
61
+ struct lpfc_async_xchg_ctx *,
6762 uint32_t, uint16_t);
6863 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69
- struct lpfc_nvmet_rcv_ctx *,
64
+ struct lpfc_async_xchg_ctx *,
7065 uint32_t, uint16_t);
71
-static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72
- struct lpfc_nvmet_rcv_ctx *,
73
- uint32_t, uint16_t);
7466 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75
- struct lpfc_nvmet_rcv_ctx *);
67
+ struct lpfc_async_xchg_ctx *);
68
+static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
69
+
70
+static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
7671
7772 static union lpfc_wqe128 lpfc_tsend_cmd_template;
7873 static union lpfc_wqe128 lpfc_treceive_cmd_template;
....@@ -217,24 +212,119 @@
217212 /* Word 12, 13, 14, 15 - is zero */
218213 }
219214
220
-void
221
-lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
215
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
216
+static struct lpfc_async_xchg_ctx *
217
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
222218 {
219
+ struct lpfc_async_xchg_ctx *ctxp;
223220 unsigned long iflag;
221
+ bool found = false;
222
+
223
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
224
+ list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
226
+ continue;
227
+
228
+ found = true;
229
+ break;
230
+ }
231
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
232
+ if (found)
233
+ return ctxp;
234
+
235
+ return NULL;
236
+}
237
+
238
+static struct lpfc_async_xchg_ctx *
239
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
240
+{
241
+ struct lpfc_async_xchg_ctx *ctxp;
242
+ unsigned long iflag;
243
+ bool found = false;
244
+
245
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
246
+ list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247
+ if (ctxp->oxid != oxid || ctxp->sid != sid)
248
+ continue;
249
+
250
+ found = true;
251
+ break;
252
+ }
253
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
254
+ if (found)
255
+ return ctxp;
256
+
257
+ return NULL;
258
+}
259
+#endif
260
+
261
+static void
262
+lpfc_nvmet_defer_release(struct lpfc_hba *phba,
263
+ struct lpfc_async_xchg_ctx *ctxp)
264
+{
265
+ lockdep_assert_held(&ctxp->ctxlock);
224266
225267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226
- "6313 NVMET Defer ctx release xri x%x flg x%x\n",
268
+ "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
227269 ctxp->oxid, ctxp->flag);
228270
229
- spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
230
- if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
232
- iflag);
271
+ if (ctxp->flag & LPFC_NVME_CTX_RLS)
233272 return;
234
- }
235
- ctxp->flag |= LPFC_NVMET_CTX_RLS;
273
+
274
+ ctxp->flag |= LPFC_NVME_CTX_RLS;
275
+ spin_lock(&phba->sli4_hba.t_active_list_lock);
276
+ list_del(&ctxp->list);
277
+ spin_unlock(&phba->sli4_hba.t_active_list_lock);
278
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
236279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
280
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
281
+}
282
+
283
+/**
284
+ * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
285
+ * transmission of an NVME LS response.
286
+ * @phba: Pointer to HBA context object.
287
+ * @cmdwqe: Pointer to driver command WQE object.
288
+ * @wcqe: Pointer to driver response CQE object.
289
+ *
290
+ * The function is called from SLI ring event handler with no
291
+ * lock held. The function frees memory resources used for the command
292
+ * used to send the NVME LS RSP.
293
+ **/
294
+void
295
+__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
296
+ struct lpfc_wcqe_complete *wcqe)
297
+{
298
+ struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
299
+ struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
300
+ uint32_t status, result;
301
+
302
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
303
+ result = wcqe->parameter;
304
+
305
+ if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
306
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
307
+ "6410 NVMEx LS cmpl state mismatch IO x%x: "
308
+ "%d %d\n",
309
+ axchg->oxid, axchg->state, axchg->entry_cnt);
310
+ }
311
+
312
+ lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
313
+ axchg->oxid, status, result);
314
+
315
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
316
+ "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
317
+ status, result, axchg->oxid);
318
+
319
+ lpfc_nlp_put(cmdwqe->context1);
320
+ cmdwqe->context2 = NULL;
321
+ cmdwqe->context3 = NULL;
322
+ lpfc_sli_release_iocbq(phba, cmdwqe);
323
+ ls_rsp->done(ls_rsp);
324
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
325
+ "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
326
+ status, axchg->oxid);
327
+ kfree(axchg);
238328 }
239329
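/*
 * Illustrative sketch of the locking contract that the reworked
 * lpfc_nvmet_defer_release() above documents with lockdep_assert_held():
 * the caller takes the per-exchange lock, the callee only asserts it.
 * The demo_* names are simplified stand-ins, not driver symbols.
 */
#include <linux/spinlock.h>

struct demo_xchg {
	spinlock_t lock;
	unsigned int flag;
};

static void demo_defer_release(struct demo_xchg *x)
{
	lockdep_assert_held(&x->lock);	/* warn (under lockdep) if the caller forgot the lock */
	x->flag |= 0x1;			/* e.g. mark the context for deferred release */
}

static void demo_caller(struct demo_xchg *x)
{
	unsigned long iflag;

	spin_lock_irqsave(&x->lock, iflag);
	demo_defer_release(x);
	spin_unlock_irqrestore(&x->lock, iflag);
}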
240330 /**
....@@ -245,33 +335,23 @@
245335 *
246336 * The function is called from SLI ring event handler with no
247337 * lock held. This function is the completion handler for NVME LS commands
248
- * The function frees memory resources used for the NVME commands.
338
+ * The function updates any states and statistics, then calls the
339
+ * generic completion handler to free resources.
249340 **/
250341 static void
251342 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252343 struct lpfc_wcqe_complete *wcqe)
253344 {
254345 struct lpfc_nvmet_tgtport *tgtp;
255
- struct nvmefc_tgt_ls_req *rsp;
256
- struct lpfc_nvmet_rcv_ctx *ctxp;
257346 uint32_t status, result;
258347
259
- status = bf_get(lpfc_wcqe_c_status, wcqe);
260
- result = wcqe->parameter;
261
- ctxp = cmdwqe->context2;
262
-
263
- if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265
- "6410 NVMET LS cmpl state mismatch IO x%x: "
266
- "%d %d\n",
267
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
268
- }
269
-
270348 if (!phba->targetport)
271
- goto out;
349
+ goto finish;
350
+
351
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
352
+ result = wcqe->parameter;
272353
273354 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
274
-
275355 if (tgtp) {
276356 if (status) {
277357 atomic_inc(&tgtp->xmt_ls_rsp_error);
....@@ -284,22 +364,8 @@
284364 }
285365 }
286366
287
-out:
288
- rsp = &ctxp->ctx.ls_req;
289
-
290
- lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
291
- ctxp->oxid, status, result);
292
-
293
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294
- "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295
- status, result, ctxp->oxid);
296
-
297
- lpfc_nlp_put(cmdwqe->context1);
298
- cmdwqe->context2 = NULL;
299
- cmdwqe->context3 = NULL;
300
- lpfc_sli_release_iocbq(phba, cmdwqe);
301
- rsp->done(rsp);
302
- kfree(ctxp);
367
+finish:
368
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
303369 }
304370
305371 /**
....@@ -319,29 +385,42 @@
319385 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
320386 {
321387 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322
- struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
388
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
323389 struct lpfc_nvmet_tgtport *tgtp;
324390 struct fc_frame_header *fc_hdr;
325391 struct rqb_dmabuf *nvmebuf;
326392 struct lpfc_nvmet_ctx_info *infop;
327
- uint32_t *payload;
328
- uint32_t size, oxid, sid, rc;
393
+ uint32_t size, oxid, sid;
329394 int cpu;
330395 unsigned long iflag;
331396
332
- if (ctxp->txrdy) {
333
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
334
- ctxp->txrdy_phys);
335
- ctxp->txrdy = NULL;
336
- ctxp->txrdy_phys = 0;
337
- }
338
-
339
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
340
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
397
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
398
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
341399 "6411 NVMET free, already free IO x%x: %d %d\n",
342400 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
343401 }
344
- ctxp->state = LPFC_NVMET_STE_FREE;
402
+
403
+ if (ctxp->rqb_buffer) {
404
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
405
+ nvmebuf = ctxp->rqb_buffer;
406
+ /* check if freed in another path whilst acquiring lock */
407
+ if (nvmebuf) {
408
+ ctxp->rqb_buffer = NULL;
409
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
410
+ ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
411
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
412
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
413
+ nvmebuf);
414
+ } else {
415
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
416
+ /* repost */
417
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
418
+ }
419
+ } else {
420
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
421
+ }
422
+ }
423
+ ctxp->state = LPFC_NVME_STE_FREE;
345424
346425 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347426 if (phba->sli4_hba.nvmet_io_wait_cnt) {
....@@ -355,19 +434,17 @@
355434 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356435 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357436 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
359437 size = nvmebuf->bytes_recv;
360438 sid = sli4_sid_from_fc_hdr(fc_hdr);
361439
362
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
440
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
363441 ctxp->wqeq = NULL;
364
- ctxp->txrdy = NULL;
365442 ctxp->offset = 0;
366443 ctxp->phba = phba;
367444 ctxp->size = size;
368445 ctxp->oxid = oxid;
369446 ctxp->sid = sid;
370
- ctxp->state = LPFC_NVMET_STE_RCV;
447
+ ctxp->state = LPFC_NVME_STE_RCV;
371448 ctxp->entry_cnt = 1;
372449 ctxp->flag = 0;
373450 ctxp->ctxbuf = ctx_buf;
....@@ -375,8 +452,9 @@
375452 spin_lock_init(&ctxp->ctxlock);
376453
377454 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378
- if (ctxp->ts_cmd_nvme) {
379
- ctxp->ts_cmd_nvme = ktime_get_ns();
455
+ /* NOTE: isr time stamp is stale when context is re-assigned*/
456
+ if (ctxp->ts_isr_cmd) {
457
+ ctxp->ts_cmd_nvme = 0;
380458 ctxp->ts_nvme_data = 0;
381459 ctxp->ts_data_wqput = 0;
382460 ctxp->ts_isr_data = 0;
....@@ -388,46 +466,28 @@
388466 }
389467 #endif
390468 atomic_inc(&tgtp->rcv_fcp_cmd_in);
391
- /*
392
- * The calling sequence should be:
393
- * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
394
- * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395
- * When we return from nvmet_fc_rcv_fcp_req, all relevant info
396
- * the NVME command / FC header is stored.
397
- * A buffer has already been reposted for this IO, so just free
398
- * the nvmebuf.
399
- */
400
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
401
- payload, size);
402469
403
- /* Process FCP command */
404
- if (rc == 0) {
405
- ctxp->rqb_buffer = NULL;
406
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
407
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
408
- return;
470
+ /* Indicate that a replacement buffer has been posted */
471
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
472
+ ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
473
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
474
+
475
+ if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
476
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
477
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
478
+ "6181 Unable to queue deferred work "
479
+ "for oxid x%x. "
480
+ "FCP Drop IO [x%x x%x x%x]\n",
481
+ ctxp->oxid,
482
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
483
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
484
+ atomic_read(&tgtp->xmt_fcp_release));
485
+
486
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
487
+ lpfc_nvmet_defer_release(phba, ctxp);
488
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
489
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
409490 }
410
-
411
- /* Processing of FCP command is deferred */
412
- if (rc == -EOVERFLOW) {
413
- lpfc_nvmeio_data(phba,
414
- "NVMET RCV BUSY: xri x%x sz %d "
415
- "from %06x\n",
416
- oxid, size, sid);
417
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
418
- return;
419
- }
420
- atomic_inc(&tgtp->rcv_fcp_cmd_drop);
421
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
422
- "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
423
- ctxp->oxid, rc,
424
- atomic_read(&tgtp->rcv_fcp_cmd_in),
425
- atomic_read(&tgtp->rcv_fcp_cmd_out),
426
- atomic_read(&tgtp->xmt_fcp_release));
427
-
428
- lpfc_nvmet_defer_release(phba, ctxp);
429
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
430
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
431491 return;
432492 }
433493 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
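/*
 * Illustrative sketch of the deferred-delivery pattern introduced above:
 * instead of calling into the target transport inline, the receive path
 * queues a work item, and falls back to an abort/release path when the
 * work cannot be queued. The demo_* names are stand-ins, not driver symbols.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_ctxbuf {
	struct work_struct defer_work;
};

static void demo_process_rcv(struct work_struct *work)
{
	struct demo_ctxbuf *cb = container_of(work, struct demo_ctxbuf,
					      defer_work);

	/* hand the received command to the target transport here */
	(void)cb;
}

static void demo_post(struct workqueue_struct *wq, struct demo_ctxbuf *cb)
{
	INIT_WORK(&cb->defer_work, demo_process_rcv);
	if (!queue_work(wq, &cb->defer_work)) {
		/* already queued: drop the I/O and issue an abort instead */
	}
}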
....@@ -436,7 +496,10 @@
436496 * Use the CPU context list, from the MRQ the IO was received on
437497 * (ctxp->idx), to save context structure.
438498 */
439
- cpu = smp_processor_id();
499
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
500
+ list_del_init(&ctxp->list);
501
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
502
+ cpu = raw_smp_processor_id();
440503 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
441504 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
442505 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
....@@ -448,7 +511,7 @@
448511 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
449512 static void
450513 lpfc_nvmet_ktime(struct lpfc_hba *phba,
451
- struct lpfc_nvmet_rcv_ctx *ctxp)
514
+ struct lpfc_async_xchg_ctx *ctxp)
452515 {
453516 uint64_t seg1, seg2, seg3, seg4, seg5;
454517 uint64_t seg6, seg7, seg8, seg9, seg10;
....@@ -657,16 +720,16 @@
657720 {
658721 struct lpfc_nvmet_tgtport *tgtp;
659722 struct nvmefc_tgt_fcp_req *rsp;
660
- struct lpfc_nvmet_rcv_ctx *ctxp;
723
+ struct lpfc_async_xchg_ctx *ctxp;
661724 uint32_t status, result, op, start_clean, logerr;
662725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
663
- uint32_t id;
726
+ int id;
664727 #endif
665728
666729 ctxp = cmdwqe->context2;
667
- ctxp->flag &= ~LPFC_NVMET_IO_INP;
730
+ ctxp->flag &= ~LPFC_NVME_IO_INP;
668731
669
- rsp = &ctxp->ctx.fcp_req;
732
+ rsp = &ctxp->hdlrctx.fcp_req;
670733 op = rsp->op;
671734
672735 status = bf_get(lpfc_wcqe_c_status, wcqe);
....@@ -693,18 +756,20 @@
693756
694757 /* pick up SLI4 exhange busy condition */
695758 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
696
- ctxp->flag |= LPFC_NVMET_XBUSY;
759
+ ctxp->flag |= LPFC_NVME_XBUSY;
697760 logerr |= LOG_NVME_ABTS;
698761 if (tgtp)
699762 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
700763
701764 } else {
702
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
765
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
703766 }
704767
705768 lpfc_printf_log(phba, KERN_INFO, logerr,
706
- "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
707
- ctxp->oxid, status, result, ctxp->flag);
769
+ "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
770
+ "XBUSY:x%x\n",
771
+ ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
772
+ status, result, ctxp->flag);
708773
709774 } else {
710775 rsp->fcp_error = NVME_SC_SUCCESS;
....@@ -719,7 +784,7 @@
719784 if ((op == NVMET_FCOP_READDATA_RSP) ||
720785 (op == NVMET_FCOP_RSP)) {
721786 /* Sanity check */
722
- ctxp->state = LPFC_NVMET_STE_DONE;
787
+ ctxp->state = LPFC_NVME_STE_DONE;
723788 ctxp->entry_cnt++;
724789
725790 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
....@@ -744,16 +809,6 @@
744809 ktime_get_ns();
745810 }
746811 }
747
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
748
- id = smp_processor_id();
749
- if (ctxp->cpu != id)
750
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
751
- "6703 CPU Check cmpl: "
752
- "cpu %d expect %d\n",
753
- id, ctxp->cpu);
754
- if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
755
- phba->cpucheck_cmpl_io[id]++;
756
- }
757812 #endif
758813 rsp->done(rsp);
759814 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
....@@ -771,32 +826,48 @@
771826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
772827 ctxp->ts_data_nvme = ktime_get_ns();
773828 }
774
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
775
- id = smp_processor_id();
776
- if (ctxp->cpu != id)
777
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
778
- "6704 CPU Check cmdcmpl: "
779
- "cpu %d expect %d\n",
780
- id, ctxp->cpu);
781
- if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
782
- phba->cpucheck_ccmpl_io[id]++;
783
- }
784829 #endif
785830 rsp->done(rsp);
786831 }
832
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
833
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
834
+ id = raw_smp_processor_id();
835
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
836
+ if (ctxp->cpu != id)
837
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
838
+ "6704 CPU Check cmdcmpl: "
839
+ "cpu %d expect %d\n",
840
+ id, ctxp->cpu);
841
+ }
842
+#endif
787843 }
788844
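/*
 * Illustrative sketch of the per-CPU statistics pattern used in the
 * completion path above: raw_smp_processor_id() identifies the running CPU
 * and this_cpu_inc() bumps a counter allocated with alloc_percpu().
 * The demo_* names are stand-ins, not driver symbols.
 */
#include <linux/percpu.h>
#include <linux/errno.h>

struct demo_stat {
	unsigned long cmpl_io;
};

static struct demo_stat __percpu *demo_counters;

static int demo_stats_init(void)
{
	demo_counters = alloc_percpu(struct demo_stat);
	return demo_counters ? 0 : -ENOMEM;
}

static void demo_account_cmpl(void)
{
	this_cpu_inc(demo_counters->cmpl_io);	/* lock-free, per-CPU increment */
}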
789
-static int
790
-lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
791
- struct nvmefc_tgt_ls_req *rsp)
845
+/**
846
+ * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
847
+ * an NVME LS rsp for a prior NVME LS request that was received.
848
+ * @axchg: pointer to exchange context for the NVME LS request the response
849
+ * is for.
850
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
851
+ * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
852
+ *
853
+ * This routine is used to format and send a WQE to transmit a NVME LS
854
+ * Response. The response is for a prior NVME LS request that was
855
+ * received and posted to the transport.
856
+ *
857
+ * Returns:
858
+ *   0 : if response successfully transmitted
859
+ * non-zero : if response failed to transmit, of the form -Exxx.
860
+ **/
861
+int
862
+__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
863
+ struct nvmefc_ls_rsp *ls_rsp,
864
+ void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
865
+ struct lpfc_iocbq *cmdwqe,
866
+ struct lpfc_wcqe_complete *wcqe))
792867 {
793
- struct lpfc_nvmet_rcv_ctx *ctxp =
794
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
795
- struct lpfc_hba *phba = ctxp->phba;
796
- struct hbq_dmabuf *nvmebuf =
797
- (struct hbq_dmabuf *)ctxp->rqb_buffer;
868
+ struct lpfc_hba *phba = axchg->phba;
869
+ struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
798870 struct lpfc_iocbq *nvmewqeq;
799
- struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
800871 struct lpfc_dmabuf dmabuf;
801872 struct ulp_bde64 bpl;
802873 int rc;
....@@ -804,34 +875,27 @@
804875 if (phba->pport->load_flag & FC_UNLOADING)
805876 return -ENODEV;
806877
807
- if (phba->pport->load_flag & FC_UNLOADING)
808
- return -ENODEV;
809
-
810878 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
811
- "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
879
+ "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
812880
813
- if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
814
- (ctxp->entry_cnt != 1)) {
815
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
816
- "6412 NVMET LS rsp state mismatch "
881
+ if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
882
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
883
+ "6412 NVMEx LS rsp state mismatch "
817884 "oxid x%x: %d %d\n",
818
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
885
+ axchg->oxid, axchg->state, axchg->entry_cnt);
886
+ return -EALREADY;
819887 }
820
- ctxp->state = LPFC_NVMET_STE_LS_RSP;
821
- ctxp->entry_cnt++;
888
+ axchg->state = LPFC_NVME_STE_LS_RSP;
889
+ axchg->entry_cnt++;
822890
823
- nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
824
- rsp->rsplen);
891
+ nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
892
+ ls_rsp->rsplen);
825893 if (nvmewqeq == NULL) {
826
- atomic_inc(&nvmep->xmt_ls_drop);
827
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
828
- "6150 LS Drop IO x%x: Prep\n",
829
- ctxp->oxid);
830
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
831
- atomic_inc(&nvmep->xmt_ls_abort);
832
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
833
- ctxp->sid, ctxp->oxid);
834
- return -ENOMEM;
894
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895
+ "6150 NVMEx LS Drop Rsp x%x: Prep\n",
896
+ axchg->oxid);
897
+ rc = -ENOMEM;
898
+ goto out_free_buf;
835899 }
836900
837901 /* Save numBdes for bpl2sgl */
....@@ -841,40 +905,105 @@
841905 dmabuf.virt = &bpl;
842906 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
843907 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
844
- bpl.tus.f.bdeSize = rsp->rsplen;
908
+ bpl.tus.f.bdeSize = ls_rsp->rsplen;
845909 bpl.tus.f.bdeFlags = 0;
846910 bpl.tus.w = le32_to_cpu(bpl.tus.w);
911
+ /*
912
+ * Note: although we're using stack space for the dmabuf, the
913
+ * call to lpfc_sli4_issue_wqe is synchronous, so it will not
914
+ * be referenced after it returns back to this routine.
915
+ */
847916
848
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
917
+ nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
849918 nvmewqeq->iocb_cmpl = NULL;
850
- nvmewqeq->context2 = ctxp;
919
+ nvmewqeq->context2 = axchg;
851920
852
- lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
853
- ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
921
+ lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
922
+ axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
854923
855
- rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
924
+ rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
925
+
926
+ /* clear to be sure there's no reference */
927
+ nvmewqeq->context3 = NULL;
928
+
856929 if (rc == WQE_SUCCESS) {
857930 /*
858931 * Okay to repost buffer here, but wait till cmpl
859932 * before freeing ctxp and iocbq.
860933 */
861934 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
862
- ctxp->rqb_buffer = 0;
863
- atomic_inc(&nvmep->xmt_ls_rsp);
864935 return 0;
865936 }
866
- /* Give back resources */
867
- atomic_inc(&nvmep->xmt_ls_drop);
868
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
869
- "6151 LS Drop IO x%x: Issue %d\n",
870
- ctxp->oxid, rc);
937
+
938
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
939
+ "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
940
+ axchg->oxid, rc);
941
+
942
+ rc = -ENXIO;
871943
872944 lpfc_nlp_put(nvmewqeq->context1);
873945
946
+out_free_buf:
947
+ /* Give back resources */
874948 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
875
- atomic_inc(&nvmep->xmt_ls_abort);
876
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
877
- return -ENXIO;
949
+
950
+ /*
951
+ * As transport doesn't track completions of responses, if the rsp
952
+ * fails to send, the transport will effectively ignore the rsp
953
+ * and consider the LS done. However, the driver has an active
954
+ * exchange open for the LS - so be sure to abort the exchange
955
+ * if the response isn't sent.
956
+ */
957
+ lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
958
+ return rc;
959
+}
960
+
961
+/**
962
+ * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
963
+ * @tgtport: pointer to target port that NVME LS is to be transmit from.
964
+ * @ls_rsp: pointer to the transport LS RSP that is to be sent
965
+ *
966
+ * Driver registers this routine to transmit responses for received NVME
967
+ * LS requests.
968
+ *
969
+ * This routine is used to format and send a WQE to transmit a NVME LS
970
+ * Response. The ls_rsp is used to reverse-map the LS to the original
971
+ * NVME LS request sequence, which provides addressing information for
972
+ * the remote port the LS is to be sent to, as well as the exchange id
973
+ * that the LS is bound to.
974
+ *
975
+ * Returns:
976
+ *   0 : if response successfully transmitted
977
+ * non-zero : if response failed to transmit, of the form -Exxx.
978
+ **/
979
+static int
980
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
981
+ struct nvmefc_ls_rsp *ls_rsp)
982
+{
983
+ struct lpfc_async_xchg_ctx *axchg =
984
+ container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
985
+ struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
986
+ int rc;
987
+
988
+ if (axchg->phba->pport->load_flag & FC_UNLOADING)
989
+ return -ENODEV;
990
+
991
+ rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
992
+
993
+ if (rc) {
994
+ atomic_inc(&nvmep->xmt_ls_drop);
995
+ /*
996
+ * unless the failure is due to having already sent
997
+ * the response, an abort will be generated for the
998
+ * exchange if the rsp can't be sent.
999
+ */
1000
+ if (rc != -EALREADY)
1001
+ atomic_inc(&nvmep->xmt_ls_abort);
1002
+ return rc;
1003
+ }
1004
+
1005
+ atomic_inc(&nvmep->xmt_ls_rsp);
1006
+ return 0;
8781007 }
8791008
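/*
 * Illustrative sketch of the container_of() reverse mapping used by
 * lpfc_nvmet_xmt_ls_rsp() above: the transport hands back a pointer to the
 * embedded response member, and the driver recovers the surrounding
 * exchange context. The demo_* names are stand-ins, not driver symbols.
 */
#include <linux/kernel.h>

struct demo_ls_rsp {
	void *rspbuf;
	unsigned int rsplen;
};

struct demo_xchg_ctx {
	unsigned short oxid;
	struct demo_ls_rsp ls_rsp;	/* member handed to the upper layer */
};

static struct demo_xchg_ctx *demo_ctx_from_rsp(struct demo_ls_rsp *rsp)
{
	return container_of(rsp, struct demo_xchg_ctx, ls_rsp);
}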
8801009 static int
....@@ -882,19 +1011,17 @@
8821011 struct nvmefc_tgt_fcp_req *rsp)
8831012 {
8841013 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
885
- struct lpfc_nvmet_rcv_ctx *ctxp =
886
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1014
+ struct lpfc_async_xchg_ctx *ctxp =
1015
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
8871016 struct lpfc_hba *phba = ctxp->phba;
8881017 struct lpfc_queue *wq;
8891018 struct lpfc_iocbq *nvmewqeq;
8901019 struct lpfc_sli_ring *pring;
8911020 unsigned long iflags;
8921021 int rc;
893
-
894
- if (phba->pport->load_flag & FC_UNLOADING) {
895
- rc = -ENODEV;
896
- goto aerr;
897
- }
1022
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1023
+ int id;
1024
+#endif
8981025
8991026 if (phba->pport->load_flag & FC_UNLOADING) {
9001027 rc = -ENODEV;
....@@ -908,27 +1035,29 @@
9081035 else
9091036 ctxp->ts_nvme_data = ktime_get_ns();
9101037 }
911
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
912
- int id = smp_processor_id();
913
- ctxp->cpu = id;
914
- if (id < LPFC_CHECK_CPU_CNT)
915
- phba->cpucheck_xmt_io[id]++;
916
- if (rsp->hwqid != id) {
917
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1038
+
1039
+ /* Setup the hdw queue if not already set */
1040
+ if (!ctxp->hdwq)
1041
+ ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1042
+
1043
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1044
+ id = raw_smp_processor_id();
1045
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1046
+ if (rsp->hwqid != id)
1047
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
9181048 "6705 CPU Check OP: "
9191049 "cpu %d expect %d\n",
9201050 id, rsp->hwqid);
921
- ctxp->cpu = rsp->hwqid;
922
- }
1051
+ ctxp->cpu = id; /* Setup cpu for cmpl check */
9231052 }
9241053 #endif
9251054
9261055 /* Sanity check */
927
- if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
928
- (ctxp->state == LPFC_NVMET_STE_ABORT)) {
1056
+ if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1057
+ (ctxp->state == LPFC_NVME_STE_ABORT)) {
9291058 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
930
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
931
- "6102 IO xri x%x aborted\n",
1059
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1060
+ "6102 IO oxid x%x aborted\n",
9321061 ctxp->oxid);
9331062 rc = -ENXIO;
9341063 goto aerr;
....@@ -937,7 +1066,7 @@
9371066 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
9381067 if (nvmewqeq == NULL) {
9391068 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
940
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1069
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9411070 "6152 FCP Drop IO x%x: Prep\n",
9421071 ctxp->oxid);
9431072 rc = -ENXIO;
....@@ -953,8 +1082,8 @@
9531082 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
9541083 ctxp->oxid, rsp->op, rsp->rsplen);
9551084
956
- ctxp->flag |= LPFC_NVMET_IO_INP;
957
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1085
+ ctxp->flag |= LPFC_NVME_IO_INP;
1086
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
9581087 if (rc == WQE_SUCCESS) {
9591088 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
9601089 if (!ctxp->ts_cmd_nvme)
....@@ -972,8 +1101,8 @@
9721101 * WQ was full, so queue nvmewqeq to be sent after
9731102 * WQE release CQE
9741103 */
975
- ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
976
- wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
1104
+ ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1105
+ wq = ctxp->hdwq->io_wq;
9771106 pring = wq->pring;
9781107 spin_lock_irqsave(&pring->ring_lock, iflags);
9791108 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
....@@ -985,7 +1114,7 @@
9851114
9861115 /* Give back resources */
9871116 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
988
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1117
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9891118 "6153 FCP Drop IO x%x: Issue: %d\n",
9901119 ctxp->oxid, rc);
9911120
....@@ -1012,8 +1141,8 @@
10121141 struct nvmefc_tgt_fcp_req *req)
10131142 {
10141143 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1015
- struct lpfc_nvmet_rcv_ctx *ctxp =
1016
- container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1144
+ struct lpfc_async_xchg_ctx *ctxp =
1145
+ container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
10171146 struct lpfc_hba *phba = ctxp->phba;
10181147 struct lpfc_queue *wq;
10191148 unsigned long flags;
....@@ -1021,11 +1150,11 @@
10211150 if (phba->pport->load_flag & FC_UNLOADING)
10221151 return;
10231152
1024
- if (phba->pport->load_flag & FC_UNLOADING)
1025
- return;
1153
+ if (!ctxp->hdwq)
1154
+ ctxp->hdwq = &phba->sli4_hba.hdwq[0];
10261155
10271156 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1028
- "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1157
+ "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
10291158 ctxp->oxid, ctxp->flag, ctxp->state);
10301159
10311160 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
....@@ -1034,37 +1163,36 @@
10341163 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
10351164
10361165 spin_lock_irqsave(&ctxp->ctxlock, flags);
1037
- ctxp->state = LPFC_NVMET_STE_ABORT;
10381166
10391167 /* Since iaab/iaar are NOT set, we need to check
10401168 * if the firmware is in process of aborting IO
10411169 */
1042
- if (ctxp->flag & LPFC_NVMET_XBUSY) {
1170
+ if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
10431171 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
10441172 return;
10451173 }
1046
- ctxp->flag |= LPFC_NVMET_ABORT_OP;
1174
+ ctxp->flag |= LPFC_NVME_ABORT_OP;
10471175
1048
- if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1176
+ if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1177
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
10491178 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
10501179 ctxp->oxid);
1051
- wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
1052
- spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1180
+ wq = ctxp->hdwq->io_wq;
10531181 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
10541182 return;
10551183 }
1184
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
10561185
1057
- /* An state of LPFC_NVMET_STE_RCV means we have just received
1186
+ /* A state of LPFC_NVME_STE_RCV means we have just received
10581187 * the NVME command and have not started processing it.
10591188 * (by issuing any IO WQEs on this exchange yet)
10601189 */
1061
- if (ctxp->state == LPFC_NVMET_STE_RCV)
1190
+ if (ctxp->state == LPFC_NVME_STE_RCV)
10621191 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
10631192 ctxp->oxid);
10641193 else
10651194 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
10661195 ctxp->oxid);
1067
- spin_unlock_irqrestore(&ctxp->ctxlock, flags);
10681196 }
10691197
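/*
 * Illustrative sketch of the lock ordering used by the abort handler above:
 * the abort flag is tested and set while the per-exchange lock is held, but
 * the lock is dropped before the abort itself is issued. The demo_* names
 * are stand-ins, not driver symbols.
 */
#include <linux/spinlock.h>

#define DEMO_ABORT_OP	0x1

struct demo_io {
	spinlock_t lock;
	unsigned int flag;
};

static void demo_issue_abort(struct demo_io *io) { }

static void demo_abort(struct demo_io *io)
{
	unsigned long flags;

	spin_lock_irqsave(&io->lock, flags);
	if (io->flag & DEMO_ABORT_OP) {		/* abort already in progress */
		spin_unlock_irqrestore(&io->lock, flags);
		return;
	}
	io->flag |= DEMO_ABORT_OP;
	spin_unlock_irqrestore(&io->lock, flags);

	demo_issue_abort(io);			/* issued without holding the lock */
}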
10701198 static void
....@@ -1072,22 +1200,26 @@
10721200 struct nvmefc_tgt_fcp_req *rsp)
10731201 {
10741202 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1075
- struct lpfc_nvmet_rcv_ctx *ctxp =
1076
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1203
+ struct lpfc_async_xchg_ctx *ctxp =
1204
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
10771205 struct lpfc_hba *phba = ctxp->phba;
10781206 unsigned long flags;
10791207 bool aborting = false;
10801208
1081
- if (ctxp->state != LPFC_NVMET_STE_DONE &&
1082
- ctxp->state != LPFC_NVMET_STE_ABORT) {
1083
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1209
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
1210
+ if (ctxp->flag & LPFC_NVME_XBUSY)
1211
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1212
+ "6027 NVMET release with XBUSY flag x%x"
1213
+ " oxid x%x\n",
1214
+ ctxp->flag, ctxp->oxid);
1215
+ else if (ctxp->state != LPFC_NVME_STE_DONE &&
1216
+ ctxp->state != LPFC_NVME_STE_ABORT)
1217
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10841218 "6413 NVMET release bad state %d %d oxid x%x\n",
10851219 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1086
- }
10871220
1088
- spin_lock_irqsave(&ctxp->ctxlock, flags);
1089
- if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1090
- (ctxp->flag & LPFC_NVMET_XBUSY)) {
1221
+ if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1222
+ (ctxp->flag & LPFC_NVME_XBUSY)) {
10911223 aborting = true;
10921224 /* let the abort path do the real release */
10931225 lpfc_nvmet_defer_release(phba, ctxp);
....@@ -1098,6 +1230,7 @@
10981230 ctxp->state, aborting);
10991231
11001232 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1233
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
11011234
11021235 if (aborting)
11031236 return;
....@@ -1110,17 +1243,19 @@
11101243 struct nvmefc_tgt_fcp_req *rsp)
11111244 {
11121245 struct lpfc_nvmet_tgtport *tgtp;
1113
- struct lpfc_nvmet_rcv_ctx *ctxp =
1114
- container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1246
+ struct lpfc_async_xchg_ctx *ctxp =
1247
+ container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
11151248 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
11161249 struct lpfc_hba *phba = ctxp->phba;
1250
+ unsigned long iflag;
1251
+
11171252
11181253 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1119
- ctxp->oxid, ctxp->size, smp_processor_id());
1254
+ ctxp->oxid, ctxp->size, raw_smp_processor_id());
11201255
11211256 if (!nvmebuf) {
11221257 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1123
- "6425 Defer rcv: no buffer xri x%x: "
1258
+ "6425 Defer rcv: no buffer oxid x%x: "
11241259 "flg %x ste %x\n",
11251260 ctxp->oxid, ctxp->flag, ctxp->state);
11261261 return;
....@@ -1132,6 +1267,135 @@
11321267
11331268 /* Free the nvmebuf since a new buffer already replaced it */
11341269 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1270
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
1271
+ ctxp->rqb_buffer = NULL;
1272
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1273
+}
1274
+
1275
+/**
1276
+ * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
1277
+ * @phba: Pointer to HBA context object
1278
+ * @cmdwqe: Pointer to driver command WQE object.
1279
+ * @wcqe: Pointer to driver response CQE object.
1280
+ *
1281
+ * This function is the completion handler for NVME LS requests.
1282
+ * The function updates any states and statistics, then calls the
1283
+ * generic completion handler to finish completion of the request.
1284
+ **/
1285
+static void
1286
+lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1287
+ struct lpfc_wcqe_complete *wcqe)
1288
+{
1289
+ __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1290
+}
1291
+
1292
+/**
1293
+ * lpfc_nvmet_ls_req - Issue a Link Service request
1294
+ * @targetport - pointer to target instance registered with nvmet transport.
1295
+ * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
1296
+ * Driver sets this value to the ndlp pointer.
1297
+ * @pnvme_lsreq - the transport nvme_ls_req structure for the LS
1298
+ *
1299
+ * Driver registers this routine to handle any link service request
1300
+ * from the nvme_fc transport to a remote nvme-aware port.
1301
+ *
1302
+ * Return value :
1303
+ * 0 - Success
1304
+ * non-zero: various error codes, in form of -Exxx
1305
+ **/
1306
+static int
1307
+lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1308
+ void *hosthandle,
1309
+ struct nvmefc_ls_req *pnvme_lsreq)
1310
+{
1311
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1312
+ struct lpfc_hba *phba;
1313
+ struct lpfc_nodelist *ndlp;
1314
+ int ret;
1315
+ u32 hstate;
1316
+
1317
+ if (!lpfc_nvmet)
1318
+ return -EINVAL;
1319
+
1320
+ phba = lpfc_nvmet->phba;
1321
+ if (phba->pport->load_flag & FC_UNLOADING)
1322
+ return -EINVAL;
1323
+
1324
+ hstate = atomic_read(&lpfc_nvmet->state);
1325
+ if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1326
+ return -EACCES;
1327
+
1328
+ ndlp = (struct lpfc_nodelist *)hosthandle;
1329
+
1330
+ ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1331
+ lpfc_nvmet_ls_req_cmp);
1332
+
1333
+ return ret;
1334
+}
1335
+
1336
+/**
1337
+ * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
1338
+ * @targetport: Transport targetport, that LS was issued from.
1339
+ * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
1340
+ * Driver sets this value to the ndlp pointer.
1341
+ * @pnvme_lsreq - the transport nvme_ls_req structure for LS to be aborted
1342
+ *
1343
+ * Driver registers this routine to abort an NVME LS request that is
1344
+ * in progress (from the transport's perspective).
1345
+ **/
1346
+static void
1347
+lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1348
+ void *hosthandle,
1349
+ struct nvmefc_ls_req *pnvme_lsreq)
1350
+{
1351
+ struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1352
+ struct lpfc_hba *phba;
1353
+ struct lpfc_nodelist *ndlp;
1354
+ int ret;
1355
+
1356
+ phba = lpfc_nvmet->phba;
1357
+ if (phba->pport->load_flag & FC_UNLOADING)
1358
+ return;
1359
+
1360
+ ndlp = (struct lpfc_nodelist *)hosthandle;
1361
+
1362
+ ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1363
+ if (!ret)
1364
+ atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1365
+}
1366
+
1367
+static void
1368
+lpfc_nvmet_host_release(void *hosthandle)
1369
+{
1370
+ struct lpfc_nodelist *ndlp = hosthandle;
1371
+ struct lpfc_hba *phba = NULL;
1372
+ struct lpfc_nvmet_tgtport *tgtp;
1373
+
1374
+ phba = ndlp->phba;
1375
+ if (!phba->targetport || !phba->targetport->private)
1376
+ return;
1377
+
1378
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1379
+ "6202 NVMET XPT releasing hosthandle x%px\n",
1380
+ hosthandle);
1381
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382
+ atomic_set(&tgtp->state, 0);
1383
+}
1384
+
1385
+static void
1386
+lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1387
+{
1388
+ struct lpfc_nvmet_tgtport *tgtp;
1389
+ struct lpfc_hba *phba;
1390
+ uint32_t rc;
1391
+
1392
+ tgtp = tgtport->private;
1393
+ phba = tgtp->phba;
1394
+
1395
+ rc = lpfc_issue_els_rscn(phba->pport, 0);
1396
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1397
+ "6420 NVMET subsystem change: Notification %s\n",
1398
+ (rc) ? "Failed" : "Sent");
11351399 }
11361400
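/*
 * Illustrative sketch of the "hosthandle" round trip that the new ls_req(),
 * ls_abort() and host_release() handlers above rely on: the driver hands the
 * transport an opaque per-host cookie (its node pointer), and the transport
 * returns the same cookie on later callbacks. The demo_* names are
 * stand-ins, not driver or transport symbols.
 */
#include <linux/errno.h>

struct demo_node {
	int usable;
};

static void *demo_hosthandle(struct demo_node *ndlp)
{
	return ndlp;				/* cookie is simply the node pointer */
}

static int demo_ls_req(void *hosthandle)
{
	struct demo_node *ndlp = hosthandle;	/* cast the cookie back */

	return ndlp->usable ? 0 : -ENODEV;
}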
11371401 static struct nvmet_fc_target_template lpfc_tgttemplate = {
....@@ -1141,6 +1405,10 @@
11411405 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
11421406 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
11431407 .defer_rcv = lpfc_nvmet_defer_rcv,
1408
+ .discovery_event = lpfc_nvmet_discovery_event,
1409
+ .ls_req = lpfc_nvmet_ls_req,
1410
+ .ls_abort = lpfc_nvmet_ls_abort,
1411
+ .host_release = lpfc_nvmet_host_release,
11441412
11451413 .max_hw_queues = 1,
11461414 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
....@@ -1151,6 +1419,7 @@
11511419 .target_features = 0,
11521420 /* sizes of additional private data for data structures */
11531421 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1422
+ .lsrqst_priv_sz = 0,
11541423 };
11551424
11561425 static void
....@@ -1163,9 +1432,9 @@
11631432 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
11641433 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
11651434 &infop->nvmet_ctx_list, list) {
1166
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1435
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
11671436 list_del_init(&ctx_buf->list);
1168
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1437
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
11691438
11701439 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
11711440 ctx_buf->sglq->state = SGL_FREED;
....@@ -1195,9 +1464,9 @@
11951464
11961465 /* Cycle the the entire CPU context list for every MRQ */
11971466 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1198
- for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1467
+ for_each_present_cpu(j) {
1468
+ infop = lpfc_get_ctx_list(phba, j, i);
11991469 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1200
- infop++; /* next */
12011470 }
12021471 }
12031472 kfree(phba->sli4_hba.nvmet_ctx_info);
....@@ -1212,17 +1481,17 @@
12121481 union lpfc_wqe128 *wqe;
12131482 struct lpfc_nvmet_ctx_info *last_infop;
12141483 struct lpfc_nvmet_ctx_info *infop;
1215
- int i, j, idx;
1484
+ int i, j, idx, cpu;
12161485
12171486 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
12181487 "6403 Allocate NVMET resources for %d XRIs\n",
12191488 phba->sli4_hba.nvmet_xri_cnt);
12201489
12211490 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1222
- phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1491
+ phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
12231492 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
12241493 if (!phba->sli4_hba.nvmet_ctx_info) {
1225
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1494
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12261495 "6419 Failed allocate memory for "
12271496 "nvmet context lists\n");
12281497 return -ENOMEM;
....@@ -1247,13 +1516,12 @@
12471516 * of the IO completion. Thus a context that was allocated for MRQ A
12481517 * whose IO completed on CPU B will be freed to cpuB/mrqA.
12491518 */
1250
- infop = phba->sli4_hba.nvmet_ctx_info;
1251
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1519
+ for_each_possible_cpu(i) {
12521520 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1521
+ infop = lpfc_get_ctx_list(phba, i, j);
12531522 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
12541523 spin_lock_init(&infop->nvmet_ctx_list_lock);
12551524 infop->nvmet_ctx_list_cnt = 0;
1256
- infop++;
12571525 }
12581526 }
12591527
....@@ -1263,8 +1531,10 @@
12631531 * MRQ 1 cycling thru CPUs 0 - X, and so on.
12641532 */
12651533 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1266
- last_infop = lpfc_get_ctx_list(phba, 0, j);
1267
- for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1534
+ last_infop = lpfc_get_ctx_list(phba,
1535
+ cpumask_first(cpu_present_mask),
1536
+ j);
1537
+ for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
12681538 infop = lpfc_get_ctx_list(phba, i, j);
12691539 infop->nvmet_ctx_next_cpu = last_infop;
12701540 last_infop = infop;
....@@ -1275,10 +1545,11 @@
12751545 * received command on a per xri basis.
12761546 */
12771547 idx = 0;
1548
+ cpu = cpumask_first(cpu_present_mask);
12781549 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
12791550 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
12801551 if (!ctx_buf) {
1281
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1552
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12821553 "6404 Ran out of memory for NVMET\n");
12831554 return -ENOMEM;
12841555 }
....@@ -1287,19 +1558,19 @@
12871558 GFP_KERNEL);
12881559 if (!ctx_buf->context) {
12891560 kfree(ctx_buf);
1290
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1561
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12911562 "6405 Ran out of NVMET "
12921563 "context memory\n");
12931564 return -ENOMEM;
12941565 }
12951566 ctx_buf->context->ctxbuf = ctx_buf;
1296
- ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1567
+ ctx_buf->context->state = LPFC_NVME_STE_FREE;
12971568
12981569 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
12991570 if (!ctx_buf->iocbq) {
13001571 kfree(ctx_buf->context);
13011572 kfree(ctx_buf);
1302
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1573
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13031574 "6406 Ran out of NVMET iocb/WQEs\n");
13041575 return -ENOMEM;
13051576 }
....@@ -1318,17 +1589,18 @@
13181589 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
13191590 kfree(ctx_buf->context);
13201591 kfree(ctx_buf);
1321
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1592
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13221593 "6407 Ran out of NVMET XRIs\n");
13231594 return -ENOMEM;
13241595 }
1596
+ INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
13251597
13261598 /*
13271599 * Add ctx to MRQidx context list. Our initial assumption
13281600 * is MRQidx will be associated with CPUidx. This association
13291601 * can change on the fly.
13301602 */
1331
- infop = lpfc_get_ctx_list(phba, idx, idx);
1603
+ infop = lpfc_get_ctx_list(phba, cpu, idx);
13321604 spin_lock(&infop->nvmet_ctx_list_lock);
13331605 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
13341606 infop->nvmet_ctx_list_cnt++;
....@@ -1336,16 +1608,23 @@
13361608
13371609 /* Spread ctx structures evenly across all MRQs */
13381610 idx++;
1339
- if (idx >= phba->cfg_nvmet_mrq)
1611
+ if (idx >= phba->cfg_nvmet_mrq) {
13401612 idx = 0;
1613
+ cpu = cpumask_first(cpu_present_mask);
1614
+ continue;
1615
+ }
1616
+ cpu = cpumask_next(cpu, cpu_present_mask);
1617
+ if (cpu == nr_cpu_ids)
1618
+ cpu = cpumask_first(cpu_present_mask);
1619
+
13411620 }
13421621
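/*
 * Illustrative sketch of the round-robin CPU selection used above when
 * spreading contexts across MRQs: cpumask_next() is allowed to run off the
 * end of cpu_present_mask, in which case the walk wraps back to
 * cpumask_first(). The demo_* name is a stand-in, not a driver symbol.
 */
#include <linux/cpumask.h>

static unsigned int demo_next_present_cpu(unsigned int cpu)
{
	cpu = cpumask_next(cpu, cpu_present_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_present_mask);
	return cpu;
}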
1343
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1622
+ for_each_present_cpu(i) {
13441623 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
13451624 infop = lpfc_get_ctx_list(phba, i, j);
13461625 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
13471626 "6408 TOTAL NVMET ctx for CPU %d "
1348
- "MRQ %d: cnt %d nextcpu %p\n",
1627
+ "MRQ %d: cnt %d nextcpu x%px\n",
13491628 i, j, infop->nvmet_ctx_list_cnt,
13501629 infop->nvmet_ctx_next_cpu);
13511630 }
....@@ -1373,19 +1652,12 @@
13731652 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
13741653 pinfo.port_id = vport->fc_myDID;
13751654
1376
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
1377
- * For now need + 1 to get around NVME transport logic.
1655
+ /* We need to tell the transport layer + 1 because it takes page
1656
+ * alignment into account. When space for the SGL is allocated we
1657
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
13781658 */
1379
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1380
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1381
- "6400 Reducing sg segment cnt to %d\n",
1382
- LPFC_MAX_NVME_SEG_CNT);
1383
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1384
- } else {
1385
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1386
- }
13871659 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1388
- lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1660
+ lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
13891661 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
13901662
13911663 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
....@@ -1396,7 +1668,7 @@
13961668 error = -ENOENT;
13971669 #endif
13981670 if (error) {
1399
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1671
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14001672 "6025 Cannot register NVME targetport x%x: "
14011673 "portnm %llx nodenm %llx segs %d qs %d\n",
14021674 error,
....@@ -1415,7 +1687,7 @@
14151687
14161688 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
14171689 "6026 Registered NVME "
1418
- "targetport: %p, private %p "
1690
+ "targetport: x%px, private x%px "
14191691 "portnm %llx nodenm %llx segs %d qs %d\n",
14201692 phba->targetport, tgtp,
14211693 pinfo.port_name, pinfo.node_name,
....@@ -1470,7 +1742,7 @@
14701742 return 0;
14711743
14721744 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1473
- "6007 Update NVMET port %p did x%x\n",
1745
+ "6007 Update NVMET port x%px did x%x\n",
14741746 phba->targetport, vport->fc_myDID);
14751747
14761748 phba->targetport->port_id = vport->fc_myDID;
....@@ -1489,10 +1761,12 @@
14891761 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
14901762 struct sli4_wcqe_xri_aborted *axri)
14911763 {
1764
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
14921765 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
14931766 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1494
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1767
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
14951768 struct lpfc_nvmet_tgtport *tgtp;
1769
+ struct nvmefc_tgt_fcp_req *req = NULL;
14961770 struct lpfc_nodelist *ndlp;
14971771 unsigned long iflag = 0;
14981772 int rrq_empty = 0;
....@@ -1510,23 +1784,25 @@
15101784 }
15111785
15121786 spin_lock_irqsave(&phba->hbalock, iflag);
1513
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1787
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
15141788 list_for_each_entry_safe(ctxp, next_ctxp,
15151789 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
15161790 list) {
15171791 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
15181792 continue;
15191793
1794
+ spin_lock(&ctxp->ctxlock);
15201795 /* Check if we already received a free context call
15211796 * and we have completed processing an abort situation.
15221797 */
1523
- if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1524
- !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1525
- list_del(&ctxp->list);
1798
+ if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1799
+ !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1800
+ list_del_init(&ctxp->list);
15261801 released = true;
15271802 }
1528
- ctxp->flag &= ~LPFC_NVMET_XBUSY;
1529
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1803
+ ctxp->flag &= ~LPFC_NVME_XBUSY;
1804
+ spin_unlock(&ctxp->ctxlock);
1805
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
15301806
15311807 rrq_empty = list_empty(&phba->active_rrq_list);
15321808 spin_unlock_irqrestore(&phba->hbalock, iflag);
....@@ -1541,7 +1817,7 @@
15411817 }
15421818
15431819 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1544
- "6318 XB aborted oxid %x flg x%x (%x)\n",
1820
+ "6318 XB aborted oxid x%x flg x%x (%x)\n",
15451821 ctxp->oxid, ctxp->flag, released);
15461822 if (released)
15471823 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
....@@ -1550,61 +1826,169 @@
15501826 lpfc_worker_wake_up(phba);
15511827 return;
15521828 }
1553
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1829
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
15541830 spin_unlock_irqrestore(&phba->hbalock, iflag);
1831
+
1832
+ ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1833
+ if (ctxp) {
1834
+ /*
1835
+ * Abort already done by FW, so BA_ACC sent.
1836
+ * However, the transport may be unaware.
1837
+ */
1838
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1839
+ "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1840
+ "flag x%x oxid x%x rxid x%x\n",
1841
+ xri, ctxp->state, ctxp->flag, ctxp->oxid,
1842
+ rxid);
1843
+
1844
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
1845
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
1846
+ ctxp->state = LPFC_NVME_STE_ABORT;
1847
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1848
+
1849
+ lpfc_nvmeio_data(phba,
1850
+ "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1851
+ xri, raw_smp_processor_id(), 0);
1852
+
1853
+ req = &ctxp->hdlrctx.fcp_req;
1854
+ if (req)
1855
+ nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1856
+ }
1857
+#endif
15551858 }
15561859
15571860 int
15581861 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
15591862 struct fc_frame_header *fc_hdr)
1560
-
15611863 {
15621864 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
15631865 struct lpfc_hba *phba = vport->phba;
1564
- struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1866
+ struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
15651867 struct nvmefc_tgt_fcp_req *rsp;
1566
- uint16_t xri;
1868
+ uint32_t sid;
1869
+ uint16_t oxid, xri;
15671870 unsigned long iflag = 0;
15681871
1569
- xri = be16_to_cpu(fc_hdr->fh_ox_id);
1872
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
1873
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15701874
15711875 spin_lock_irqsave(&phba->hbalock, iflag);
1572
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1876
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
15731877 list_for_each_entry_safe(ctxp, next_ctxp,
15741878 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
15751879 list) {
1576
- if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1880
+ if (ctxp->oxid != oxid || ctxp->sid != sid)
15771881 continue;
15781882
1579
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1883
+ xri = ctxp->ctxbuf->sglq->sli4_xritag;
1884
+
1885
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
15801886 spin_unlock_irqrestore(&phba->hbalock, iflag);
15811887
15821888 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1583
- ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1889
+ ctxp->flag |= LPFC_NVME_ABTS_RCV;
15841890 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
15851891
15861892 lpfc_nvmeio_data(phba,
15871893 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1588
- xri, smp_processor_id(), 0);
1894
+ xri, raw_smp_processor_id(), 0);
15891895
15901896 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
15911897 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
15921898
1593
- rsp = &ctxp->ctx.fcp_req;
1899
+ rsp = &ctxp->hdlrctx.fcp_req;
15941900 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
15951901
15961902 /* Respond with BA_ACC accordingly */
15971903 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
15981904 return 0;
15991905 }
1600
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1906
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
16011907 spin_unlock_irqrestore(&phba->hbalock, iflag);
16021908
1603
- lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1604
- xri, smp_processor_id(), 1);
1909
+ /* check the wait list */
1910
+ if (phba->sli4_hba.nvmet_io_wait_cnt) {
1911
+ struct rqb_dmabuf *nvmebuf;
1912
+ struct fc_frame_header *fc_hdr_tmp;
1913
+ u32 sid_tmp;
1914
+ u16 oxid_tmp;
1915
+ bool found = false;
1916
+
1917
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1918
+
1919
+ /* match by oxid and s_id */
1920
+ list_for_each_entry(nvmebuf,
1921
+ &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1922
+ hbuf.list) {
1923
+ fc_hdr_tmp = (struct fc_frame_header *)
1924
+ (nvmebuf->hbuf.virt);
1925
+ oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1926
+ sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1927
+ if (oxid_tmp != oxid || sid_tmp != sid)
1928
+ continue;
1929
+
1930
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1931
+ "6321 NVMET Rcv ABTS oxid x%x from x%x "
1932
+ "is waiting for a ctxp\n",
1933
+ oxid, sid);
1934
+
1935
+ list_del_init(&nvmebuf->hbuf.list);
1936
+ phba->sli4_hba.nvmet_io_wait_cnt--;
1937
+ found = true;
1938
+ break;
1939
+ }
1940
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1941
+ iflag);
1942
+
1943
+ /* free buffer since already posted a new DMA buffer to RQ */
1944
+ if (found) {
1945
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1946
+ /* Respond with BA_ACC accordingly */
1947
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1948
+ return 0;
1949
+ }
1950
+ }
1951
+
1952
+ /* check active list */
1953
+ ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954
+ if (ctxp) {
1955
+ xri = ctxp->ctxbuf->sglq->sli4_xritag;
1956
+
1957
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958
+ ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1960
+
1961
+ lpfc_nvmeio_data(phba,
1962
+ "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1963
+ xri, raw_smp_processor_id(), 0);
1964
+
1965
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1966
+ "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1967
+ "flag x%x state x%x\n",
1968
+ ctxp->oxid, xri, ctxp->flag, ctxp->state);
1969
+
1970
+ if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1971
+ /* Notify the transport */
1972
+ nvmet_fc_rcv_fcp_abort(phba->targetport,
1973
+ &ctxp->hdlrctx.fcp_req);
1974
+ } else {
1975
+ cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977
+ lpfc_nvmet_defer_release(phba, ctxp);
1978
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1979
+ }
1980
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981
+ ctxp->oxid);
1982
+
1983
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1984
+ return 0;
1985
+ }
1986
+
1987
+ lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1988
+ oxid, raw_smp_processor_id(), 1);
16051989
16061990 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1607
- "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1991
+ "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
16081992
16091993 /* Respond with BA_RJT accordingly */
16101994 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
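/*
 * Illustrative recap only -- not part of the patch.  The unsolicited ABTS
 * handler above resolves the ABTS in one of three ways; "found_on_wait_list"
 * below is a placeholder for the oxid/sid match against the nvmet_io_wait
 * list shown in this hunk.
 */
if (found_on_wait_list)
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);	/* BA_ACC; free the queued rcv buffer */
else if ((ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid)))
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);	/* BA_ACC; abort/clean up the active IO */
else
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);	/* BA_RJT; no matching exchange */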
....@@ -1614,7 +1998,7 @@
16141998
16151999 static void
16162000 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1617
- struct lpfc_nvmet_rcv_ctx *ctxp)
2001
+ struct lpfc_async_xchg_ctx *ctxp)
16182002 {
16192003 struct lpfc_sli_ring *pring;
16202004 struct lpfc_iocbq *nvmewqeq;
....@@ -1665,6 +2049,7 @@
16652049 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
16662050 struct lpfc_sli_ring *pring;
16672051 struct lpfc_iocbq *nvmewqeq;
2052
+ struct lpfc_async_xchg_ctx *ctxp;
16682053 unsigned long iflags;
16692054 int rc;
16702055
....@@ -1678,13 +2063,26 @@
16782063 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
16792064 list);
16802065 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1681
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
2066
+ ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
16822068 spin_lock_irqsave(&pring->ring_lock, iflags);
16832069 if (rc == -EBUSY) {
16842070 /* WQ was full again, so put it back on the list */
16852071 list_add(&nvmewqeq->list, &wq->wqfull_list);
16862072 spin_unlock_irqrestore(&pring->ring_lock, iflags);
16872073 return;
2074
+ }
2075
+ if (rc == WQE_SUCCESS) {
2076
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2077
+ if (ctxp->ts_cmd_nvme) {
2078
+ if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079
+ ctxp->ts_status_wqput = ktime_get_ns();
2080
+ else
2081
+ ctxp->ts_data_wqput = ktime_get_ns();
2082
+ }
2083
+#endif
2084
+ } else {
2085
+ WARN_ON(rc);
16882086 }
16892087 }
16902088 wq->q_flag &= ~HBA_NVMET_WQFULL;
....@@ -1706,16 +2104,16 @@
17062104 return;
17072105 if (phba->targetport) {
17082106 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1709
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
1710
- wq = phba->sli4_hba.nvme_wq[qidx];
2107
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2108
+ wq = phba->sli4_hba.hdwq[qidx].io_wq;
17112109 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
17122110 }
17132111 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
17142112 nvmet_fc_unregister_targetport(phba->targetport);
17152113 if (!wait_for_completion_timeout(&tport_unreg_cmp,
17162114 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1717
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1718
- "6179 Unreg targetport %p timeout "
2115
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2116
+ "6179 Unreg targetport x%px timeout "
17192117 "reached.\n", phba->targetport);
17202118 lpfc_nvmet_cleanup_io_context(phba);
17212119 }
....@@ -1724,104 +2122,169 @@
17242122 }
17252123
17262124 /**
1727
- * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
2125
+ * lpfc_nvmet_handle_lsreq - Process an NVME LS request
17282126 * @phba: pointer to lpfc hba data structure.
1729
- * @pring: pointer to a SLI ring.
1730
- * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2127
+ * @axchg: pointer to exchange context for the NVME LS request
17312128 *
1732
- * This routine is used for processing the WQE associated with a unsolicited
1733
- * event. It first determines whether there is an existing ndlp that matches
1734
- * the DID from the unsolicited WQE. If not, it will create a new one with
1735
- * the DID from the unsolicited WQE. The ELS command from the unsolicited
1736
- * WQE is then used to invoke the proper routine and to set up proper state
1737
- * of the discovery state machine.
1738
- **/
1739
-static void
1740
-lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1741
- struct hbq_dmabuf *nvmebuf)
2129
+ * This routine is used for processing an asynchronously received NVME LS
2130
+ * request. Any remaining validation is done and the LS is then forwarded
2131
+ * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2132
+ *
2133
+ * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2134
+ * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2135
+ * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
2136
+ *
2137
+ * Returns 0 if LS was handled and delivered to the transport
2138
+ * Returns 1 if LS failed to be handled and should be dropped
2139
+ */
2140
+int
2141
+lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2142
+ struct lpfc_async_xchg_ctx *axchg)
17422143 {
17432144 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1744
- struct lpfc_nvmet_tgtport *tgtp;
1745
- struct fc_frame_header *fc_hdr;
1746
- struct lpfc_nvmet_rcv_ctx *ctxp;
1747
- uint32_t *payload;
1748
- uint32_t size, oxid, sid, rc;
2145
+ struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2146
+ uint32_t *payload = axchg->payload;
2147
+ int rc;
17492148
1750
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1751
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1752
-
1753
- if (!phba->targetport) {
1754
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1755
- "6154 LS Drop IO x%x\n", oxid);
1756
- oxid = 0;
1757
- size = 0;
1758
- sid = 0;
1759
- ctxp = NULL;
1760
- goto dropit;
1761
- }
1762
-
1763
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1764
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
1765
- size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1766
- sid = sli4_sid_from_fc_hdr(fc_hdr);
1767
-
1768
- ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1769
- if (ctxp == NULL) {
1770
- atomic_inc(&tgtp->rcv_ls_req_drop);
1771
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1772
- "6155 LS Drop IO x%x: Alloc\n",
1773
- oxid);
1774
-dropit:
1775
- lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1776
- "xri x%x sz %d from %06x\n",
1777
- oxid, size, sid);
1778
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1779
- return;
1780
- }
1781
- ctxp->phba = phba;
1782
- ctxp->size = size;
1783
- ctxp->oxid = oxid;
1784
- ctxp->sid = sid;
1785
- ctxp->wqeq = NULL;
1786
- ctxp->state = LPFC_NVMET_STE_LS_RCV;
1787
- ctxp->entry_cnt = 1;
1788
- ctxp->rqb_buffer = (void *)nvmebuf;
1789
-
1790
- lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1791
- oxid, size, sid);
1792
- /*
1793
- * The calling sequence should be:
1794
- * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
1795
- * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1796
- */
17972149 atomic_inc(&tgtp->rcv_ls_req_in);
1798
- rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1799
- payload, size);
2150
+
2151
+ /*
2152
+ * Driver passes the ndlp as the hosthandle argument allowing
2153
+ * the transport to generate LS requests for any associations
2154
+ * that are created.
2155
+ */
2156
+ rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2157
+ axchg->payload, axchg->size);
18002158
18012159 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
18022160 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1803
- "%08x %08x %08x\n", size, rc,
2161
+ "%08x %08x %08x\n", axchg->size, rc,
18042162 *payload, *(payload+1), *(payload+2),
18052163 *(payload+3), *(payload+4), *(payload+5));
18062164
1807
- if (rc == 0) {
2165
+ if (!rc) {
18082166 atomic_inc(&tgtp->rcv_ls_req_out);
2167
+ return 0;
2168
+ }
2169
+
2170
+ atomic_inc(&tgtp->rcv_ls_req_drop);
2171
+#endif
2172
+ return 1;
2173
+}
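/*
 * Illustrative sketch only -- not part of the patch.  A caller is expected
 * to consume the 0/1 return documented above roughly like this; the real
 * caller lives in the common unsolicited-LS receive path, outside this hunk.
 */
if (lpfc_nvmet_handle_lsreq(phba, axchg)) {
	/* LS was not delivered to the transport: abort the exchange */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
}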
2174
+
2175
+static void
2176
+lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2177
+{
2178
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2179
+ struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2180
+ struct lpfc_hba *phba = ctxp->phba;
2181
+ struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2182
+ struct lpfc_nvmet_tgtport *tgtp;
2183
+ uint32_t *payload, qno;
2184
+ uint32_t rc;
2185
+ unsigned long iflags;
2186
+
2187
+ if (!nvmebuf) {
2188
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2189
+ "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2190
+ "oxid: x%x flg: x%x state: x%x\n",
2191
+ ctxp->oxid, ctxp->flag, ctxp->state);
2192
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
2193
+ lpfc_nvmet_defer_release(phba, ctxp);
2194
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2195
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2196
+ ctxp->oxid);
18092197 return;
18102198 }
18112199
1812
- lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1813
- oxid, size, sid);
2200
+ if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2201
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202
+ "6324 IO oxid x%x aborted\n",
2203
+ ctxp->oxid);
2204
+ return;
2205
+ }
18142206
1815
- atomic_inc(&tgtp->rcv_ls_req_drop);
1816
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1817
- "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1818
- ctxp->oxid, rc);
2207
+ payload = (uint32_t *)(nvmebuf->dbuf.virt);
2208
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2209
+ ctxp->flag |= LPFC_NVME_TNOTIFY;
2210
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2211
+ if (ctxp->ts_isr_cmd)
2212
+ ctxp->ts_cmd_nvme = ktime_get_ns();
2213
+#endif
2214
+ /*
2215
+ * The calling sequence should be:
2216
+ * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2217
+ * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2218
+ * When we return from nvmet_fc_rcv_fcp_req, all relevant info
2219
+ * in the NVME command / FC header is stored.
2220
+ * A buffer has already been reposted for this IO, so just free
2221
+ * the nvmebuf.
2222
+ */
2223
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2224
+ payload, ctxp->size);
2225
+ /* Process FCP command */
2226
+ if (rc == 0) {
2227
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
2228
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
2229
+ if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2230
+ (nvmebuf != ctxp->rqb_buffer)) {
2231
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2232
+ return;
2233
+ }
2234
+ ctxp->rqb_buffer = NULL;
2235
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2236
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2237
+ return;
2238
+ }
18192239
1820
- /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
1821
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2240
+ /* Processing of FCP command is deferred */
2241
+ if (rc == -EOVERFLOW) {
2242
+ lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2243
+ "from %06x\n",
2244
+ ctxp->oxid, ctxp->size, ctxp->sid);
2245
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
2246
+ atomic_inc(&tgtp->defer_fod);
2247
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
2248
+ if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2249
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2250
+ return;
2251
+ }
2252
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253
+ /*
2254
+ * Post a replacement DMA buffer to RQ and defer
2255
+ * freeing rcv buffer till .defer_rcv callback
2256
+ */
2257
+ qno = nvmebuf->idx;
2258
+ lpfc_post_rq_buffer(
2259
+ phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2260
+ phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2261
+ return;
2262
+ }
2263
+ ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2264
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2265
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2266
+ "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2267
+ ctxp->oxid, rc,
2268
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
2269
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
2270
+ atomic_read(&tgtp->xmt_fcp_release));
2271
+ lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2272
+ ctxp->oxid, ctxp->size, ctxp->sid);
2273
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
2274
+ lpfc_nvmet_defer_release(phba, ctxp);
2275
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2276
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2277
+#endif
2278
+}
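/*
 * Illustrative summary only -- not part of the patch.  The routine above
 * handles three outcomes from nvmet_fc_rcv_fcp_req():
 *   0           command accepted; the RQ buffer is freed/reposted unless the
 *               REUSE_WQ / defer_rcv path already took ownership of it
 *   -EOVERFLOW  transport deferred the command; post a replacement RQ buffer
 *               and keep the received buffer until the .defer_rcv callback
 *   other       drop the IO: clear LPFC_NVME_TNOTIFY, mark the context for
 *               deferred release and issue an ABTS for the exchange
 */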
18222279
1823
- atomic_inc(&tgtp->xmt_ls_abort);
1824
- lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2280
+static void
2281
+lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2282
+{
2283
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2284
+ struct lpfc_nvmet_ctxbuf *ctx_buf =
2285
+ container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2286
+
2287
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
18252288 #endif
18262289 }
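/*
 * Illustrative sketch only -- not part of the patch.  How the defer_work
 * item above is expected to be wired up; the INIT_WORK() call is assumed to
 * run once when the ctx_buf pool is built (that code is outside this hunk).
 */
INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

/* ...later, in the unsolicited FCP receive path shown further down... */
if (!cqflag)
	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);	/* light CQ load: handle inline */
else if (!queue_work(phba->wq, &ctx_buf->defer_work))
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);	/* could not queue: drop */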
18272290
....@@ -1849,7 +2312,7 @@
18492312 else
18502313 get_infop = current_infop->nvmet_ctx_next_cpu;
18512314
1852
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
2315
+ for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
18532316 if (get_infop == current_infop) {
18542317 get_infop = get_infop->nvmet_ctx_next_cpu;
18552318 continue;
....@@ -1887,6 +2350,8 @@
18872350 * @phba: pointer to lpfc hba data structure.
18882351 * @idx: relative index of MRQ vector
18892352 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2353
+ * @isr_timestamp: in jiffies.
2354
+ * @cqflag: cq processing information regarding workload.
18902355 *
18912356 * This routine is used for processing the WQE associated with an unsolicited
18922357 * event. It first determines whether there is an existing ndlp that matches
....@@ -1899,33 +2364,28 @@
18992364 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
19002365 uint32_t idx,
19012366 struct rqb_dmabuf *nvmebuf,
1902
- uint64_t isr_timestamp)
2367
+ uint64_t isr_timestamp,
2368
+ uint8_t cqflag)
19032369 {
1904
- struct lpfc_nvmet_rcv_ctx *ctxp;
2370
+ struct lpfc_async_xchg_ctx *ctxp;
19052371 struct lpfc_nvmet_tgtport *tgtp;
19062372 struct fc_frame_header *fc_hdr;
19072373 struct lpfc_nvmet_ctxbuf *ctx_buf;
19082374 struct lpfc_nvmet_ctx_info *current_infop;
1909
- uint32_t *payload;
1910
- uint32_t size, oxid, sid, rc, qno;
2375
+ uint32_t size, oxid, sid, qno;
19112376 unsigned long iflag;
19122377 int current_cpu;
1913
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1914
- uint32_t id;
1915
-#endif
19162378
19172379 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
19182380 return;
19192381
19202382 ctx_buf = NULL;
19212383 if (!nvmebuf || !phba->targetport) {
1922
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2384
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19232385 "6157 NVMET FCP Drop IO\n");
1924
- oxid = 0;
1925
- size = 0;
1926
- sid = 0;
1927
- ctxp = NULL;
1928
- goto dropit;
2386
+ if (nvmebuf)
2387
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2388
+ return;
19292389 }
19302390
19312391 /*
....@@ -1935,7 +2395,7 @@
19352395 * be empty, thus it would need to be replenished with the
19362396 * context list from another CPU for this MRQ.
19372397 */
1938
- current_cpu = smp_processor_id();
2398
+ current_cpu = raw_smp_processor_id();
19392399 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
19402400 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
19412401 if (current_infop->nvmet_ctx_list_cnt) {
....@@ -1952,15 +2412,18 @@
19522412 size = nvmebuf->bytes_recv;
19532413
19542414 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1955
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1956
- id = smp_processor_id();
1957
- if (id < LPFC_CHECK_CPU_CNT)
1958
- phba->cpucheck_rcv_io[id]++;
2415
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2416
+ this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2417
+ if (idx != current_cpu)
2418
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2419
+ "6703 CPU Check rcv: "
2420
+ "cpu %d expect %d\n",
2421
+ current_cpu, idx);
19592422 }
19602423 #endif
19612424
19622425 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1963
- oxid, size, smp_processor_id());
2426
+ oxid, size, raw_smp_processor_id());
19642427
19652428 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
19662429
....@@ -1984,138 +2447,68 @@
19842447 return;
19852448 }
19862449
1987
- payload = (uint32_t *)(nvmebuf->dbuf.virt);
19882450 sid = sli4_sid_from_fc_hdr(fc_hdr);
19892451
1990
- ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1991
- if (ctxp->state != LPFC_NVMET_STE_FREE) {
1992
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2452
+ ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2453
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2454
+ list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2455
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2456
+ if (ctxp->state != LPFC_NVME_STE_FREE) {
2457
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19932458 "6414 NVMET Context corrupt %d %d oxid x%x\n",
19942459 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
19952460 }
19962461 ctxp->wqeq = NULL;
1997
- ctxp->txrdy = NULL;
19982462 ctxp->offset = 0;
19992463 ctxp->phba = phba;
20002464 ctxp->size = size;
20012465 ctxp->oxid = oxid;
20022466 ctxp->sid = sid;
20032467 ctxp->idx = idx;
2004
- ctxp->state = LPFC_NVMET_STE_RCV;
2468
+ ctxp->state = LPFC_NVME_STE_RCV;
20052469 ctxp->entry_cnt = 1;
20062470 ctxp->flag = 0;
20072471 ctxp->ctxbuf = ctx_buf;
20082472 ctxp->rqb_buffer = (void *)nvmebuf;
2473
+ ctxp->hdwq = NULL;
20092474 spin_lock_init(&ctxp->ctxlock);
20102475
20112476 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2012
- if (isr_timestamp) {
2477
+ if (isr_timestamp)
20132478 ctxp->ts_isr_cmd = isr_timestamp;
2014
- ctxp->ts_cmd_nvme = ktime_get_ns();
2015
- ctxp->ts_nvme_data = 0;
2016
- ctxp->ts_data_wqput = 0;
2017
- ctxp->ts_isr_data = 0;
2018
- ctxp->ts_data_nvme = 0;
2019
- ctxp->ts_nvme_status = 0;
2020
- ctxp->ts_status_wqput = 0;
2021
- ctxp->ts_isr_status = 0;
2022
- ctxp->ts_status_nvme = 0;
2023
- } else {
2024
- ctxp->ts_cmd_nvme = 0;
2025
- }
2479
+ ctxp->ts_cmd_nvme = 0;
2480
+ ctxp->ts_nvme_data = 0;
2481
+ ctxp->ts_data_wqput = 0;
2482
+ ctxp->ts_isr_data = 0;
2483
+ ctxp->ts_data_nvme = 0;
2484
+ ctxp->ts_nvme_status = 0;
2485
+ ctxp->ts_status_wqput = 0;
2486
+ ctxp->ts_isr_status = 0;
2487
+ ctxp->ts_status_nvme = 0;
20262488 #endif
20272489
20282490 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2029
- /*
2030
- * The calling sequence should be:
2031
- * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2032
- * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2033
- * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2034
- * the NVME command / FC header is stored, so we are free to repost
2035
- * the buffer.
2036
- */
2037
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2038
- payload, size);
2039
-
2040
- /* Process FCP command */
2041
- if (rc == 0) {
2042
- ctxp->rqb_buffer = NULL;
2043
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
2044
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2491
+ /* check for cq processing load */
2492
+ if (!cqflag) {
2493
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
20452494 return;
20462495 }
20472496
2048
- /* Processing of FCP command is deferred */
2049
- if (rc == -EOVERFLOW) {
2050
- /*
2051
- * Post a brand new DMA buffer to RQ and defer
2052
- * freeing rcv buffer till .defer_rcv callback
2053
- */
2054
- qno = nvmebuf->idx;
2055
- lpfc_post_rq_buffer(
2056
- phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2057
- phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2497
+ if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2498
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2499
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2500
+ "6325 Unable to queue work for oxid x%x. "
2501
+ "FCP Drop IO [x%x x%x x%x]\n",
2502
+ ctxp->oxid,
2503
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
2504
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
2505
+ atomic_read(&tgtp->xmt_fcp_release));
20582506
2059
- lpfc_nvmeio_data(phba,
2060
- "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2061
- oxid, size, sid);
2062
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
2063
- atomic_inc(&tgtp->defer_fod);
2064
- return;
2065
- }
2066
- ctxp->rqb_buffer = nvmebuf;
2067
-
2068
- atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2069
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2070
- "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2071
- ctxp->oxid, rc,
2072
- atomic_read(&tgtp->rcv_fcp_cmd_in),
2073
- atomic_read(&tgtp->rcv_fcp_cmd_out),
2074
- atomic_read(&tgtp->xmt_fcp_release));
2075
-dropit:
2076
- lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2077
- oxid, size, sid);
2078
- if (oxid) {
2507
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
20792508 lpfc_nvmet_defer_release(phba, ctxp);
2509
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
20802510 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2081
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2082
- return;
20832511 }
2084
-
2085
- if (ctx_buf)
2086
- lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2087
-
2088
- if (nvmebuf)
2089
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2090
-}
2091
-
2092
-/**
2093
- * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2094
- * @phba: pointer to lpfc hba data structure.
2095
- * @pring: pointer to a SLI ring.
2096
- * @nvmebuf: pointer to received nvme data structure.
2097
- *
2098
- * This routine is used to process an unsolicited event received from a SLI
2099
- * (Service Level Interface) ring. The actual processing of the data buffer
2100
- * associated with the unsolicited event is done by invoking the routine
2101
- * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
2102
- * SLI RQ on which the unsolicited event was received.
2103
- **/
2104
-void
2105
-lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2106
- struct lpfc_iocbq *piocb)
2107
-{
2108
- struct lpfc_dmabuf *d_buf;
2109
- struct hbq_dmabuf *nvmebuf;
2110
-
2111
- d_buf = piocb->context2;
2112
- nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2113
-
2114
- if (phba->nvmet_support == 0) {
2115
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2116
- return;
2117
- }
2118
- lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
21192512 }
21202513
21212514 /**
....@@ -2123,6 +2516,8 @@
21232516 * @phba: pointer to lpfc hba data structure.
21242517 * @idx: relative index of MRQ vector
21252518 * @nvmebuf: pointer to received nvme data structure.
2519
+ * @isr_timestamp: in jiffies.
2520
+ * @cqflag: cq processing information regarding workload.
21262521 *
21272522 * This routine is used to process an unsolicited event received from a SLI
21282523 * (Service Level Interface) ring. The actual processing of the data buffer
....@@ -2134,14 +2529,19 @@
21342529 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
21352530 uint32_t idx,
21362531 struct rqb_dmabuf *nvmebuf,
2137
- uint64_t isr_timestamp)
2532
+ uint64_t isr_timestamp,
2533
+ uint8_t cqflag)
21382534 {
2535
+ if (!nvmebuf) {
2536
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2537
+ "3167 NVMET FCP Drop IO\n");
2538
+ return;
2539
+ }
21392540 if (phba->nvmet_support == 0) {
21402541 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
21412542 return;
21422543 }
2143
- lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2144
- isr_timestamp);
2544
+ lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
21452545 }
21462546
21472547 /**
....@@ -2171,7 +2571,7 @@
21712571 **/
21722572 static struct lpfc_iocbq *
21732573 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2174
- struct lpfc_nvmet_rcv_ctx *ctxp,
2574
+ struct lpfc_async_xchg_ctx *ctxp,
21752575 dma_addr_t rspbuf, uint16_t rspsize)
21762576 {
21772577 struct lpfc_nodelist *ndlp;
....@@ -2179,7 +2579,7 @@
21792579 union lpfc_wqe128 *wqe;
21802580
21812581 if (!lpfc_is_link_up(phba)) {
2182
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2582
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21832583 "6104 NVMET prep LS wqe: link err: "
21842584 "NPORT x%x oxid:x%x ste %d\n",
21852585 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2189,7 +2589,7 @@
21892589 /* Allocate buffer for command wqe */
21902590 nvmewqe = lpfc_sli_get_iocbq(phba);
21912591 if (nvmewqe == NULL) {
2192
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2592
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21932593 "6105 NVMET prep LS wqe: No WQE: "
21942594 "NPORT x%x oxid x%x ste %d\n",
21952595 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2200,7 +2600,7 @@
22002600 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
22012601 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
22022602 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2203
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2603
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22042604 "6106 NVMET prep LS wqe: No ndlp: "
22052605 "NPORT x%x oxid x%x ste %d\n",
22062606 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2293,9 +2693,9 @@
22932693
22942694 static struct lpfc_iocbq *
22952695 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2296
- struct lpfc_nvmet_rcv_ctx *ctxp)
2696
+ struct lpfc_async_xchg_ctx *ctxp)
22972697 {
2298
- struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2698
+ struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
22992699 struct lpfc_nvmet_tgtport *tgtp;
23002700 struct sli4_sge *sgl;
23012701 struct lpfc_nodelist *ndlp;
....@@ -2303,14 +2703,13 @@
23032703 struct scatterlist *sgel;
23042704 union lpfc_wqe128 *wqe;
23052705 struct ulp_bde64 *bde;
2306
- uint32_t *txrdy;
23072706 dma_addr_t physaddr;
2308
- int i, cnt;
2707
+ int i, cnt, nsegs;
23092708 int do_pbde;
23102709 int xc = 1;
23112710
23122711 if (!lpfc_is_link_up(phba)) {
2313
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2712
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
23142713 "6107 NVMET prep FCP wqe: link err:"
23152714 "NPORT x%x oxid x%x ste %d\n",
23162715 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2321,7 +2720,7 @@
23212720 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
23222721 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
23232722 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2324
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2723
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
23252724 "6108 NVMET prep FCP wqe: no ndlp: "
23262725 "NPORT x%x oxid x%x ste %d\n",
23272726 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2329,13 +2728,14 @@
23292728 }
23302729
23312730 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2332
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2731
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
23332732 "6109 NVMET prep FCP wqe: seg cnt err: "
23342733 "NPORT x%x oxid x%x ste %d cnt %d\n",
23352734 ctxp->sid, ctxp->oxid, ctxp->state,
23362735 phba->cfg_nvme_seg_cnt);
23372736 return NULL;
23382737 }
2738
+ nsegs = rsp->sg_cnt;
23392739
23402740 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
23412741 nvmewqe = ctxp->wqeq;
....@@ -2343,7 +2743,7 @@
23432743 /* Allocate buffer for command wqe */
23442744 nvmewqe = ctxp->ctxbuf->iocbq;
23452745 if (nvmewqe == NULL) {
2346
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2746
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
23472747 "6110 NVMET prep FCP wqe: No "
23482748 "WQE: NPORT x%x oxid x%x ste %d\n",
23492749 ctxp->sid, ctxp->oxid, ctxp->state);
....@@ -2356,12 +2756,12 @@
23562756 }
23572757
23582758 /* Sanity check */
2359
- if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2759
+ if (((ctxp->state == LPFC_NVME_STE_RCV) &&
23602760 (ctxp->entry_cnt == 1)) ||
2361
- (ctxp->state == LPFC_NVMET_STE_DATA)) {
2761
+ (ctxp->state == LPFC_NVME_STE_DATA)) {
23622762 wqe = &nvmewqe->wqe;
23632763 } else {
2364
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2764
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
23652765 "6111 Wrong state NVMET FCP: %d cnt %d\n",
23662766 ctxp->state, ctxp->entry_cnt);
23672767 return NULL;
....@@ -2465,23 +2865,11 @@
24652865 &lpfc_treceive_cmd_template.words[3],
24662866 sizeof(uint32_t) * 9);
24672867
2468
- /* Words 0 - 2 : The first sg segment */
2469
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2470
- GFP_KERNEL, &physaddr);
2471
- if (!txrdy) {
2472
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2473
- "6041 Bad txrdy buffer: oxid x%x\n",
2474
- ctxp->oxid);
2475
- return NULL;
2476
- }
2477
- ctxp->txrdy = txrdy;
2478
- ctxp->txrdy_phys = physaddr;
2479
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2480
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2481
- wqe->fcp_treceive.bde.addrLow =
2482
- cpu_to_le32(putPaddrLow(physaddr));
2483
- wqe->fcp_treceive.bde.addrHigh =
2484
- cpu_to_le32(putPaddrHigh(physaddr));
2868
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2869
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2870
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2871
+ wqe->fcp_treceive.bde.addrLow = 0;
2872
+ wqe->fcp_treceive.bde.addrHigh = 0;
24852873
24862874 /* Word 4 */
24872875 wqe->fcp_treceive.relative_offset = ctxp->offset;
....@@ -2516,17 +2904,13 @@
25162904 /* Word 12 */
25172905 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
25182906
2519
- /* Setup 1 TXRDY and 1 SKIP SGE */
2520
- txrdy[0] = 0;
2521
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
2522
- txrdy[2] = 0;
2523
-
2524
- sgl->addr_hi = putPaddrHigh(physaddr);
2525
- sgl->addr_lo = putPaddrLow(physaddr);
2907
+ /* Setup 2 SKIP SGEs */
2908
+ sgl->addr_hi = 0;
2909
+ sgl->addr_lo = 0;
25262910 sgl->word2 = 0;
2527
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2911
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
25282912 sgl->word2 = cpu_to_le32(sgl->word2);
2529
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2913
+ sgl->sge_len = 0;
25302914 sgl++;
25312915 sgl->addr_hi = 0;
25322916 sgl->addr_lo = 0;
....@@ -2591,7 +2975,7 @@
25912975 wqe->fcp_trsp.rsvd_12_15[0] = 0;
25922976
25932977 /* Use rspbuf, NOT sg list */
2594
- rsp->sg_cnt = 0;
2978
+ nsegs = 0;
25952979 sgl->word2 = 0;
25962980 atomic_inc(&tgtp->xmt_fcp_rsp);
25972981 break;
....@@ -2608,8 +2992,7 @@
26082992 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
26092993 nvmewqe->context1 = ndlp;
26102994
2611
- for (i = 0; i < rsp->sg_cnt; i++) {
2612
- sgel = &rsp->sg[i];
2995
+ for_each_sg(rsp->sg, sgel, nsegs, i) {
26132996 physaddr = sg_dma_address(sgel);
26142997 cnt = sg_dma_len(sgel);
26152998 sgl->addr_hi = putPaddrHigh(physaddr);
....@@ -2638,7 +3021,7 @@
26383021 sgl++;
26393022 ctxp->offset += cnt;
26403023 }
2641
- ctxp->state = LPFC_NVMET_STE_DATA;
3024
+ ctxp->state = LPFC_NVME_STE_DATA;
26423025 ctxp->entry_cnt++;
26433026 return nvmewqe;
26443027 }
....@@ -2657,37 +3040,38 @@
26573040 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
26583041 struct lpfc_wcqe_complete *wcqe)
26593042 {
2660
- struct lpfc_nvmet_rcv_ctx *ctxp;
3043
+ struct lpfc_async_xchg_ctx *ctxp;
26613044 struct lpfc_nvmet_tgtport *tgtp;
2662
- uint32_t status, result;
3045
+ uint32_t result;
26633046 unsigned long flags;
26643047 bool released = false;
26653048
26663049 ctxp = cmdwqe->context2;
2667
- status = bf_get(lpfc_wcqe_c_status, wcqe);
26683050 result = wcqe->parameter;
26693051
26703052 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2671
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3053
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
26723054 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
26733055
2674
- ctxp->state = LPFC_NVMET_STE_DONE;
3056
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3057
+ ctxp->state = LPFC_NVME_STE_DONE;
26753058
26763059 /* Check if we already received a free context call
26773060 * and we have completed processing an abort situation.
26783061 */
2679
- spin_lock_irqsave(&ctxp->ctxlock, flags);
2680
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2681
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2682
- list_del(&ctxp->list);
3062
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3063
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
3064
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3065
+ list_del_init(&ctxp->list);
3066
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
26833067 released = true;
26843068 }
2685
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3069
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
26863070 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
26873071 atomic_inc(&tgtp->xmt_abort_rsp);
26883072
26893073 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2690
- "6165 ABORT cmpl: xri x%x flg x%x (%d) "
3074
+ "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
26913075 "WCQE: %08x %08x %08x %08x\n",
26923076 ctxp->oxid, ctxp->flag, released,
26933077 wcqe->word0, wcqe->total_data_placed,
....@@ -2706,7 +3090,7 @@
27063090 lpfc_sli_release_iocbq(phba, cmdwqe);
27073091
27083092 /* Since iaab/iaar are NOT set, there is no work left.
2709
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3093
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
27103094 * should have been called already.
27113095 */
27123096 }
....@@ -2725,14 +3109,13 @@
27253109 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
27263110 struct lpfc_wcqe_complete *wcqe)
27273111 {
2728
- struct lpfc_nvmet_rcv_ctx *ctxp;
3112
+ struct lpfc_async_xchg_ctx *ctxp;
27293113 struct lpfc_nvmet_tgtport *tgtp;
27303114 unsigned long flags;
2731
- uint32_t status, result;
3115
+ uint32_t result;
27323116 bool released = false;
27333117
27343118 ctxp = cmdwqe->context2;
2735
- status = bf_get(lpfc_wcqe_c_status, wcqe);
27363119 result = wcqe->parameter;
27373120
27383121 if (!ctxp) {
....@@ -2745,12 +3128,13 @@
27453128 }
27463129
27473130 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2748
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3131
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3132
+ if (ctxp->flag & LPFC_NVME_ABORT_OP)
27493133 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
27503134
27513135 /* Sanity check */
2752
- if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2753
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3136
+ if (ctxp->state != LPFC_NVME_STE_ABORT) {
3137
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
27543138 "6112 ABTS Wrong state:%d oxid x%x\n",
27553139 ctxp->state, ctxp->oxid);
27563140 }
....@@ -2758,19 +3142,20 @@
27583142 /* Check if we already received a free context call
27593143 * and we have completed processing an abort situation.
27603144 */
2761
- ctxp->state = LPFC_NVMET_STE_DONE;
2762
- spin_lock_irqsave(&ctxp->ctxlock, flags);
2763
- if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2764
- !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2765
- list_del(&ctxp->list);
3145
+ ctxp->state = LPFC_NVME_STE_DONE;
3146
+ if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3147
+ !(ctxp->flag & LPFC_NVME_XBUSY)) {
3148
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3149
+ list_del_init(&ctxp->list);
3150
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
27663151 released = true;
27673152 }
2768
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3153
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
27693154 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
27703155 atomic_inc(&tgtp->xmt_abort_rsp);
27713156
27723157 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2773
- "6316 ABTS cmpl xri x%x flg x%x (%x) "
3158
+ "6316 ABTS cmpl oxid x%x flg x%x (%x) "
27743159 "WCQE: %08x %08x %08x %08x\n",
27753160 ctxp->oxid, ctxp->flag, released,
27763161 wcqe->word0, wcqe->total_data_placed,
....@@ -2786,7 +3171,7 @@
27863171 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
27873172
27883173 /* Since iaab/iaar are NOT set, there is no work left.
2789
- * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3174
+ * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
27903175 * should have been called already.
27913176 */
27923177 }
....@@ -2805,24 +3190,25 @@
28053190 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
28063191 struct lpfc_wcqe_complete *wcqe)
28073192 {
2808
- struct lpfc_nvmet_rcv_ctx *ctxp;
3193
+ struct lpfc_async_xchg_ctx *ctxp;
28093194 struct lpfc_nvmet_tgtport *tgtp;
2810
- uint32_t status, result;
3195
+ uint32_t result;
28113196
28123197 ctxp = cmdwqe->context2;
2813
- status = bf_get(lpfc_wcqe_c_status, wcqe);
28143198 result = wcqe->parameter;
28153199
2816
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2817
- atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3200
+ if (phba->nvmet_support) {
3201
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3202
+ atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3203
+ }
28183204
28193205 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2820
- "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
3206
+ "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
28213207 ctxp, wcqe->word0, wcqe->total_data_placed,
28223208 result, wcqe->word3);
28233209
28243210 if (!ctxp) {
2825
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3211
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
28263212 "6415 NVMET LS Abort No ctx: WCQE: "
28273213 "%08x %08x %08x %08x\n",
28283214 wcqe->word0, wcqe->total_data_placed,
....@@ -2832,8 +3218,8 @@
28323218 return;
28333219 }
28343220
2835
- if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2836
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3221
+ if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3222
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
28373223 "6416 NVMET LS abort cmpl state mismatch: "
28383224 "oxid x%x: %d %d\n",
28393225 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
....@@ -2847,10 +3233,10 @@
28473233
28483234 static int
28493235 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2850
- struct lpfc_nvmet_rcv_ctx *ctxp,
3236
+ struct lpfc_async_xchg_ctx *ctxp,
28513237 uint32_t sid, uint16_t xri)
28523238 {
2853
- struct lpfc_nvmet_tgtport *tgtp;
3239
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
28543240 struct lpfc_iocbq *abts_wqeq;
28553241 union lpfc_wqe128 *wqe_abts;
28563242 struct lpfc_nodelist *ndlp;
....@@ -2859,14 +3245,16 @@
28593245 "6067 ABTS: sid %x xri x%x/x%x\n",
28603246 sid, xri, ctxp->wqeq->sli4_xritag);
28613247
2862
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3248
+ if (phba->nvmet_support && phba->targetport)
3249
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
28633250
28643251 ndlp = lpfc_findnode_did(phba->pport, sid);
28653252 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
28663253 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
28673254 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2868
- atomic_inc(&tgtp->xmt_abort_rsp_error);
2869
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3255
+ if (tgtp)
3256
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
3257
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
28703258 "6134 Drop ABTS - wrong NDLP state x%x.\n",
28713259 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
28723260
....@@ -2941,14 +3329,14 @@
29413329
29423330 static int
29433331 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2944
- struct lpfc_nvmet_rcv_ctx *ctxp,
3332
+ struct lpfc_async_xchg_ctx *ctxp,
29453333 uint32_t sid, uint16_t xri)
29463334 {
29473335 struct lpfc_nvmet_tgtport *tgtp;
29483336 struct lpfc_iocbq *abts_wqeq;
2949
- union lpfc_wqe128 *abts_wqe;
29503337 struct lpfc_nodelist *ndlp;
29513338 unsigned long flags;
3339
+ u8 opt;
29523340 int rc;
29533341
29543342 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
....@@ -2962,29 +3350,34 @@
29623350 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
29633351 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
29643352 atomic_inc(&tgtp->xmt_abort_rsp_error);
2965
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3353
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
29663354 "6160 Drop ABORT - wrong NDLP state x%x.\n",
29673355 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
29683356
29693357 /* No failure to an ABTS request. */
2970
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3358
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3359
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3360
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
29713361 return 0;
29723362 }
29733363
29743364 /* Issue ABTS for this WQE based on iotag */
29753365 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3366
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
29763367 if (!ctxp->abort_wqeq) {
29773368 atomic_inc(&tgtp->xmt_abort_rsp_error);
2978
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3369
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
29793370 "6161 ABORT failed: No wqeqs: "
29803371 "xri: x%x\n", ctxp->oxid);
29813372 /* No failure to an ABTS request. */
2982
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3373
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3374
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
29833375 return 0;
29843376 }
29853377 abts_wqeq = ctxp->abort_wqeq;
2986
- abts_wqe = &abts_wqeq->wqe;
2987
- ctxp->state = LPFC_NVMET_STE_ABORT;
3378
+ ctxp->state = LPFC_NVME_STE_ABORT;
3379
+ opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
3380
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
29883381
29893382 /* Announce entry to new IO submit field. */
29903383 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
....@@ -2997,15 +3390,17 @@
29973390 */
29983391 spin_lock_irqsave(&phba->hbalock, flags);
29993392 /* driver queued commands are in process of being flushed */
3000
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3393
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
30013394 spin_unlock_irqrestore(&phba->hbalock, flags);
30023395 atomic_inc(&tgtp->xmt_abort_rsp_error);
3003
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3396
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
30043397 "6163 Driver in reset cleanup - flushing "
30053398 "NVME Req now. hba_flag x%x oxid x%x\n",
30063399 phba->hba_flag, ctxp->oxid);
30073400 lpfc_sli_release_iocbq(phba, abts_wqeq);
3008
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3401
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3402
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3403
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
30093404 return 0;
30103405 }
30113406
....@@ -3013,56 +3408,33 @@
30133408 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
30143409 spin_unlock_irqrestore(&phba->hbalock, flags);
30153410 atomic_inc(&tgtp->xmt_abort_rsp_error);
3016
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3411
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
30173412 "6164 Outstanding NVME I/O Abort Request "
30183413 "still pending on oxid x%x\n",
30193414 ctxp->oxid);
30203415 lpfc_sli_release_iocbq(phba, abts_wqeq);
3021
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3416
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3417
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3418
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
30223419 return 0;
30233420 }
30243421
30253422 /* Ready - mark outstanding as aborted by driver. */
30263423 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
30273424
3028
- /* WQEs are reused. Clear stale data and set key fields to
3029
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3030
- */
3031
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3032
-
3033
- /* word 3 */
3034
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3035
-
3036
- /* word 7 */
3037
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3038
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3039
-
3040
- /* word 8 - tell the FW to abort the IO associated with this
3041
- * outstanding exchange ID.
3042
- */
3043
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3044
-
3045
- /* word 9 - this is the iotag for the abts_wqe completion. */
3046
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3047
- abts_wqeq->iotag);
3048
-
3049
- /* word 10 */
3050
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3051
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3052
-
3053
- /* word 11 */
3054
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3055
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3056
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3425
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
30573426
30583427 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
30593428 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
30603429 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3061
- abts_wqeq->iocb_cmpl = 0;
3430
+ abts_wqeq->iocb_cmpl = NULL;
30623431 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
30633432 abts_wqeq->context2 = ctxp;
30643433 abts_wqeq->vport = phba->pport;
3065
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3434
+ if (!ctxp->hdwq)
3435
+ ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3436
+
3437
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
30663438 spin_unlock_irqrestore(&phba->hbalock, flags);
30673439 if (rc == WQE_SUCCESS) {
30683440 atomic_inc(&tgtp->xmt_abort_sol);
....@@ -3070,24 +3442,26 @@
30703442 }
30713443
30723444 atomic_inc(&tgtp->xmt_abort_rsp_error);
3073
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3445
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
3446
+ ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3447
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
30743448 lpfc_sli_release_iocbq(phba, abts_wqeq);
3075
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3449
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
30763450 "6166 Failed ABORT issue_wqe with status x%x "
30773451 "for oxid x%x.\n",
30783452 rc, ctxp->oxid);
30793453 return 1;
30803454 }
30813455
3082
-
30833456 static int
30843457 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3085
- struct lpfc_nvmet_rcv_ctx *ctxp,
3458
+ struct lpfc_async_xchg_ctx *ctxp,
30863459 uint32_t sid, uint16_t xri)
30873460 {
30883461 struct lpfc_nvmet_tgtport *tgtp;
30893462 struct lpfc_iocbq *abts_wqeq;
30903463 unsigned long flags;
3464
+ bool released = false;
30913465 int rc;
30923466
30933467 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
....@@ -3096,14 +3470,14 @@
30963470 ctxp->wqeq->hba_wqidx = 0;
30973471 }
30983472
3099
- if (ctxp->state == LPFC_NVMET_STE_FREE) {
3100
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3473
+ if (ctxp->state == LPFC_NVME_STE_FREE) {
3474
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31013475 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
31023476 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
31033477 rc = WQE_BUSY;
31043478 goto aerr;
31053479 }
3106
- ctxp->state = LPFC_NVMET_STE_ABORT;
3480
+ ctxp->state = LPFC_NVME_STE_ABORT;
31073481 ctxp->entry_cnt++;
31083482 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
31093483 if (rc == 0)
....@@ -3114,7 +3488,10 @@
31143488 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
31153489 abts_wqeq->iocb_cmpl = NULL;
31163490 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3117
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3491
+ if (!ctxp->hdwq)
3492
+ ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3493
+
3494
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
31183495 spin_unlock_irqrestore(&phba->hbalock, flags);
31193496 if (rc == WQE_SUCCESS) {
31203497 return 0;
....@@ -3122,48 +3499,63 @@
31223499
31233500 aerr:
31243501 spin_lock_irqsave(&ctxp->ctxlock, flags);
3125
- if (ctxp->flag & LPFC_NVMET_CTX_RLS)
3126
- list_del(&ctxp->list);
3127
- ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3502
+ if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3503
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3504
+ list_del_init(&ctxp->list);
3505
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3506
+ released = true;
3507
+ }
3508
+ ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
31283509 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
31293510
31303511 atomic_inc(&tgtp->xmt_abort_rsp_error);
3131
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3132
- "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3133
- ctxp->oxid, rc);
3134
- lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3512
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3513
+ "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3514
+ "(%x)\n",
3515
+ ctxp->oxid, rc, released);
3516
+ if (released)
3517
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
31353518 return 1;
31363519 }
31373520
3138
-static int
3139
-lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3140
- struct lpfc_nvmet_rcv_ctx *ctxp,
3521
+/**
3522
+ * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
3523
+ * via async frame receive where the frame is not handled.
3524
+ * @phba: pointer to adapter structure
3525
+ * @ctxp: pointer to the asynchronously received sequence
3526
+ * @sid: address of the remote port to send the ABTS to
3527
+ * @xri: oxid value for the ABTS (other side's exchange id).
3528
+ **/
3529
+int
3530
+lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3531
+ struct lpfc_async_xchg_ctx *ctxp,
31413532 uint32_t sid, uint16_t xri)
31423533 {
3143
- struct lpfc_nvmet_tgtport *tgtp;
3534
+ struct lpfc_nvmet_tgtport *tgtp = NULL;
31443535 struct lpfc_iocbq *abts_wqeq;
3145
- union lpfc_wqe128 *wqe_abts;
31463536 unsigned long flags;
31473537 int rc;
31483538
3149
- if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3150
- (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3151
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3539
+ if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3540
+ (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3541
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
31523542 ctxp->entry_cnt++;
31533543 } else {
3154
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3544
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31553545 "6418 NVMET LS abort state mismatch "
31563546 "IO x%x: %d %d\n",
31573547 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3158
- ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3548
+ ctxp->state = LPFC_NVME_STE_LS_ABORT;
31593549 }
31603550
3161
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3551
+ if (phba->nvmet_support && phba->targetport)
3552
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3553
+
31623554 if (!ctxp->wqeq) {
31633555 /* Issue ABTS for this WQE based on iotag */
31643556 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
31653557 if (!ctxp->wqeq) {
3166
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3558
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31673559 "6068 Abort failed: No wqeqs: "
31683560 "xri: x%x\n", xri);
31693561 /* No failure to an ABTS request. */
....@@ -3172,7 +3564,6 @@
31723564 }
31733565 }
31743566 abts_wqeq = ctxp->wqeq;
3175
- wqe_abts = &abts_wqeq->wqe;
31763567
31773568 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
31783569 rc = WQE_BUSY;
....@@ -3181,21 +3572,49 @@
31813572
31823573 spin_lock_irqsave(&phba->hbalock, flags);
31833574 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3184
- abts_wqeq->iocb_cmpl = 0;
3575
+ abts_wqeq->iocb_cmpl = NULL;
31853576 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3186
- rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3577
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
31873578 spin_unlock_irqrestore(&phba->hbalock, flags);
31883579 if (rc == WQE_SUCCESS) {
3189
- atomic_inc(&tgtp->xmt_abort_unsol);
3580
+ if (tgtp)
3581
+ atomic_inc(&tgtp->xmt_abort_unsol);
31903582 return 0;
31913583 }
31923584 out:
3193
- atomic_inc(&tgtp->xmt_abort_rsp_error);
3585
+ if (tgtp)
3586
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
31943587 abts_wqeq->context2 = NULL;
31953588 abts_wqeq->context3 = NULL;
31963589 lpfc_sli_release_iocbq(phba, abts_wqeq);
3197
- kfree(ctxp);
3198
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3590
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31993591 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3200
- return 0;
3592
+ return 1;
3593
+}
3594
+
3595
+/**
3596
+ * lpfc_nvmet_invalidate_host - invalidate an NVME host's connections to this target
3597
+ *
3598
+ * @phba: pointer to the driver instance bound to an adapter port.
3599
+ * @ndlp: pointer to an lpfc_nodelist type
3600
+ *
3601
+ * This routine upcalls the nvmet transport to invalidate an NVME
3602
+ * host to which this target instance had active connections.
3603
+ */
3604
+void
3605
+lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3606
+{
3607
+ struct lpfc_nvmet_tgtport *tgtp;
3608
+
3609
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
3610
+ "6203 Invalidating hosthandle x%px\n",
3611
+ ndlp);
3612
+
3613
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3614
+ atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3615
+
3616
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3617
+ /* Need to get the nvmet_fc_target_port pointer here.*/
3618
+ nvmet_fc_invalidate_host(phba->targetport, ndlp);
3619
+#endif
32013620 }
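/*
 * Illustrative note only -- not part of the patch.  This upcall is the
 * teardown counterpart of passing the ndlp as the transport hosthandle in
 * lpfc_nvmet_handle_lsreq(): when the remote initiator represented by the
 * ndlp goes away, the caller (assumed to be the driver's node teardown
 * path, outside this file) invokes
 *
 *	lpfc_nvmet_invalidate_host(phba, ndlp);
 *
 * so the transport can retire every association tied to that hosthandle.
 */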