
hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -36,9 +36,6 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
 
-#include <linux/nvme.h>
-#include <linux/nvme-fc-driver.h>
-#include <linux/nvme-fc.h>
 #include "lpfc_version.h"
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -56,12 +53,12 @@
 
 /* NVME initiator-based functions */
 
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		  int expedite);
+		  int idx, int expedite);
 
 static void
-lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
 
 static struct nvme_fc_port_template lpfc_nvme_template;
 
@@ -196,9 +193,51 @@
 }
 
 /**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniqely identifies the local exchange resource.
+ * @opt: Option bits -
+ *		bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+	union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+	/* WQEs are reused.  Clear stale data and set key fields to
+	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+	 */
+	memset(wqe, 0, sizeof(*wqe));
+
+	if (opt & INHIBIT_ABORT)
+		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+	/* Abort specified xri tag, with the mask deliberately zeroed */
+	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+	/* Abort the IO associated with this outstanding exchange ID. */
+	wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+	/* iotag for the wqe completion. */
+	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
+
+/**
  * lpfc_nvme_create_queue -
+ * @pnvme_lport: Transport localport that LS is to be issued from
  * @lpfc_pnvme: Pointer to the driver's nvme instance data
  * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
+ * @qsize: Size of the queue in bytes
  * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
@@ -229,7 +268,7 @@
 	if (qhandle == NULL)
 		return -ENOMEM;
 
-	qhandle->cpu_id = smp_processor_id();
+	qhandle->cpu_id = raw_smp_processor_id();
 	qhandle->qidx = qidx;
 	/*
 	 * NVME qidx == 0 is the admin queue, so both admin queue
@@ -239,7 +278,7 @@
 	if (qidx) {
 		str = "IO ";  /* IO queue */
 		qhandle->index = ((qidx - 1) %
-			vport->phba->cfg_nvme_io_channel);
+			lpfc_nvme_template.max_hw_queues);
 	} else {
 		str = "ADM";  /* Admin queue */
 		qhandle->index = qidx;
@@ -247,7 +286,7 @@
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6073 Binding %s HdwQueue %d (cpu %d) to "
-			 "io_channel %d qhandle %p\n", str,
+			 "hdw_queue %d qhandle x%px\n", str,
 			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
 	*handle = (void *)qhandle;
 	return 0;
@@ -255,7 +294,7 @@
 
 /**
  * lpfc_nvme_delete_queue -
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @pnvme_lport: Transport localport that LS is to be issued from
  * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
  * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
@@ -282,7 +321,7 @@
 	vport = lport->vport;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
+			"6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
 			lport, qidx, handle);
 	kfree(handle);
 }
@@ -293,7 +332,7 @@
 	struct lpfc_nvme_lport *lport = localport->private;
 
 	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
-			 "6173 localport %p delete complete\n",
+			 "6173 localport x%px delete complete\n",
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
@@ -312,7 +351,7 @@
 * Return value :
 *   None
 */
-void
+static void
 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 {
 	struct lpfc_nvme_rport *rport = remoteport->private;
@@ -332,7 +371,7 @@
 	 * calling state machine to remove the node.
 	 */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			"6146 remoteport delete of remoteport %p\n",
+			"6146 remoteport delete of remoteport x%px\n",
 			remoteport);
 	spin_lock_irq(&vport->phba->hbalock);
 
@@ -356,18 +395,131 @@
 	return;
 }
 
+/**
+ * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
+ * @phba: pointer to lpfc hba data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
+ *
+ * This routine is used for processing an asychronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvme-fc transport via nvme_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
+ * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+			struct lpfc_async_xchg_ctx *axchg)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_vport *vport;
+	struct lpfc_nvme_rport *lpfc_rport;
+	struct nvme_fc_remote_port *remoteport;
+	struct lpfc_nvme_lport *lport;
+	uint32_t *payload = axchg->payload;
+	int rc;
+
+	vport = axchg->ndlp->vport;
+	lpfc_rport = axchg->ndlp->nrport;
+	if (!lpfc_rport)
+		return -EINVAL;
+
+	remoteport = lpfc_rport->remoteport;
+	if (!vport->localport)
+		return -EINVAL;
+
+	lport = vport->localport->private;
+	if (!lport)
+		return -EINVAL;
+
+	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
+				axchg->size);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
+			"%08x %08x %08x\n",
+			axchg->size, rc,
+			*payload, *(payload+1), *(payload+2),
+			*(payload+3), *(payload+4), *(payload+5));
+
+	if (!rc)
+		return 0;
+#endif
+	return 1;
+}
+
+/**
+ * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
+ *        LS request.
+ * @phba: Pointer to HBA context object
+ * @vport: The local port that issued the LS
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the generic completion handler for NVME LS requests.
+ * The function updates any states and statistics, calls the transport
+ * ls_req done() routine, then tears down the command and buffers used
+ * for the LS request.
+ **/
+void
+__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+			struct lpfc_iocbq *cmdwqe,
+			struct lpfc_wcqe_complete *wcqe)
+{
+	struct nvmefc_ls_req *pnvme_lsreq;
+	struct lpfc_dmabuf *buf_ptr;
+	struct lpfc_nodelist *ndlp;
+	uint32_t status;
+
+	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
+	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
+			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
+			 "ndlp:x%px\n",
+			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+			 cmdwqe->sli4_xritag, status,
+			 (wcqe->parameter & 0xffff),
+			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+
+	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
+			 cmdwqe->sli4_xritag, status, wcqe->parameter);
+
+	if (cmdwqe->context3) {
+		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		cmdwqe->context3 = NULL;
+	}
+	if (pnvme_lsreq->done)
+		pnvme_lsreq->done(pnvme_lsreq, status);
+	else
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6046 NVMEx cmpl without done call back? "
+				 "Data %px DID %x Xri: %x status %x\n",
+				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+				 cmdwqe->sli4_xritag, status);
+	if (ndlp) {
+		lpfc_nlp_put(ndlp);
+		cmdwqe->context1 = NULL;
+	}
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+}
+
 static void
-lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		       struct lpfc_wcqe_complete *wcqe)
 {
 	struct lpfc_vport *vport = cmdwqe->vport;
 	struct lpfc_nvme_lport *lport;
 	uint32_t status;
-	struct nvmefc_ls_req *pnvme_lsreq;
-	struct lpfc_dmabuf *buf_ptr;
-	struct lpfc_nodelist *ndlp;
 
-	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 
 	if (vport->localport) {
@@ -382,38 +534,7 @@
 		}
 	}
 
-	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6047 nvme cmpl Enter "
-			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
-			 "lsreg:%p bmp:%p ndlp:%p\n",
-			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
-			 cmdwqe->sli4_xritag, status,
-			 (wcqe->parameter & 0xffff),
-			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
-
-	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
-			 cmdwqe->sli4_xritag, status, wcqe->parameter);
-
-	if (cmdwqe->context3) {
-		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
-		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
-		kfree(buf_ptr);
-		cmdwqe->context3 = NULL;
-	}
-	if (pnvme_lsreq->done)
-		pnvme_lsreq->done(pnvme_lsreq, status);
-	else
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6046 nvme cmpl without done call back? "
-				 "Data %p DID %x Xri: %x status %x\n",
-				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
-				 cmdwqe->sli4_xritag, status);
-	if (ndlp) {
-		lpfc_nlp_put(ndlp);
-		cmdwqe->context1 = NULL;
-	}
-	lpfc_sli_release_iocbq(phba, cmdwqe);
+	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
 }
 
 static int
@@ -438,6 +559,7 @@
 		return 1;
 
 	wqe = &genwqe->wqe;
+	/* Initialize only 64 bytes */
 	memset(wqe, 0, sizeof(union lpfc_wqe));
 
 	genwqe->context3 = (uint8_t *)bmp;
@@ -516,12 +638,6 @@
 
 
 	/* Issue GEN REQ WQE for NPORT <did> */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "6050 Issue GEN REQ WQE to NPORT x%x "
-			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
-			 ndlp->nlp_DID, genwqe->iotag,
-			 vport->port_state,
-			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
 	genwqe->wqe_cmpl = cmpl;
 	genwqe->iocb_cmpl = NULL;
 	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
@@ -531,107 +647,109 @@
 	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
 		genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
 
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
+	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
 	if (rc) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6045 Issue GEN REQ WQE to NPORT x%x "
-				 "Data: x%x x%x\n",
+				 "Data: x%x x%x rc x%x\n",
 				 ndlp->nlp_DID, genwqe->iotag,
-				 vport->port_state);
+				 vport->port_state, rc);
 		lpfc_sli_release_iocbq(phba, genwqe);
 		return 1;
 	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
+			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
 	return 0;
 }
 
+
 /**
- * lpfc_nvme_ls_req - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
+ * @vport: The local port issuing the LS
+ * @ndlp: The remote port to send the LS to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
+ * @gen_req_cmp: Completion call-back
 *
- * Driver registers this routine to handle any link service request
- * from the nvme_fc transport to a remote nvme-aware port.
+ * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
+ * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
-*   TODO: What are the failure codes.
+*   non-zero: various error codes, in form of -Exxx
 **/
-static int
-lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
-		 struct nvme_fc_remote_port *pnvme_rport,
-		 struct nvmefc_ls_req *pnvme_lsreq)
+int
+__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		   struct nvmefc_ls_req *pnvme_lsreq,
+		   void (*gen_req_cmp)(struct lpfc_hba *phba,
+				       struct lpfc_iocbq *cmdwqe,
+				       struct lpfc_wcqe_complete *wcqe))
 {
-	int ret = 0;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
-	struct lpfc_vport *vport;
-	struct lpfc_nodelist *ndlp;
-	struct ulp_bde64 *bpl;
 	struct lpfc_dmabuf *bmp;
+	struct ulp_bde64 *bpl;
+	int ret;
 	uint16_t ntype, nstate;
 
-	/* there are two dma buf in the request, actually there is one and
-	 * the second one is just the start address + cmd size.
-	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
-	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
-	 * because the nvem layer owns the data bufs.
-	 * We do not have to break these packets open, we don't care what is in
-	 * them. And we do not have to look at the resonse data, we only care
-	 * that we got a response. All of the caring is going to happen in the
-	 * nvme-fc layer.
-	 */
-
-	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
-	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
-	if (unlikely(!lport) || unlikely(!rport))
-		return -EINVAL;
-
-	vport = lport->vport;
-
-	if (vport->load_flag & FC_UNLOADING)
-		return -ENODEV;
-
-	/* Need the ndlp. It is stored in the driver's rport. */
-	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6051 Remoteport %p, rport has invalid ndlp. "
-				 "Failing LS Req\n", pnvme_rport);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
+				 "LS Req\n",
+				 ndlp);
 		return -ENODEV;
 	}
 
-	/* The remote node has to be a mapped nvme target or an
-	 * unmapped nvme initiator or it's an error.
-	 */
 	ntype = ndlp->nlp_type;
 	nstate = ndlp->nlp_state;
 	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
 	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6088 DID x%06x not ready for "
-				 "IO. State x%x, Type x%x\n",
-				 pnvme_rport->port_id,
-				 ndlp->nlp_state, ndlp->nlp_type);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6088 NVMEx LS REQ: Fail DID x%06x not "
+				 "ready for IO. Type x%x, State x%x\n",
+				 ndlp->nlp_DID, ntype, nstate);
 		return -ENODEV;
 	}
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
 
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6044 Could not find node for DID %x\n",
-				 pnvme_rport->port_id);
-		return 2;
+	if (!vport->phba->sli4_hba.nvmels_wq)
+		return -ENOMEM;
+
+	/*
+	 * there are two dma buf in the request, actually there is one and
+	 * the second one is just the start address + cmd size.
+	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
+	 * because the nvem layer owns the data bufs.
+	 * We do not have to break these packets open, we don't care what is
+	 * in them. And we do not have to look at the resonse data, we only
+	 * care that we got a response. All of the caring is going to happen
+	 * in the nvme-fc layer.
+	 */
+
+	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+	if (!bmp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6044 NVMEx LS REQ: Could not alloc LS buf "
+				 "for DID %x\n",
+				 ndlp->nlp_DID);
+		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&bmp->list);
+
 	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
 	if (!bmp->virt) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6042 Could not find node for DID %x\n",
-				 pnvme_rport->port_id);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6042 NVMEx LS REQ: Could not alloc mbuf "
+				 "for DID %x\n",
+				 ndlp->nlp_DID);
 		kfree(bmp);
-		return 3;
+		return -ENOMEM;
 	}
+
+	INIT_LIST_HEAD(&bmp->list);
+
 	bpl = (struct ulp_bde64 *)bmp->virt;
 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
 	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
@@ -646,52 +764,177 @@
 	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
 
-	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
-			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
-			 ndlp->nlp_DID,
-			 pnvme_lport, pnvme_rport,
-			 pnvme_lsreq, pnvme_lsreq->rqstlen,
-			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
-			 &pnvme_lsreq->rspdma);
+			"6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
+			"rqstlen:%d rsplen:%d %pad %pad\n",
+			ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
+			pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+			&pnvme_lsreq->rspdma);
 
-	atomic_inc(&lport->fc4NvmeLsRequests);
-
-	/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
-	 * This code allows it all to work.
-	 */
 	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
-				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
-				ndlp, 2, 30, 0);
+				pnvme_lsreq, gen_req_cmp, ndlp, 2,
+				LPFC_NVME_LS_TIMEOUT, 0);
 	if (ret != WQE_SUCCESS) {
-		atomic_inc(&lport->xmt_ls_err);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6052 EXIT. issue ls wqe failed lport %p, "
-				 "rport %p lsreq%p Status %x DID %x\n",
-				 pnvme_lport, pnvme_rport, pnvme_lsreq,
-				 ret, ndlp->nlp_DID);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
+				 "lsreq x%px Status %x DID %x\n",
+				 pnvme_lsreq, ret, ndlp->nlp_DID);
 		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
 		kfree(bmp);
-		return ret;
+		return -EIO;
 	}
 
-	/* Stub in routine and return 0 for now. */
-	return ret;
+	return 0;
 }
 
 /**
- * lpfc_nvme_ls_abort - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * lpfc_nvme_ls_req - Issue an NVME Link Service request
+ * @pnvme_lport: Transport localport that LS is to be issued from.
+ * @nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
-*   TODO: What are the failure codes.
+*   non-zero: various error codes, in form of -Exxx
+ **/
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+		 struct nvme_fc_remote_port *pnvme_rport,
+		 struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_rport *rport;
+	struct lpfc_vport *vport;
+	int ret;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	if (unlikely(!lport) || unlikely(!rport))
+		return -EINVAL;
+
+	vport = lport->vport;
+	if (vport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
+	atomic_inc(&lport->fc4NvmeLsRequests);
+
+	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
+				 lpfc_nvme_ls_req_cmp);
+	if (ret)
+		atomic_inc(&lport->xmt_ls_err);
+
+	return ret;
+}
+
+/**
+ * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
+ *         NVME LS request
+ * @vport: The local port that issued the LS
+ * @ndlp: The remote port the LS was sent to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
+ *
+ * The driver validates the ndlp, looks for the LS, and aborts the
+ * LS if found.
+ *
+ * Returns:
+ * 0 : if LS found and aborted
+ * non-zero: various error conditions in form -Exxx
+ **/
+int
+__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *wqe, *next_wqe;
+	bool foundit = false;
+
+	if (!ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
+				 "x%06x, Failing LS Req\n",
+				 ndlp, ndlp ? ndlp->nlp_DID : 0);
+		return -EINVAL;
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
+			 "x%p rqstlen:%d rsplen:%d %pad %pad\n",
+			 pnvme_lsreq, pnvme_lsreq->rqstlen,
+			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+			 &pnvme_lsreq->rspdma);
+
+	/*
+	 * Lock the ELS ring txcmplq and look for the wqe that matches
+	 * this ELS. If found, issue an abort on the wqe.
+	 */
+	pring = phba->sli4_hba.nvmels_wq->pring;
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&pring->ring_lock);
+	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
+		if (wqe->context2 == pnvme_lsreq) {
+			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+			foundit = true;
+			break;
+		}
+	}
+	spin_unlock(&pring->ring_lock);
+
+	if (foundit)
+		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
+	spin_unlock_irq(&phba->hbalock);
+
+	if (foundit)
+		return 0;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
+			 "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
+			 pnvme_lsreq);
+	return -EINVAL;
+}
+
+static int
+lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+		     struct nvme_fc_remote_port *remoteport,
+		     struct nvmefc_ls_rsp *ls_rsp)
+{
+	struct lpfc_async_xchg_ctx *axchg =
+		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
+	struct lpfc_nvme_lport *lport;
+	int rc;
+
+	if (axchg->phba->pport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
+	lport = (struct lpfc_nvme_lport *)localport->private;
+
+	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);
+
+	if (rc) {
+		/*
+		 * unless the failure is due to having already sent
+		 * the response, an abort will be generated for the
+		 * exchange if the rsp can't be sent.
+		 */
+		if (rc != -EALREADY)
+			atomic_inc(&lport->xmt_ls_abort);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_nvme_ls_abort - Abort a prior NVME LS request
+ * @pnvme_lport: Transport localport that LS is to be issued from.
+ * @pnvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to abort a NVME LS request that is
+ * in progress (from the transports perspective).
 **/
 static void
 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
@@ -702,9 +945,7 @@
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
 	struct lpfc_nodelist *ndlp;
-	LIST_HEAD(abort_list);
-	struct lpfc_sli_ring *pring;
-	struct lpfc_iocbq *wqe, *next_wqe;
+	int ret;
 
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 	if (unlikely(!lport))
@@ -716,54 +957,16 @@
 		return;
 
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
-	if (!ndlp) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
-				 "6049 Could not find node for DID %x\n",
-				 pnvme_rport->port_id);
-		return;
-	}
 
-	/* Expand print to include key fields. */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
-			 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
-			 "rsplen:%d %pad %pad\n",
-			 pnvme_lport, pnvme_rport,
-			 pnvme_lsreq, pnvme_lsreq->rqstlen,
-			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
-			 &pnvme_lsreq->rspdma);
-
-	/*
-	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
-	 * that need an ABTS. The IOs need to stay on the txcmplq so that
-	 * the abort operation completes them successfully.
-	 */
-	pring = phba->sli4_hba.nvmels_wq->pring;
-	spin_lock_irq(&phba->hbalock);
-	spin_lock(&pring->ring_lock);
-	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
-		/* Add to abort_list on on NDLP match. */
-		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
-			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
-			list_add_tail(&wqe->dlist, &abort_list);
-		}
-	}
-	spin_unlock(&pring->ring_lock);
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Abort the targeted IOs and remove them from the abort list. */
-	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
+	if (!ret)
 		atomic_inc(&lport->xmt_ls_abort);
-		spin_lock_irq(&phba->hbalock);
-		list_del_init(&wqe->dlist);
-		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
-		spin_unlock_irq(&phba->hbalock);
-	}
 }
 
 /* Fix up the existing sgls for NVME IO. */
 static inline void
 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
-		       struct lpfc_nvme_buf *lpfc_ncmd,
+		       struct lpfc_io_buf *lpfc_ncmd,
 		       struct nvmefc_fcp_req *nCmd)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -786,7 +989,7 @@
 	 * rather than the virtual memory to ease the restore
 	 * operation.
 	 */
-	sgl = lpfc_ncmd->nvme_sgl;
+	sgl = lpfc_ncmd->dma_sgl;
 	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
 	if (phba->cfg_nvme_embed_cmd) {
 		sgl->addr_hi = 0;
@@ -857,94 +1060,9 @@
 	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
 }
 
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-static void
-lpfc_nvme_ktime(struct lpfc_hba *phba,
-		struct lpfc_nvme_buf *lpfc_ncmd)
-{
-	uint64_t seg1, seg2, seg3, seg4;
-	uint64_t segsum;
 
-	if (!lpfc_ncmd->ts_last_cmd ||
-	    !lpfc_ncmd->ts_cmd_start ||
-	    !lpfc_ncmd->ts_cmd_wqput ||
-	    !lpfc_ncmd->ts_isr_cmpl ||
-	    !lpfc_ncmd->ts_data_nvme)
-		return;
-
-	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
-		return;
-	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
-		return;
-	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
-		return;
-	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
-		return;
-	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
-		return;
-	/*
-	 * Segment 1 - Time from Last FCP command cmpl is handed
-	 * off to NVME Layer to start of next command.
-	 * Segment 2 - Time from Driver receives a IO cmd start
-	 * from NVME Layer to WQ put is done on IO cmd.
-	 * Segment 3 - Time from Driver WQ put is done on IO cmd
-	 * to MSI-X ISR for IO cmpl.
-	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
-	 * cmpl is handled off to the NVME Layer.
-	 */
-	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
-	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
-		seg1 = 0;
-
-	/* Calculate times relative to start of IO */
-	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
-	segsum = seg2;
-	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
-	if (segsum > seg3)
-		return;
-	seg3 -= segsum;
-	segsum += seg3;
-
-	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
-	if (segsum > seg4)
-		return;
-	seg4 -= segsum;
-
-	phba->ktime_data_samples++;
-	phba->ktime_seg1_total += seg1;
-	if (seg1 < phba->ktime_seg1_min)
-		phba->ktime_seg1_min = seg1;
-	else if (seg1 > phba->ktime_seg1_max)
-		phba->ktime_seg1_max = seg1;
-	phba->ktime_seg2_total += seg2;
-	if (seg2 < phba->ktime_seg2_min)
-		phba->ktime_seg2_min = seg2;
-	else if (seg2 > phba->ktime_seg2_max)
-		phba->ktime_seg2_max = seg2;
-	phba->ktime_seg3_total += seg3;
-	if (seg3 < phba->ktime_seg3_min)
-		phba->ktime_seg3_min = seg3;
-	else if (seg3 > phba->ktime_seg3_max)
-		phba->ktime_seg3_max = seg3;
-	phba->ktime_seg4_total += seg4;
-	if (seg4 < phba->ktime_seg4_min)
-		phba->ktime_seg4_min = seg4;
-	else if (seg4 > phba->ktime_seg4_max)
-		phba->ktime_seg4_max = seg4;
-
-	lpfc_ncmd->ts_last_cmd = 0;
-	lpfc_ncmd->ts_cmd_start = 0;
-	lpfc_ncmd->ts_cmd_wqput = 0;
-	lpfc_ncmd->ts_isr_cmpl = 0;
-	lpfc_ncmd->ts_data_nvme = 0;
-}
-#endif
-
-/**
+/*
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine as it io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -958,57 +1076,56 @@
 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 			  struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_nvme_buf *lpfc_ncmd =
-		(struct lpfc_nvme_buf *)pwqeIn->context1;
+	struct lpfc_io_buf *lpfc_ncmd =
+		(struct lpfc_io_buf *)pwqeIn->context1;
 	struct lpfc_vport *vport = pwqeIn->vport;
 	struct nvmefc_fcp_req *nCmd;
 	struct nvme_fc_ersp_iu *ep;
 	struct nvme_fc_cmd_iu *cp;
-	struct lpfc_nvme_rport *rport;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
-	unsigned long flags;
 	uint32_t code, status, idx;
 	uint16_t cid, sqhd, data;
 	uint32_t *ptr;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	int cpu;
+#endif
 
 	/* Sanity check on return of outstanding command */
-	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
-		if (!lpfc_ncmd) {
-			lpfc_printf_vlog(vport, KERN_ERR,
-					 LOG_NODE | LOG_NVME_IOERR,
-					 "6071 Null lpfc_ncmd pointer. No "
-					 "release, skip completion\n");
-			return;
-		}
+	if (!lpfc_ncmd) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6071 Null lpfc_ncmd pointer. No "
+				 "release, skip completion\n");
+		return;
+	}
 
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
-				 "nvmeCmd %p nrport %p\n",
-				 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
-				 lpfc_ncmd->nrport);
+	/* Guard against abort handler being called at same time */
+	spin_lock(&lpfc_ncmd->buf_lock);
+
+	if (!lpfc_ncmd->nvmeCmd) {
+		spin_unlock(&lpfc_ncmd->buf_lock);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+				 "nvmeCmd x%px\n",
+				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
 
 		/* Release the lpfc_ncmd regardless of the missing elements. */
 		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 		return;
 	}
 	nCmd = lpfc_ncmd->nvmeCmd;
-	rport = lpfc_ncmd->nrport;
 	status = bf_get(lpfc_wcqe_c_status, wcqe);
 
-	if (vport->localport) {
+	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
+
+	if (unlikely(status && vport->localport)) {
 		lport = (struct lpfc_nvme_lport *)vport->localport->private;
 		if (lport) {
-			idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-			cstat = &lport->cstat[idx];
-			atomic_inc(&cstat->fc4NvmeIoCmpls);
-			if (status) {
-				if (bf_get(lpfc_wcqe_c_xb, wcqe))
-					atomic_inc(&lport->cmpl_fcp_xb);
-				atomic_inc(&lport->cmpl_fcp_err);
-			}
+			if (bf_get(lpfc_wcqe_c_xb, wcqe))
+				atomic_inc(&lport->cmpl_fcp_xb);
+			atomic_inc(&lport->cmpl_fcp_err);
 		}
 	}
 
@@ -1019,18 +1136,11 @@
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	ndlp = rport->ndlp;
+	ndlp = lpfc_ncmd->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6061 rport %p, DID x%06x node not ready.\n",
-				 rport, rport->remoteport->port_id);
-
-		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
-		if (!ndlp) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
-					 "6062 Ignoring NVME cmpl. No ndlp\n");
-			goto out_err;
-		}
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6062 Ignoring NVME cmpl. No ndlp\n");
+		goto out_err;
 	}
 
 	code = bf_get(lpfc_wcqe_c_code, wcqe);
@@ -1099,7 +1209,7 @@
 		/* Sanity check */
 		if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
 			break;
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6081 NVME Completion Protocol Error: "
 				 "xri %x status x%x result x%x "
 				 "placed x%x\n",
@@ -1112,19 +1222,22 @@
 		if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
 			lpfc_printf_vlog(vport, KERN_INFO,
 					 LOG_NVME_IOERR,
-					 "6032 Delay Aborted cmd %p "
-					 "nvme cmd %p, xri x%x, "
+					 "6032 Delay Aborted cmd x%px "
+					 "nvme cmd x%px, xri x%x, "
 					 "xb %d\n",
 					 lpfc_ncmd, nCmd,
 					 lpfc_ncmd->cur_iocbq.sli4_xritag,
 					 bf_get(lpfc_wcqe_c_xb, wcqe));
+		fallthrough;
 	default:
 out_err:
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6072 NVME Completion Error: xri %x "
-				 "status x%x result x%x placed x%x\n",
+				 "status x%x result x%x [x%x] "
+				 "placed x%x\n",
 				 lpfc_ncmd->cur_iocbq.sli4_xritag,
 				 lpfc_ncmd->status, lpfc_ncmd->result,
+				 wcqe->parameter,
 				 wcqe->total_data_placed);
 		nCmd->transferred_length = 0;
 		nCmd->rcv_rsplen = 0;
@@ -1145,18 +1258,19 @@
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (lpfc_ncmd->ts_cmd_start) {
 		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
-		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
-		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
-		lpfc_nvme_ktime(phba, lpfc_ncmd);
+		lpfc_ncmd->ts_data_io = ktime_get_ns();
+		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
+		lpfc_io_ktime(phba, lpfc_ncmd);
 	}
-	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		if (lpfc_ncmd->cpu != smp_processor_id())
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
+		cpu = raw_smp_processor_id();
+		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+		if (lpfc_ncmd->cpu != cpu)
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_NVME_IOERR,
 					 "6701 CPU Check cmpl: "
 					 "cpu %d expect %d\n",
-					 smp_processor_id(), lpfc_ncmd->cpu);
-		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+					 cpu, lpfc_ncmd->cpu);
 	}
 #endif
 
@@ -1167,13 +1281,11 @@
 	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
 		freqpriv = nCmd->private;
 		freqpriv->nvme_buf = NULL;
-		nCmd->done(nCmd);
 		lpfc_ncmd->nvmeCmd = NULL;
-	}
-
-	spin_lock_irqsave(&phba->hbalock, flags);
-	lpfc_ncmd->nrport = NULL;
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock(&lpfc_ncmd->buf_lock);
+		nCmd->done(nCmd);
+	} else
+		spin_unlock(&lpfc_ncmd->buf_lock);
 
 	/* Call release with XB=1 to queue the IO into the abort list. */
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1182,11 +1294,10 @@
 
 /**
 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
- * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @vport: pointer to a host virtual N_Port data structure
+ * @lpfcn_cmd: Pointer to lpfc scsi command
+ * @pnode: pointer to a node-list data structure
+ * @cstat: pointer to the control status structure
 *
 * Driver registers this routine as it io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1198,17 +1309,18 @@
 **/
 static int
 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
-		      struct lpfc_nvme_buf *lpfc_ncmd,
+		      struct lpfc_io_buf *lpfc_ncmd,
 		      struct lpfc_nodelist *pnode,
-		      struct lpfc_nvme_ctrl_stat *cstat)
+		      struct lpfc_fc4_ctrl_stat *cstat)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
-	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+	struct nvme_common_command *sqe;
+	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 	uint32_t req_len;
 
-	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+	if (!NLP_CHK_NODE_ACT(pnode))
 		return -EINVAL;
 
 	/*
@@ -1238,7 +1350,7 @@
 		} else {
 			wqe->fcp_iwrite.initial_xfer_len = 0;
 		}
-		atomic_inc(&cstat->fc4NvmeOutputRequests);
+		cstat->output_requests++;
 	} else {
 		/* From the iread template, initialize words 7 - 11 */
 		memcpy(&wqe->words[7],
@@ -1251,14 +1363,23 @@
 			/* Word 5 */
 			wqe->fcp_iread.rsrvd5 = 0;
 
-			atomic_inc(&cstat->fc4NvmeInputRequests);
+			cstat->input_requests++;
 		}
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
 		       sizeof(uint32_t) * 8);
-		atomic_inc(&cstat->fc4NvmeControlRequests);
+		cstat->control_requests++;
 	}
+
+	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
+		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+		sqe = &((struct nvme_fc_cmd_iu *)
+			nCmd->cmdaddr)->sqe.common;
+		if (sqe->opcode == nvme_admin_async_event)
+			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+	}
+
 	/*
 	 * Finish initializing those WQE fields that are independent
 	 * of the nvme_cmnd request_buffer
@@ -1288,11 +1409,8 @@
 
 /**
 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
- * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @vport: pointer to a host virtual N_Port data structure
+ * @lpfcn_cmd: Pointer to lpfc scsi command
 *
 * Driver registers this routine as it io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1304,20 +1422,22 @@
 **/
 static int
 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
-		      struct lpfc_nvme_buf *lpfc_ncmd)
+		      struct lpfc_io_buf *lpfc_ncmd)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
 	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
-	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 	struct scatterlist *data_sg;
 	struct sli4_sge *first_data_sgl;
 	struct ulp_bde64 *bde;
-	dma_addr_t physaddr;
+	dma_addr_t physaddr = 0;
 	uint32_t num_bde = 0;
-	uint32_t dma_len;
+	uint32_t dma_len = 0;
 	uint32_t dma_offset = 0;
-	int nseg, i;
+	int nseg, i, j;
+	bool lsp_just_set = false;
 
 	/* Fix up the command and response DMA stuff. */
 	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1336,7 +1456,7 @@
 		first_data_sgl = sgl;
 		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
 		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6058 Too many sg segments from "
 					"NVME Transport. Max %d, "
 					"nvmeIO sg_cnt %d\n",
@@ -1354,31 +1474,87 @@
 		 */
 		nseg = nCmd->sg_cnt;
 		data_sg = nCmd->first_sgl;
+
+		/* for tracking the segment boundaries */
+		j = 2;
 		for (i = 0; i < nseg; i++) {
 			if (data_sg == NULL) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 						"6059 dptr err %d, nseg %d\n",
 						i, nseg);
 				lpfc_ncmd->seg_cnt = 0;
 				return 1;
 			}
-			physaddr = data_sg->dma_address;
-			dma_len = data_sg->length;
-			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
-			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
-			sgl->word2 = le32_to_cpu(sgl->word2);
-			if ((num_bde + 1) == nseg)
-				bf_set(lpfc_sli4_sge_last, sgl, 1);
-			else
-				bf_set(lpfc_sli4_sge_last, sgl, 0);
-			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
-			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->sge_len = cpu_to_le32(dma_len);
 
-			dma_offset += dma_len;
-			data_sg = sg_next(data_sg);
-			sgl++;
+			sgl->word2 = 0;
+			if ((num_bde + 1) == nseg) {
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_DATA);
+			} else {
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+
+				/* expand the segment */
+				if (!lsp_just_set &&
+				    !((j + 1) % phba->border_sge_num) &&
+				    ((nseg - 1) != i)) {
+					/* set LSP type */
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_LSP);
+
+					sgl_xtra = lpfc_get_sgl_per_hdwq(
+							phba, lpfc_ncmd);
+
+					if (unlikely(!sgl_xtra)) {
+						lpfc_ncmd->seg_cnt = 0;
+						return 1;
+					}
+					sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						       sgl_xtra->dma_phys_sgl));
+					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						       sgl_xtra->dma_phys_sgl));
+
+				} else {
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_DATA);
+				}
+			}
+
+			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+			      LPFC_SGE_TYPE_LSP)) {
+				if ((nseg - 1) == i)
+					bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+				physaddr = data_sg->dma_address;
+				dma_len = data_sg->length;
+				sgl->addr_lo = cpu_to_le32(
+						 putPaddrLow(physaddr));
+				sgl->addr_hi = cpu_to_le32(
+						 putPaddrHigh(physaddr));
+
+				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(dma_len);
+
+				dma_offset += dma_len;
+				data_sg = sg_next(data_sg);
+
+				sgl++;
+
+				lsp_just_set = false;
+			} else {
+				sgl->word2 = cpu_to_le32(sgl->word2);
+
+				sgl->sge_len = cpu_to_le32(
+						 phba->cfg_sg_dma_buf_size);
+
+				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+				i = i - 1;
+
+				lsp_just_set = true;
+			}
+
+			j++;
 		}
 		if (phba->cfg_enable_pbde) {
 			/* Use PBDE support for first SGL only, offset == 0 */
@@ -1398,11 +1574,13 @@
 		}
 
 	} else {
+		lpfc_ncmd->seg_cnt = 0;
+
 		/* For this clause to be valid, the payload_length
 		 * and sg_cnt must zero.
 		 */
 		if (nCmd->payload_length != 0) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 					"6063 NVME DMA Prep Err: sg_cnt %d "
 					"payload_length x%x\n",
 					nCmd->sg_cnt, nCmd->payload_length);
@@ -1437,13 +1615,13 @@
 {
 	int ret = 0;
 	int expedite = 0;
-	int idx;
+	int idx, cpu;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
+	struct lpfc_fc4_ctrl_stat *cstat;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd;
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvme_qhandle *lpfc_queue_info;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1473,12 +1651,7 @@
 
 	phba = vport->phba;
 
-	if (vport->load_flag & FC_UNLOADING) {
-		ret = -ENODEV;
-		goto out_fail;
-	}
-
-	if (vport->load_flag & FC_UNLOADING) {
+	if (unlikely(vport->load_flag & FC_UNLOADING)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6124 Fail IO, Driver unload\n");
 		atomic_inc(&lport->xmt_fcp_err);
@@ -1509,8 +1682,8 @@
 	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
-				 "6053 Fail IO, ndlp not ready: rport %p "
-				 "ndlp %p, DID x%06x\n",
+				 "6053 Busy IO, ndlp not ready: rport x%px "
+				 "ndlp x%px, DID x%06x\n",
 				 rport, ndlp, pnvme_rport->port_id);
 		atomic_inc(&lport->xmt_fcp_err);
 		ret = -EBUSY;
@@ -1561,7 +1734,15 @@
 		}
 	}
 
-	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
+	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
+		idx = lpfc_queue_info->index;
+	} else {
+		cpu = raw_smp_processor_id();
+		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
+	}
+
+	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
 	if (lpfc_ncmd == NULL) {
 		atomic_inc(&lport->xmt_fcp_noxri);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1588,9 +1769,8 @@
 	 */
 	freqpriv->nvme_buf = lpfc_ncmd;
 	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
-	lpfc_ncmd->nrport = rport;
 	lpfc_ncmd->ndlp = ndlp;
-	lpfc_ncmd->start_time = jiffies;
+	lpfc_ncmd->qidx = lpfc_queue_info->qidx;
 
 	/*
 	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
@@ -1600,9 +1780,8 @@
 	 * index to use and that they have affinitized a CPU to this hardware
 	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
 	 */
-	idx = lpfc_queue_info->index;
 	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
-	cstat = &lport->cstat[idx];
+	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
 
 	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
 	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
@@ -1620,7 +1799,7 @@
 			 lpfc_ncmd->cur_iocbq.sli4_xritag,
 			 lpfc_queue_info->index, ndlp->nlp_DID);
 
-	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
+	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
 	if (ret) {
 		atomic_inc(&lport->xmt_fcp_wqerr);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1631,26 +1810,24 @@
 		goto out_free_nvme_buf;
 	}
 
+	if (phba->cfg_xri_rebalancing)
+		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (lpfc_ncmd->ts_cmd_start)
 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
-	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		lpfc_ncmd->cpu = smp_processor_id();
-		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
-			/* Check for admin queue */
-			if (lpfc_queue_info->qidx) {
-				lpfc_printf_vlog(vport,
-						 KERN_ERR, LOG_NVME_IOERR,
-						"6702 CPU Check cmd: "
-						"cpu %d wq %d\n",
-						lpfc_ncmd->cpu,
-						lpfc_queue_info->index);
-			}
-			lpfc_ncmd->cpu = lpfc_queue_info->index;
-		}
-		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
+	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
+		cpu = raw_smp_processor_id();
+		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+		lpfc_ncmd->cpu = cpu;
+		if (idx != cpu)
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_NVME_IOERR,
+					"6702 CPU Check cmd: "
+					"cpu %d wq %d\n",
+					lpfc_ncmd->cpu,
+					lpfc_queue_info->index);
 	}
 #endif
 	return 0;
@@ -1658,11 +1835,11 @@
 out_free_nvme_buf:
 	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
 		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
-			atomic_dec(&cstat->fc4NvmeOutputRequests);
+			cstat->output_requests--;
 		else
-			atomic_dec(&cstat->fc4NvmeInputRequests);
+			cstat->input_requests--;
 	} else
-		atomic_dec(&cstat->fc4NvmeControlRequests);
+		cstat->control_requests--;
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
 	return ret;
@@ -1722,11 +1899,10 @@
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	struct lpfc_nvme_buf *lpfc_nbuf;
+	struct lpfc_io_buf *lpfc_nbuf;
 	struct lpfc_iocbq *abts_buf;
 	struct lpfc_iocbq *nvmereq_wqe;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
-	union lpfc_wqe128 *abts_wqe;
 	unsigned long flags;
 	int ret_val;
 
@@ -1756,7 +1932,7 @@
 	/* Announce entry to new IO submit field. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
 			 "6002 Abort Request to rport DID x%06x "
-			 "for nvme_fc_req %p\n",
+			 "for nvme_fc_req x%px\n",
 			 pnvme_rport->port_id,
 			 pnvme_fcreq);
 
@@ -1765,9 +1941,9 @@
 	 */
 	spin_lock_irqsave(&phba->hbalock, flags);
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6139 Driver in reset cleanup - flushing "
 				 "NVME Req now. hba_flag x%x\n",
 				 phba->hba_flag);
@@ -1777,18 +1953,21 @@
 	lpfc_nbuf = freqpriv->nvme_buf;
 	if (!lpfc_nbuf) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6140 NVME IO req has no matching lpfc nvme "
 				 "io buffer. Skipping abort req.\n");
 		return;
 	} else if (!lpfc_nbuf->nvmeCmd) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6141 lpfc NVME IO req has no nvme_fcreq "
 				 "io buffer. Skipping abort req.\n");
 		return;
 	}
 	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
+
+	/* Guard against IO completion being called at same time */
+	spin_lock(&lpfc_nbuf->buf_lock);
 
 	/*
 	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1798,24 +1977,22 @@
 	 * has not seen it yet.
 	 */
 	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6143 NVME req mismatch: "
-				 "lpfc_nbuf %p nvmeCmd %p, "
-				 "pnvme_fcreq %p. Skipping Abort xri x%x\n",
+				 "lpfc_nbuf x%px nvmeCmd x%px, "
+				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
 				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	/* Don't abort IOs no longer on the pending queue. */
 	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
-				 "6142 NVME IO req %p not queued - skipping "
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6142 NVME IO req x%px not queued - skipping "
 				 "abort req xri x%x\n",
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	atomic_inc(&lport->xmt_fcp_abort);
@@ -1825,72 +2002,41 @@
 
 	/* Outstanding abort is in progress */
 	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6144 Outstanding NVME I/O Abort Request "
-				 "still pending on nvme_fcreq %p, "
-				 "lpfc_ncmd %p xri x%x\n",
+				 "still pending on nvme_fcreq x%px, "
+				 "lpfc_ncmd %px xri x%x\n",
 				 pnvme_fcreq, lpfc_nbuf,
 				 nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	abts_buf = __lpfc_sli_get_iocbq(phba);
 	if (!abts_buf) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6136 No available abort wqes. Skipping "
-				 "Abts req for nvme_fcreq %p xri x%x\n",
+				 "Abts req for nvme_fcreq x%px xri x%x\n",
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	/* Ready - mark outstanding as aborted by driver. */
 	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
 
-	/* Complete prepping the abort wqe and issue to the FW. */
-	abts_wqe = &abts_buf->wqe;
-
-	/* WQEs are reused. Clear stale data and set key fields to
-	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
-	 */
-	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
-	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-	/* word 7 */
-	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
-	       nvmereq_wqe->iocb.ulpClass);
-
-	/* word 8 - tell the FW to abort the IO associated with this
-	 * outstanding exchange ID.
-	 */
-	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
-	/* word 9 - this is the iotag for the abts_wqe completion. */
-	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-	       abts_buf->iotag);
-
-	/* word 10 */
-	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-	/* word 11 */
-	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
 
 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abts_buf->iocb_flag |= LPFC_IO_NVME;
 	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
 	abts_buf->vport = vport;
 	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
-	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
+	ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
+	spin_unlock(&lpfc_nbuf->buf_lock);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (ret_val) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6137 Failed abts issue_wqe with status x%x "
-				 "for nvme_fcreq %p.\n",
+				 "for nvme_fcreq x%px.\n",
 				 ret_val, pnvme_fcreq);
 		lpfc_sli_release_iocbq(phba, abts_buf);
 		return;
....@@ -1901,6 +2047,12 @@
19012047 "ox_id x%x on reqtag x%x\n",
19022048 nvmereq_wqe->sli4_xritag,
19032049 abts_buf->iotag);
2050
+ return;
2051
+
2052
+out_unlock:
2053
+ spin_unlock(&lpfc_nbuf->buf_lock);
2054
+ spin_unlock_irqrestore(&phba->hbalock, flags);
2055
+ return;
19042056 }
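
The reworked abort path above nests the new per-buffer buf_lock inside phba->hbalock and funnels every validation failure through the single out_unlock exit, so both locks are always dropped, in reverse acquisition order, on every path. A minimal userspace sketch of that single-exit locking pattern, with pthread mutexes standing in for the kernel spinlocks (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER; /* models phba->hbalock */
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER; /* models lpfc_nbuf->buf_lock */

/* Validate a request and issue an abort; every failure after the
 * second lock is taken leaves through the same unlock sequence. */
static int submit_abort(int req_matches, int still_queued)
{
	int ret = -1;

	pthread_mutex_lock(&hba_lock);
	pthread_mutex_lock(&buf_lock);

	if (!req_matches) {
		printf("request mismatch - skipping abort\n");
		goto out_unlock;
	}
	if (!still_queued) {
		printf("IO not on pending queue - skipping abort\n");
		goto out_unlock;
	}
	ret = 0; /* the abort WQE would be prepped and issued here */

out_unlock:
	/* Release in reverse acquisition order, exactly once. */
	pthread_mutex_unlock(&buf_lock);
	pthread_mutex_unlock(&hba_lock);
	return ret;
}

int main(void)
{
	return submit_abort(1, 1);
}
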
19052057
19062058 /* Declare and initialize an instance of the FC NVME template. */
....@@ -1914,6 +2066,7 @@
19142066 .fcp_io = lpfc_nvme_fcp_io_submit,
19152067 .ls_abort = lpfc_nvme_ls_abort,
19162068 .fcp_abort = lpfc_nvme_fcp_abort,
2069
+ .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
19172070
19182071 .max_hw_queues = 1,
19192072 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
....@@ -1930,456 +2083,63 @@
19302083 };
19312084
19322085 /**
1933
- * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
1934
- * @phba: pointer to lpfc hba data structure.
1935
- * @nblist: pointer to nvme buffer list.
1936
- * @count: number of scsi buffers on the list.
1937
- *
1938
- * This routine is invoked to post a block of @count scsi sgl pages from a
1939
- * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
1940
- * No Lock is held.
1941
- *
1942
- **/
1943
-static int
1944
-lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
1945
- struct list_head *nblist,
1946
- int count)
1947
-{
1948
- struct lpfc_nvme_buf *lpfc_ncmd;
1949
- struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
1950
- struct sgl_page_pairs *sgl_pg_pairs;
1951
- void *viraddr;
1952
- LPFC_MBOXQ_t *mbox;
1953
- uint32_t reqlen, alloclen, pg_pairs;
1954
- uint32_t mbox_tmo;
1955
- uint16_t xritag_start = 0;
1956
- int rc = 0;
1957
- uint32_t shdr_status, shdr_add_status;
1958
- dma_addr_t pdma_phys_bpl1;
1959
- union lpfc_sli4_cfg_shdr *shdr;
1960
-
1961
- /* Calculate the requested length of the dma memory */
1962
- reqlen = count * sizeof(struct sgl_page_pairs) +
1963
- sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1964
- if (reqlen > SLI4_PAGE_SIZE) {
1965
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1966
- "6118 Block sgl registration required DMA "
1967
- "size (%d) great than a page\n", reqlen);
1968
- return -ENOMEM;
1969
- }
1970
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1971
- if (!mbox) {
1972
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1973
- "6119 Failed to allocate mbox cmd memory\n");
1974
- return -ENOMEM;
1975
- }
1976
-
1977
- /* Allocate DMA memory and set up the non-embedded mailbox command */
1978
- alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1979
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
1980
- LPFC_SLI4_MBX_NEMBED);
1981
-
1982
- if (alloclen < reqlen) {
1983
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1984
- "6120 Allocated DMA memory size (%d) is "
1985
- "less than the requested DMA memory "
1986
- "size (%d)\n", alloclen, reqlen);
1987
- lpfc_sli4_mbox_cmd_free(phba, mbox);
1988
- return -ENOMEM;
1989
- }
1990
-
1991
- /* Get the first SGE entry from the non-embedded DMA memory */
1992
- viraddr = mbox->sge_array->addr[0];
1993
-
1994
- /* Set up the SGL pages in the non-embedded DMA pages */
1995
- sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
1996
- sgl_pg_pairs = &sgl->sgl_pg_pairs;
1997
-
1998
- pg_pairs = 0;
1999
- list_for_each_entry(lpfc_ncmd, nblist, list) {
2000
- /* Set up the sge entry */
2001
- sgl_pg_pairs->sgl_pg0_addr_lo =
2002
- cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
2003
- sgl_pg_pairs->sgl_pg0_addr_hi =
2004
- cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
2005
- if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
2006
- pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
2007
- SGL_PAGE_SIZE;
2008
- else
2009
- pdma_phys_bpl1 = 0;
2010
- sgl_pg_pairs->sgl_pg1_addr_lo =
2011
- cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
2012
- sgl_pg_pairs->sgl_pg1_addr_hi =
2013
- cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
2014
- /* Keep the first xritag on the list */
2015
- if (pg_pairs == 0)
2016
- xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
2017
- sgl_pg_pairs++;
2018
- pg_pairs++;
2019
- }
2020
- bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
2021
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
2022
- /* Perform endian conversion if necessary */
2023
- sgl->word0 = cpu_to_le32(sgl->word0);
2024
-
2025
- if (!phba->sli4_hba.intr_enable)
2026
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
2027
- else {
2028
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
2029
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
2030
- }
2031
- shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
2032
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2033
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2034
- if (rc != MBX_TIMEOUT)
2035
- lpfc_sli4_mbox_cmd_free(phba, mbox);
2036
- if (shdr_status || shdr_add_status || rc) {
2037
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2038
- "6125 POST_SGL_BLOCK mailbox command failed "
2039
- "status x%x add_status x%x mbx status x%x\n",
2040
- shdr_status, shdr_add_status, rc);
2041
- rc = -ENXIO;
2042
- }
2043
- return rc;
2044
-}
2045
-
2046
-/**
2047
- * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
2048
- * @phba: pointer to lpfc hba data structure.
2049
- * @post_nblist: pointer to the nvme buffer list.
2050
- *
2051
- * This routine walks a list of nvme buffers that was passed in. It attempts
2052
- * to construct blocks of nvme buffer sgls which contain contiguous xris and
2053
- * uses the non-embedded SGL block post mailbox commands to post to the port.
2054
- * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
2055
- * embedded SGL post mailbox command for posting. The @post_nblist passed in
2056
- * must be a local list, thus no lock is needed when manipulating the list.
2057
- *
2058
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
2059
- **/
2060
-static int
2061
-lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
2062
- struct list_head *post_nblist, int sb_count)
2063
-{
2064
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2065
- int status, sgl_size;
2066
- int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
2067
- dma_addr_t pdma_phys_sgl1;
2068
- int last_xritag = NO_XRI;
2069
- int cur_xritag;
2070
- LIST_HEAD(prep_nblist);
2071
- LIST_HEAD(blck_nblist);
2072
- LIST_HEAD(nvme_nblist);
2073
-
2074
- /* sanity check */
2075
- if (sb_count <= 0)
2076
- return -EINVAL;
2077
-
2078
- sgl_size = phba->cfg_sg_dma_buf_size;
2079
-
2080
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
2081
- list_del_init(&lpfc_ncmd->list);
2082
- block_cnt++;
2083
- if ((last_xritag != NO_XRI) &&
2084
- (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
2085
- /* a hole in xri block, form a sgl posting block */
2086
- list_splice_init(&prep_nblist, &blck_nblist);
2087
- post_cnt = block_cnt - 1;
2088
- /* prepare list for next posting block */
2089
- list_add_tail(&lpfc_ncmd->list, &prep_nblist);
2090
- block_cnt = 1;
2091
- } else {
2092
- /* prepare list for next posting block */
2093
- list_add_tail(&lpfc_ncmd->list, &prep_nblist);
2094
- /* enough sgls for non-embed sgl mbox command */
2095
- if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
2096
- list_splice_init(&prep_nblist, &blck_nblist);
2097
- post_cnt = block_cnt;
2098
- block_cnt = 0;
2099
- }
2100
- }
2101
- num_posting++;
2102
- last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
2103
-
2104
- /* end of repost sgl list condition for NVME buffers */
2105
- if (num_posting == sb_count) {
2106
- if (post_cnt == 0) {
2107
- /* last sgl posting block */
2108
- list_splice_init(&prep_nblist, &blck_nblist);
2109
- post_cnt = block_cnt;
2110
- } else if (block_cnt == 1) {
2111
- /* last single sgl with non-contiguous xri */
2112
- if (sgl_size > SGL_PAGE_SIZE)
2113
- pdma_phys_sgl1 =
2114
- lpfc_ncmd->dma_phys_sgl +
2115
- SGL_PAGE_SIZE;
2116
- else
2117
- pdma_phys_sgl1 = 0;
2118
- cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
2119
- status = lpfc_sli4_post_sgl(phba,
2120
- lpfc_ncmd->dma_phys_sgl,
2121
- pdma_phys_sgl1, cur_xritag);
2122
- if (status) {
2123
- /* failure, put on abort nvme list */
2124
- lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
2125
- } else {
2126
- /* success, put on NVME buffer list */
2127
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2128
- lpfc_ncmd->status = IOSTAT_SUCCESS;
2129
- num_posted++;
2130
- }
2131
- /* success, put on NVME buffer sgl list */
2132
- list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
2133
- }
2134
- }
2135
-
2136
- /* continue until a nembed page worth of sgls */
2137
- if (post_cnt == 0)
2138
- continue;
2139
-
2140
- /* post block of NVME buffer list sgls */
2141
- status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
2142
- post_cnt);
2143
-
2144
-		/* don't reset xritag due to hole in xri block */
2145
- if (block_cnt == 0)
2146
- last_xritag = NO_XRI;
2147
-
2148
- /* reset NVME buffer post count for next round of posting */
2149
- post_cnt = 0;
2150
-
2151
- /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
2152
- while (!list_empty(&blck_nblist)) {
2153
- list_remove_head(&blck_nblist, lpfc_ncmd,
2154
- struct lpfc_nvme_buf, list);
2155
- if (status) {
2156
- /* failure, put on abort nvme list */
2157
- lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
2158
- } else {
2159
- /* success, put on NVME buffer list */
2160
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2161
- lpfc_ncmd->status = IOSTAT_SUCCESS;
2162
- num_posted++;
2163
- }
2164
- list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
2165
- }
2166
- }
2167
- /* Push NVME buffers with sgl posted to the available list */
2168
- while (!list_empty(&nvme_nblist)) {
2169
- list_remove_head(&nvme_nblist, lpfc_ncmd,
2170
- struct lpfc_nvme_buf, list);
2171
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
2172
- }
2173
- return num_posted;
2174
-}
2175
-
2176
-/**
2177
- * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
2178
- * @phba: pointer to lpfc hba data structure.
2179
- *
2180
- * This routine walks the list of nvme buffers that have been allocated and
2181
- * repost them to the port by using SGL block post. This is needed after a
2182
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
2183
- * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
2184
- * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
2185
- *
2186
- * Returns: 0 = success, non-zero failure.
2187
- **/
2188
-int
2189
-lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
2190
-{
2191
- LIST_HEAD(post_nblist);
2192
- int num_posted, rc = 0;
2193
-
2194
- /* get all NVME buffers need to repost to a local list */
2195
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
2196
- spin_lock(&phba->nvme_buf_list_put_lock);
2197
- list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
2198
- list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
2199
- phba->get_nvme_bufs = 0;
2200
- phba->put_nvme_bufs = 0;
2201
- spin_unlock(&phba->nvme_buf_list_put_lock);
2202
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2203
-
2204
- /* post the list of nvme buffer sgls to port if available */
2205
- if (!list_empty(&post_nblist)) {
2206
- num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
2207
- phba->sli4_hba.nvme_xri_cnt);
2208
- /* failed to post any nvme buffer, return error */
2209
- if (num_posted == 0)
2210
- rc = -EIO;
2211
- }
2212
- return rc;
2213
-}
2214
-
2215
-/**
2216
- * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec
2217
- * @vport: The virtual port for which this call being executed.
2218
- * @num_to_allocate: The requested number of buffers to allocate.
2219
- *
2220
- * This routine allocates nvme buffers for device with SLI-4 interface spec,
2221
- * the nvme buffer contains all the necessary information needed to initiate
2222
- * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put
2223
- * them on a list, it post them to the port by using SGL block post.
2224
- *
2225
- * Return codes:
2226
- * int - number of nvme buffers that were allocated and posted.
2227
- * 0 = failure, less than num_to_alloc is a partial failure.
2228
- **/
2229
-static int
2230
-lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
2231
-{
2232
- struct lpfc_hba *phba = vport->phba;
2233
- struct lpfc_nvme_buf *lpfc_ncmd;
2234
- struct lpfc_iocbq *pwqeq;
2235
- union lpfc_wqe128 *wqe;
2236
- struct sli4_sge *sgl;
2237
- dma_addr_t pdma_phys_sgl;
2238
- uint16_t iotag, lxri = 0;
2239
- int bcnt, num_posted, sgl_size;
2240
- LIST_HEAD(prep_nblist);
2241
- LIST_HEAD(post_nblist);
2242
- LIST_HEAD(nvme_nblist);
2243
-
2244
- sgl_size = phba->cfg_sg_dma_buf_size;
2245
-
2246
- for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
2247
- lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
2248
- if (!lpfc_ncmd)
2249
- break;
2250
- /*
2251
- * Get memory from the pci pool to map the virt space to
2252
- * pci bus space for an I/O. The DMA buffer includes the
2253
- * number of SGE's necessary to support the sg_tablesize.
2254
- */
2255
- lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
2256
- GFP_KERNEL,
2257
- &lpfc_ncmd->dma_handle);
2258
- if (!lpfc_ncmd->data) {
2259
- kfree(lpfc_ncmd);
2260
- break;
2261
- }
2262
-
2263
- lxri = lpfc_sli4_next_xritag(phba);
2264
- if (lxri == NO_XRI) {
2265
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
2266
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
2267
- kfree(lpfc_ncmd);
2268
- break;
2269
- }
2270
- pwqeq = &(lpfc_ncmd->cur_iocbq);
2271
- wqe = &pwqeq->wqe;
2272
-
2273
- /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
2274
- iotag = lpfc_sli_next_iotag(phba, pwqeq);
2275
- if (iotag == 0) {
2276
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
2277
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
2278
- kfree(lpfc_ncmd);
2279
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2280
- "6121 Failed to allocated IOTAG for"
2281
- " XRI:0x%x\n", lxri);
2282
- lpfc_sli4_free_xri(phba, lxri);
2283
- break;
2284
- }
2285
- pwqeq->sli4_lxritag = lxri;
2286
- pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2287
- pwqeq->iocb_flag |= LPFC_IO_NVME;
2288
- pwqeq->context1 = lpfc_ncmd;
2289
- pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2290
-
2291
- /* Initialize local short-hand pointers. */
2292
- lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
2293
- sgl = lpfc_ncmd->nvme_sgl;
2294
- pdma_phys_sgl = lpfc_ncmd->dma_handle;
2295
- lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
2296
-
2297
- /* Rsp SGE will be filled in when we rcv an IO
2298
- * from the NVME Layer to be sent.
2299
- * The cmd is going to be embedded so we need a SKIP SGE.
2300
- */
2301
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2302
- bf_set(lpfc_sli4_sge_last, sgl, 0);
2303
- sgl->word2 = cpu_to_le32(sgl->word2);
2304
- /* Fill in word 3 / sgl_len during cmd submission */
2305
-
2306
- lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
2307
-
2308
- /* Initialize WQE */
2309
- memset(wqe, 0, sizeof(union lpfc_wqe));
2310
-
2311
- /* add the nvme buffer to a post list */
2312
- list_add_tail(&lpfc_ncmd->list, &post_nblist);
2313
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
2314
- phba->sli4_hba.nvme_xri_cnt++;
2315
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2316
- }
2317
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
2318
- "6114 Allocate %d out of %d requested new NVME "
2319
- "buffers\n", bcnt, num_to_alloc);
2320
-
2321
- /* post the list of nvme buffer sgls to port if available */
2322
- if (!list_empty(&post_nblist))
2323
- num_posted = lpfc_post_nvme_sgl_list(phba,
2324
- &post_nblist, bcnt);
2325
- else
2326
- num_posted = 0;
2327
-
2328
- return num_posted;
2329
-}
2330
-
2331
-static inline struct lpfc_nvme_buf *
2332
-lpfc_nvme_buf(struct lpfc_hba *phba)
2333
-{
2334
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2335
-
2336
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2337
- &phba->lpfc_nvme_buf_list_get, list) {
2338
- list_del_init(&lpfc_ncmd->list);
2339
- phba->get_nvme_bufs--;
2340
- return lpfc_ncmd;
2341
- }
2342
- return NULL;
2343
-}
2344
-
2345
-/**
2346
- * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
2086
+ * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
23472087 * @phba: The HBA for which this call is being executed.
23482088 *
2349
- * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list
2089
+ * This routine removes an nvme buffer from the head of @hdwq io_buf_list
23502090 * and returns to caller.
23512091 *
23522092 * Return codes:
23532093 * NULL - Error
23542094 * Pointer to lpfc_nvme_buf - Success
23552095 **/
2356
-static struct lpfc_nvme_buf *
2096
+static struct lpfc_io_buf *
23572097 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2358
- int expedite)
2098
+ int idx, int expedite)
23592099 {
2360
- struct lpfc_nvme_buf *lpfc_ncmd = NULL;
2361
- unsigned long iflag = 0;
2100
+ struct lpfc_io_buf *lpfc_ncmd;
2101
+ struct lpfc_sli4_hdw_queue *qp;
2102
+ struct sli4_sge *sgl;
2103
+ struct lpfc_iocbq *pwqeq;
2104
+ union lpfc_wqe128 *wqe;
23622105
2363
- spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2364
- if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
2365
- lpfc_ncmd = lpfc_nvme_buf(phba);
2366
- if (!lpfc_ncmd) {
2367
- spin_lock(&phba->nvme_buf_list_put_lock);
2368
- list_splice(&phba->lpfc_nvme_buf_list_put,
2369
- &phba->lpfc_nvme_buf_list_get);
2370
- phba->get_nvme_bufs += phba->put_nvme_bufs;
2371
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
2372
- phba->put_nvme_bufs = 0;
2373
- spin_unlock(&phba->nvme_buf_list_put_lock);
2374
- if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
2375
- lpfc_ncmd = lpfc_nvme_buf(phba);
2376
- }
2377
- spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
2106
+ lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
23782107
2379
- if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
2380
- atomic_inc(&ndlp->cmd_pending);
2381
- lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
2108
+ if (lpfc_ncmd) {
2109
+ pwqeq = &(lpfc_ncmd->cur_iocbq);
2110
+ wqe = &pwqeq->wqe;
2111
+
2112
+ /* Setup key fields in buffer that may have been changed
2113
+ * if other protocols used this buffer.
2114
+ */
2115
+ pwqeq->iocb_flag = LPFC_IO_NVME;
2116
+ pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2117
+ lpfc_ncmd->start_time = jiffies;
2118
+ lpfc_ncmd->flags = 0;
2119
+
2120
+ /* Rsp SGE will be filled in when we rcv an IO
2121
+ * from the NVME Layer to be sent.
2122
+ * The cmd is going to be embedded so we need a SKIP SGE.
2123
+ */
2124
+ sgl = lpfc_ncmd->dma_sgl;
2125
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2126
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
2127
+ sgl->word2 = cpu_to_le32(sgl->word2);
2128
+ /* Fill in word 3 / sgl_len during cmd submission */
2129
+
2130
+ /* Initialize 64 bytes only */
2131
+ memset(wqe, 0, sizeof(union lpfc_wqe));
2132
+
2133
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2134
+ atomic_inc(&ndlp->cmd_pending);
2135
+ lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2136
+ }
2137
+
2138
+ } else {
2139
+ qp = &phba->sli4_hba.hdwq[idx];
2140
+ qp->empty_io_bufs++;
23822141 }
2142
+
23832143 return lpfc_ncmd;
23842144 }
23852145
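
With the per-HBA get/put lists gone, lpfc_get_nvme_buf() above delegates to the common lpfc_get_io_buf() pool keyed by hardware-queue index, and bumps the per-queue empty_io_bufs counter when that pool runs dry. A rough userspace model of that per-queue free-list shape (types and sizes are hypothetical, chosen only to show the structure):

#include <stddef.h>
#include <stdio.h>

#define NUM_QUEUES 4
#define POOL_DEPTH 2

struct io_buf { int in_use; };

struct hdw_queue {
	struct io_buf pool[POOL_DEPTH];
	unsigned long empty_io_bufs; /* diagnostic: allocation misses */
};

static struct hdw_queue hdwq[NUM_QUEUES];

/* Take a free buffer from the queue selected by idx; count misses. */
static struct io_buf *get_io_buf(int idx)
{
	struct hdw_queue *qp = &hdwq[idx];

	for (int i = 0; i < POOL_DEPTH; i++) {
		if (!qp->pool[i].in_use) {
			qp->pool[i].in_use = 1;
			return &qp->pool[i];
		}
	}
	qp->empty_io_bufs++; /* pool ran dry; caller must back off */
	return NULL;
}

int main(void)
{
	get_io_buf(0);
	get_io_buf(0);
	if (!get_io_buf(0))
		printf("queue 0 misses: %lu\n", hdwq[0].empty_io_bufs);
	return 0;
}
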
....@@ -2389,43 +2149,37 @@
23892149 * @lpfc_ncmd: The nvme buffer which is being released.
23902150 *
23912151 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
2392
- * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
2152
+ * lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
23932153 * and cannot be reused for at least RA_TOV amount of time if it was
23942154 * aborted.
23952155 **/
23962156 static void
2397
-lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2157
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
23982158 {
2159
+ struct lpfc_sli4_hdw_queue *qp;
23992160 unsigned long iflag = 0;
24002161
2401
- if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2162
+ if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
24022163 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
24032164
2404
- lpfc_ncmd->nonsg_phys = 0;
24052165 lpfc_ncmd->ndlp = NULL;
2406
- lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
2166
+ lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
24072167
2408
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2168
+ qp = lpfc_ncmd->hdwq;
2169
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
24092170 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
24102171 "6310 XB release deferred for "
24112172 "ox_id x%x on reqtag x%x\n",
24122173 lpfc_ncmd->cur_iocbq.sli4_xritag,
24132174 lpfc_ncmd->cur_iocbq.iotag);
24142175
2415
- spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2416
- iflag);
2176
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
24172177 list_add_tail(&lpfc_ncmd->list,
2418
- &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2419
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
2420
- iflag);
2421
- } else {
2422
- lpfc_ncmd->nvmeCmd = NULL;
2423
- lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
2424
- spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
2425
- list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
2426
- phba->put_nvme_bufs++;
2427
- spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
2428
- }
2178
+ &qp->lpfc_abts_io_buf_list);
2179
+ qp->abts_nvme_io_bufs++;
2180
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2181
+ } else
2182
+ lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
24292183 }
24302184
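
The release path above now branches on LPFC_SBUF_XBUSY: an exchange-busy buffer is parked on the per-queue abort list until the port confirms the XRI is free, while a clean buffer goes straight back to the common pool via lpfc_release_io_buf(). A small sketch of that two-list decision (enum values and counters are illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum { FREE_LIST, ABORT_LIST };

static unsigned long abts_io_bufs; /* models qp->abts_nvme_io_bufs */

/* Buffers whose exchange is still busy (XB set in the completion)
 * must age on the abort list and cannot be reused until the port
 * releases the XRI; clean buffers are immediately recyclable. */
static int release_buf(bool exchange_busy)
{
	if (exchange_busy) {
		abts_io_bufs++;
		printf("XB set: defer reuse, park on abort list\n");
		return ABORT_LIST;
	}
	printf("clean completion: back to free pool\n");
	return FREE_LIST;
}

int main(void)
{
	release_buf(true);
	release_buf(false);
	return 0;
}
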
24312185 /**
....@@ -2452,8 +2206,6 @@
24522206 struct nvme_fc_port_info nfcp_info;
24532207 struct nvme_fc_local_port *localport;
24542208 struct lpfc_nvme_lport *lport;
2455
- struct lpfc_nvme_ctrl_stat *cstat;
2456
- int len, i;
24572209
24582210 /* Initialize this localport instance. The vport wwn usage ensures
24592211 * that NPIV is accounted for.
....@@ -2463,27 +2215,19 @@
24632215 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
24642216 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
24652217
2466
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
2467
- * For now need + 1 to get around NVME transport logic.
2218
+ /* We need to tell the transport layer + 1 because it takes page
2219
+ * alignment into account. When space for the SGL is allocated we
2220
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
24682221 */
2469
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
2470
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
2471
- "6300 Reducing sg segment cnt to %d\n",
2472
- LPFC_MAX_NVME_SEG_CNT);
2473
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
2474
- } else {
2475
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
2476
- }
24772222 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2478
- lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2223
+
2224
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
2225
+ * which will not exceed cpu count.
2226
+ */
2227
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
24792228
24802229 if (!IS_ENABLED(CONFIG_NVME_FC))
24812230 return ret;
2482
-
2483
- cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
2484
- phba->cfg_nvme_io_channel), GFP_KERNEL);
2485
- if (!cstat)
2486
- return -ENOMEM;
24872231
24882232 /* localport is allocated from the stack, but the registration
24892233 * call allocates heap memory as well as the private area.
....@@ -2494,8 +2238,8 @@
24942238 if (!ret) {
24952239 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
24962240 "6005 Successfully registered local "
2497
- "NVME port num %d, localP %p, private %p, "
2498
- "sg_seg %d\n",
2241
+ "NVME port num %d, localP x%px, private "
2242
+ "x%px, sg_seg %d\n",
24992243 localport->port_num, localport,
25002244 localport->private,
25012245 lpfc_nvme_template.max_sgl_segments);
....@@ -2504,7 +2248,6 @@
25042248 lport = (struct lpfc_nvme_lport *)localport->private;
25052249 vport->localport = localport;
25062250 lport->vport = vport;
2507
- lport->cstat = cstat;
25082251 vport->nvmei_support = 1;
25092252
25102253 atomic_set(&lport->xmt_fcp_noxri, 0);
....@@ -2519,32 +2262,15 @@
25192262 atomic_set(&lport->cmpl_fcp_err, 0);
25202263 atomic_set(&lport->cmpl_ls_xb, 0);
25212264 atomic_set(&lport->cmpl_ls_err, 0);
2265
+
25222266 atomic_set(&lport->fc4NvmeLsRequests, 0);
25232267 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2524
-
2525
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
2526
- cstat = &lport->cstat[i];
2527
- atomic_set(&cstat->fc4NvmeInputRequests, 0);
2528
- atomic_set(&cstat->fc4NvmeOutputRequests, 0);
2529
- atomic_set(&cstat->fc4NvmeControlRequests, 0);
2530
- atomic_set(&cstat->fc4NvmeIoCmpls, 0);
2531
- }
2532
-
2533
- /* Don't post more new bufs if repost already recovered
2534
- * the nvme sgls.
2535
- */
2536
- if (phba->sli4_hba.nvme_xri_cnt == 0) {
2537
- len = lpfc_new_nvme_buf(vport,
2538
- phba->sli4_hba.nvme_xri_max);
2539
- vport->phba->total_nvme_bufs += len;
2540
- }
2541
- } else {
2542
- kfree(cstat);
25432268 }
25442269
25452270 return ret;
25462271 }
25472272
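
Per the new comment above, the transport is advertised max_sgl_segments = cfg_nvme_seg_cnt + 1 (the transport folds page alignment into its own math), while the driver sizes each IO's SGL for the configured count + 3: one cmd SGE, one rsp SGE, and one alignment slot. A sketch of that arithmetic, with a hypothetical configured value:

#include <stdio.h>

/* Hypothetical stand-in for phba->cfg_nvme_seg_cnt. */
#define CFG_NVME_SEG_CNT 64

int main(void)
{
	/* Advertised to the NVME FC transport layer. */
	int max_sgl_segments = CFG_NVME_SEG_CNT + 1;

	/* Actually allocated per IO: data + cmd + rsp + alignment. */
	int allocated_sges = CFG_NVME_SEG_CNT + 3;

	printf("advertised %d, allocated %d SGEs\n",
	       max_sgl_segments, allocated_sges);
	return 0;
}
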
2273
+#if (IS_ENABLED(CONFIG_NVME_FC))
25482274 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
25492275 *
25502276 * The driver has to wait for the host nvme transport to callback
....@@ -2555,14 +2281,17 @@
25552281 * An uninterruptible wait is used because of the risk of transport-to-
25562282 * driver state mismatch.
25572283 */
2558
-void
2284
+static void
25592285 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
25602286 struct lpfc_nvme_lport *lport,
25612287 struct completion *lport_unreg_cmp)
25622288 {
2563
-#if (IS_ENABLED(CONFIG_NVME_FC))
25642289 u32 wait_tmo;
2565
- int ret;
2290
+ int ret, i, pending = 0;
2291
+ struct lpfc_sli_ring *pring;
2292
+ struct lpfc_hba *phba = vport->phba;
2293
+ struct lpfc_sli4_hdw_queue *qp;
2294
+ int abts_scsi, abts_nvme;
25662295
25672296 /* Host transport has to clean up and confirm requiring an indefinite
25682297 * wait. Print a message if a 10 second wait expires and renew the
....@@ -2572,19 +2301,33 @@
25722301 while (true) {
25732302 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
25742303 if (unlikely(!ret)) {
2575
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2576
- "6176 Lport %p Localport %p wait "
2577
- "timed out. Renewing.\n",
2578
- lport, vport->localport);
2304
+ pending = 0;
2305
+ abts_scsi = 0;
2306
+ abts_nvme = 0;
2307
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
2308
+ qp = &phba->sli4_hba.hdwq[i];
2309
+ pring = qp->io_wq->pring;
2310
+ if (!pring)
2311
+ continue;
2312
+ pending += pring->txcmplq_cnt;
2313
+ abts_scsi += qp->abts_scsi_io_bufs;
2314
+ abts_nvme += qp->abts_nvme_io_bufs;
2315
+ }
2316
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2317
+ "6176 Lport x%px Localport x%px wait "
2318
+ "timed out. Pending %d [%d:%d]. "
2319
+ "Renewing.\n",
2320
+ lport, vport->localport, pending,
2321
+ abts_scsi, abts_nvme);
25792322 continue;
25802323 }
25812324 break;
25822325 }
25832326 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2584
- "6177 Lport %p Localport %p Complete Success\n",
2327
+ "6177 Lport x%px Localport x%px Complete Success\n",
25852328 lport, vport->localport);
2586
-#endif
25872329 }
2330
+#endif
25882331
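
The unreg wait above deliberately never gives up: each time the 10-second timeout expires it tallies the pending and aborted IO counts across the hardware queues for the log, then renews the wait. A compact userspace analogue of that renewed-timeout loop using a timed condition wait (all names illustrative; pthread primitives stand in for the kernel completion):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done; /* set by the "transport" when unreg completes */

/* Wait indefinitely, but wake every wait_secs to log progress,
 * mirroring the renewed wait_for_completion_timeout() loop. */
static void wait_for_unreg(int wait_secs)
{
	struct timespec ts;

	pthread_mutex_lock(&lock);
	while (!done) {
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += wait_secs;
		if (pthread_cond_timedwait(&done_cv, &lock, &ts) != 0)
			printf("unreg wait timed out - renewing\n");
	}
	pthread_mutex_unlock(&lock);
	printf("unreg complete\n");
}

static void *signaler(void *arg)
{
	(void)arg;
	sleep(1); /* model the transport's callback latency */
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, signaler, NULL);
	wait_for_unreg(10);
	pthread_join(t, NULL);
	return 0;
}
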
25892332 /**
25902333 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
....@@ -2602,7 +2345,6 @@
26022345 #if (IS_ENABLED(CONFIG_NVME_FC))
26032346 struct nvme_fc_local_port *localport;
26042347 struct lpfc_nvme_lport *lport;
2605
- struct lpfc_nvme_ctrl_stat *cstat;
26062348 int ret;
26072349 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
26082350
....@@ -2611,10 +2353,9 @@
26112353
26122354 localport = vport->localport;
26132355 lport = (struct lpfc_nvme_lport *)localport->private;
2614
- cstat = lport->cstat;
26152356
26162357 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2617
- "6011 Destroying NVME localport %p\n",
2358
+ "6011 Destroying NVME localport x%px\n",
26182359 localport);
26192360
26202361 /* lport's rport list is clear. Unregister
....@@ -2628,7 +2369,6 @@
26282369 */
26292370 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
26302371 vport->localport = NULL;
2631
- kfree(cstat);
26322372
26332373 /* Regardless of the unregister upcall response, clear
26342374 * nvmei_support. All rports are unregistered and the
....@@ -2665,12 +2405,12 @@
26652405 lport = (struct lpfc_nvme_lport *)localport->private;
26662406 if (!lport) {
26672407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2668
- "6171 Update NVME fail. localP %p, No lport\n",
2408
+ "6171 Update NVME fail. localP x%px, No lport\n",
26692409 localport);
26702410 return;
26712411 }
26722412 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2673
- "6012 Update NVME lport %p did x%x\n",
2413
+ "6012 Update NVME lport x%px did x%x\n",
26742414 localport, vport->fc_myDID);
26752415
26762416 localport->port_id = vport->fc_myDID;
....@@ -2680,7 +2420,7 @@
26802420 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
26812421
26822422 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2683
- "6030 bound lport %p to DID x%06x\n",
2423
+ "6030 bound lport x%px to DID x%06x\n",
26842424 lport, localport->port_id);
26852425 #endif
26862426 }
....@@ -2697,6 +2437,7 @@
26972437 struct nvme_fc_remote_port *remote_port;
26982438 struct nvme_fc_port_info rpinfo;
26992439 struct lpfc_nodelist *prev_ndlp = NULL;
2440
+ struct fc_rport *srport = ndlp->rport;
27002441
27012442 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
27022443 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
....@@ -2726,12 +2467,20 @@
27262467
27272468 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
27282469 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2470
+ if (srport)
2471
+ rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2472
+ else
2473
+ rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
27292474
27302475 spin_lock_irq(&vport->phba->hbalock);
27312476 oldrport = lpfc_ndlp_get_nrport(ndlp);
2732
- spin_unlock_irq(&vport->phba->hbalock);
2733
- if (!oldrport)
2477
+ if (oldrport) {
2478
+ prev_ndlp = oldrport->ndlp;
2479
+ spin_unlock_irq(&vport->phba->hbalock);
2480
+ } else {
2481
+ spin_unlock_irq(&vport->phba->hbalock);
27342482 lpfc_nlp_get(ndlp);
2483
+ }
27352484
27362485 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
27372486 if (!ret) {
....@@ -2747,29 +2496,6 @@
27472496 spin_unlock_irq(&vport->phba->hbalock);
27482497 rport = remote_port->private;
27492498 if (oldrport) {
2750
- /* New remoteport record does not guarantee valid
2751
- * host private memory area.
2752
- */
2753
- prev_ndlp = oldrport->ndlp;
2754
- if (oldrport == remote_port->private) {
2755
- /* Same remoteport - ndlp should match.
2756
- * Just reuse.
2757
- */
2758
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2759
- LOG_NVME_DISC,
2760
- "6014 Rebinding lport to "
2761
- "remoteport %p wwpn 0x%llx, "
2762
- "Data: x%x x%x %p %p x%x x%06x\n",
2763
- remote_port,
2764
- remote_port->port_name,
2765
- remote_port->port_id,
2766
- remote_port->port_role,
2767
- prev_ndlp,
2768
- ndlp,
2769
- ndlp->nlp_type,
2770
- ndlp->nlp_DID);
2771
- return 0;
2772
- }
27732499
27742500 /* Sever the ndlp<->rport association
27752501 * before dropping the ndlp ref from
....@@ -2802,17 +2528,17 @@
28022528 spin_unlock_irq(&vport->phba->hbalock);
28032529 lpfc_printf_vlog(vport, KERN_INFO,
28042530 LOG_NVME_DISC | LOG_NODE,
2805
- "6022 Binding new rport to "
2806
- "lport %p Remoteport %p rport %p WWNN 0x%llx, "
2531
+ "6022 Bind lport x%px to remoteport x%px "
2532
+ "rport x%px WWNN 0x%llx, "
28072533 "Rport WWPN 0x%llx DID "
2808
- "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
2534
+ "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
28092535 lport, remote_port, rport,
28102536 rpinfo.node_name, rpinfo.port_name,
28112537 rpinfo.port_id, rpinfo.port_role,
28122538 ndlp, prev_ndlp);
28132539 } else {
28142540 lpfc_printf_vlog(vport, KERN_ERR,
2815
- LOG_NVME_DISC | LOG_NODE,
2541
+ LOG_TRACE_EVENT,
28162542 "6031 RemotePort Registration failed "
28172543 "err: %d, DID x%06x\n",
28182544 ret, ndlp->nlp_DID);
....@@ -2821,6 +2547,53 @@
28212547 return ret;
28222548 #else
28232549 return 0;
2550
+#endif
2551
+}
2552
+
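
The registration path above now seeds the NVME remoteport's dev_loss_tmo from the SCSI fc_rport when one exists, falling back to the vport's configured default, so both protocols age out a lost device on the same clock. A one-function sketch of that fallback (struct and field names are stand-ins):

#include <stdio.h>

struct scsi_rport { unsigned int dev_loss_tmo; };

/* Prefer the SCSI transport's per-rport setting; otherwise use the
 * vport-wide configured default. */
static unsigned int pick_dev_loss_tmo(const struct scsi_rport *srport,
				      unsigned int cfg_devloss_tmo)
{
	return srport ? srport->dev_loss_tmo : cfg_devloss_tmo;
}

int main(void)
{
	struct scsi_rport sr = { .dev_loss_tmo = 60 };

	printf("with rport: %u, without: %u\n",
	       pick_dev_loss_tmo(&sr, 30), pick_dev_loss_tmo(NULL, 30));
	return 0;
}
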
2553
+/**
2554
+ * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2555
+ *
2556
+ * If the ndlp represents an NVME Target that we are logged into,
2557
+ * ping the NVME FC Transport layer to initiate a device rescan
2558
+ * on this remote NPort.
2559
+ */
2560
+void
2561
+lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2562
+{
2563
+#if (IS_ENABLED(CONFIG_NVME_FC))
2564
+ struct lpfc_nvme_rport *nrport;
2565
+ struct nvme_fc_remote_port *remoteport = NULL;
2566
+
2567
+ spin_lock_irq(&vport->phba->hbalock);
2568
+ nrport = lpfc_ndlp_get_nrport(ndlp);
2569
+ if (nrport)
2570
+ remoteport = nrport->remoteport;
2571
+ spin_unlock_irq(&vport->phba->hbalock);
2572
+
2573
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2574
+ "6170 Rescan NPort DID x%06x type x%x "
2575
+ "state x%x nrport x%px remoteport x%px\n",
2576
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2577
+ nrport, remoteport);
2578
+
2579
+ if (!nrport || !remoteport)
2580
+ goto rescan_exit;
2581
+
2582
+ /* Only rescan if we are an NVME target in the MAPPED state */
2583
+ if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2584
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2585
+ nvme_fc_rescan_remoteport(remoteport);
2586
+
2587
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2588
+ "6172 NVME rescanned DID x%06x "
2589
+ "port_state x%x\n",
2590
+ ndlp->nlp_DID, remoteport->port_state);
2591
+ }
2592
+ return;
2593
+ rescan_exit:
2594
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2595
+ "6169 Skip NVME Rport Rescan, NVME remoteport "
2596
+ "unregistered\n");
28242597 #endif
28252598 }
28262599
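
lpfc_nvme_rescan_port() above only pings the transport when the remote port advertises the NVME discovery role and the node is fully MAPPED; everything else falls through to the skip log. The gate reduces to a two-condition check, sketched here with stand-in constants (the real role bit and node-state values live in the FC transport and driver headers):

#include <stdbool.h>
#include <stdio.h>

#define PORT_ROLE_NVME_DISCOVERY 0x4 /* stand-in for the FC role bit */
#define STE_MAPPED_NODE          6   /* stand-in for the nodelist state */

/* Rescan only targets we can actually talk to: discovery-capable
 * remote ports whose login is complete (MAPPED). */
static bool should_rescan(unsigned int port_role, int nlp_state)
{
	return (port_role & PORT_ROLE_NVME_DISCOVERY) &&
	       nlp_state == STE_MAPPED_NODE;
}

int main(void)
{
	printf("mapped+discovery: %d\n", should_rescan(0x4, 6));
	printf("unmapped: %d\n", should_rescan(0x4, 4));
	return 0;
}
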
....@@ -2867,7 +2640,7 @@
28672640 goto input_err;
28682641
28692642 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2870
- "6033 Unreg nvme remoteport %p, portname x%llx, "
2643
+ "6033 Unreg nvme remoteport x%px, portname x%llx, "
28712644 "port_id x%06x, portstate x%x port type x%x\n",
28722645 remoteport, remoteport->port_name,
28732646 remoteport->port_id, remoteport->port_state,
....@@ -2894,7 +2667,7 @@
28942667 ret = nvme_fc_unregister_remoteport(remoteport);
28952668 if (ret != 0) {
28962669 lpfc_nlp_put(ndlp);
2897
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2670
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
28982671 "6167 NVME unregister failed %d "
28992672 "port_state x%x\n",
29002673 ret, remoteport->port_state);
....@@ -2904,8 +2677,8 @@
29042677
29052678 input_err:
29062679 #endif
2907
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2908
- "6168 State error: lport %p, rport%p FCID x%06x\n",
2680
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2681
+ "6168 State error: lport x%px, rport x%px FCID x%06x\n",
29092682 vport->localport, ndlp->rport, ndlp->nlp_DID);
29102683 }
29112684
....@@ -2913,6 +2686,7 @@
29132686 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
29142687 * @phba: pointer to lpfc hba data structure.
29152688 * @axri: pointer to the fcp xri abort wcqe structure.
2689
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
29162690 *
29172691 * This routine is invoked by the worker thread to process a SLI4 fast-path
29182692 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
....@@ -2920,58 +2694,33 @@
29202694 **/
29212695 void
29222696 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2923
- struct sli4_wcqe_xri_aborted *axri)
2697
+ struct sli4_wcqe_xri_aborted *axri,
2698
+ struct lpfc_io_buf *lpfc_ncmd)
29242699 {
29252700 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2926
- struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
29272701 struct nvmefc_fcp_req *nvme_cmd = NULL;
2928
- struct lpfc_nodelist *ndlp;
2929
- unsigned long iflag = 0;
2702
+ struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
29302703
2931
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2932
- return;
2933
- spin_lock_irqsave(&phba->hbalock, iflag);
2934
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2935
- list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2936
- &phba->sli4_hba.lpfc_abts_nvme_buf_list,
2937
- list) {
2938
- if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2939
- list_del_init(&lpfc_ncmd->list);
2940
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2941
- lpfc_ncmd->status = IOSTAT_SUCCESS;
2942
- spin_unlock(
2943
- &phba->sli4_hba.abts_nvme_buf_list_lock);
29442704
2945
- spin_unlock_irqrestore(&phba->hbalock, iflag);
2946
- ndlp = lpfc_ncmd->ndlp;
2947
- if (ndlp)
2948
- lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2949
-
2950
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2951
- "6311 nvme_cmd %p xri x%x tag x%x "
2952
- "abort complete and xri released\n",
2953
- lpfc_ncmd->nvmeCmd, xri,
2954
- lpfc_ncmd->cur_iocbq.iotag);
2955
-
2956
- /* Aborted NVME commands are required to not complete
2957
- * before the abort exchange command fully completes.
2958
- * Once completed, it is available via the put list.
2959
- */
2960
- if (lpfc_ncmd->nvmeCmd) {
2961
- nvme_cmd = lpfc_ncmd->nvmeCmd;
2962
- nvme_cmd->done(nvme_cmd);
2963
- lpfc_ncmd->nvmeCmd = NULL;
2964
- }
2965
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
2966
- return;
2967
- }
2968
- }
2969
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2970
- spin_unlock_irqrestore(&phba->hbalock, iflag);
2705
+ if (ndlp)
2706
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
29712707
29722708 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2973
- "6312 XRI Aborted xri x%x not found\n", xri);
2709
+ "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2710
+ "xri released\n",
2711
+ lpfc_ncmd->nvmeCmd, xri,
2712
+ lpfc_ncmd->cur_iocbq.iotag);
29742713
2714
+ /* Aborted NVME commands are required to not complete
2715
+ * before the abort exchange command fully completes.
2716
+ * Once completed, it is available via the put list.
2717
+ */
2718
+ if (lpfc_ncmd->nvmeCmd) {
2719
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
2720
+ nvme_cmd->done(nvme_cmd);
2721
+ lpfc_ncmd->nvmeCmd = NULL;
2722
+ }
2723
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
29752724 }
29762725
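
With the rework above, the caller hands in the exact lpfc_io_buf for the aborted XRI, so the old list walk (and its "6312 not found" fallback) disappears: the routine runs the error handler, completes the command once via done(), severs the back-pointer, and releases the buffer. A sketch of that complete-once discipline (types are illustrative; the NULL check plus clear is what prevents a double completion):

#include <stdio.h>

struct nvme_req { void (*done)(struct nvme_req *); };
struct io_buf   { struct nvme_req *cmd; };

static void req_done(struct nvme_req *r) { (void)r; printf("done()\n"); }

/* Complete the upper-layer request exactly once, then sever the
 * buffer<->request link before the buffer is recycled. */
static void finish_aborted_io(struct io_buf *buf)
{
	if (buf->cmd) {
		struct nvme_req *r = buf->cmd;

		buf->cmd = NULL; /* no second completion possible */
		r->done(r);
	}
	/* buffer would be released back to the pool here */
}

int main(void)
{
	struct nvme_req r = { .done = req_done };
	struct io_buf b = { .cmd = &r };

	finish_aborted_io(&b);
	finish_aborted_io(&b); /* second call is a harmless no-op */
	return 0;
}
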
29772726 /**
....@@ -2990,14 +2739,16 @@
29902739 struct lpfc_sli_ring *pring;
29912740 u32 i, wait_cnt = 0;
29922741
2993
- if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
2742
+ if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
29942743 return;
29952744
2996
- /* Cycle through all NVME rings and make sure all outstanding
2745
+ /* Cycle through all IO rings and make sure all outstanding
29972746 * WQEs have been removed from the txcmplqs.
29982747 */
2999
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3000
- pring = phba->sli4_hba.nvme_wq[i]->pring;
2748
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
2749
+ if (!phba->sli4_hba.hdwq[i].io_wq)
2750
+ continue;
2751
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
30012752
30022753 if (!pring)
30032754 continue;
....@@ -3011,10 +2762,57 @@
30112762 * dump a message. Something is wrong.
30122763 */
30132764 if ((wait_cnt % 1000) == 0) {
3014
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2765
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
30152766 "6178 NVME IO not empty, "
30162767 "cnt %d\n", wait_cnt);
30172768 }
30182769 }
30192770 }
30202771 }
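
The drain loop above spins until every hardware queue's txcmplq is empty, sleeping between passes and nagging the log every 1000 polls so a stuck queue shows up instead of hanging silently. A userspace reduction of that poll-and-warn shape (queue counts and the completion stand-in are contrived for the demo):

#include <stdio.h>
#include <unistd.h>

#define NUM_QUEUES 4

static int txcmplq_cnt[NUM_QUEUES]; /* models per-queue pending IOs */

static int total_pending(void)
{
	int sum = 0;

	for (int i = 0; i < NUM_QUEUES; i++)
		sum += txcmplq_cnt[i];
	return sum;
}

/* Poll until all queues drain; warn periodically so a wedged queue
 * is visible instead of a silent hang. */
static void wait_for_io_drain(void)
{
	unsigned int wait_cnt = 0;

	while (total_pending() > 0) {
		usleep(1000);
		if ((++wait_cnt % 1000) == 0)
			printf("IO not empty, cnt %u\n", wait_cnt);
		txcmplq_cnt[0] = 0; /* stand-in for completions arriving */
	}
}

int main(void)
{
	txcmplq_cnt[0] = 3;
	wait_for_io_drain();
	printf("drained\n");
	return 0;
}
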
2772
+
2773
+void
2774
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
2775
+{
2776
+#if (IS_ENABLED(CONFIG_NVME_FC))
2777
+ struct lpfc_io_buf *lpfc_ncmd;
2778
+ struct nvmefc_fcp_req *nCmd;
2779
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
2780
+
2781
+ if (!pwqeIn->context1) {
2782
+ lpfc_sli_release_iocbq(phba, pwqeIn);
2783
+ return;
2784
+ }
2785
+	/* For an abort iocb just return; the IO iocb will make the done call */
2786
+ if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2787
+ CMD_ABORT_XRI_CX) {
2788
+ lpfc_sli_release_iocbq(phba, pwqeIn);
2789
+ return;
2790
+ }
2791
+ lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2792
+
2793
+ spin_lock(&lpfc_ncmd->buf_lock);
2794
+ if (!lpfc_ncmd->nvmeCmd) {
2795
+ spin_unlock(&lpfc_ncmd->buf_lock);
2796
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
2797
+ return;
2798
+ }
2799
+
2800
+ nCmd = lpfc_ncmd->nvmeCmd;
2801
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2802
+ "6194 NVME Cancel xri %x\n",
2803
+ lpfc_ncmd->cur_iocbq.sli4_xritag);
2804
+
2805
+ nCmd->transferred_length = 0;
2806
+ nCmd->rcv_rsplen = 0;
2807
+ nCmd->status = NVME_SC_INTERNAL;
2808
+ freqpriv = nCmd->private;
2809
+ freqpriv->nvme_buf = NULL;
2810
+ lpfc_ncmd->nvmeCmd = NULL;
2811
+
2812
+ spin_unlock(&lpfc_ncmd->buf_lock);
2813
+ nCmd->done(nCmd);
2814
+
2815
+ /* Call release with XB=1 to queue the IO into the abort list. */
2816
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
2817
+#endif
2818
+}
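
lpfc_nvme_cancel_iocb() above fails a command back to the transport without hardware involvement: it zeroes the transfer counts, marks the status internal, severs both back-pointers while holding buf_lock, and only calls done() after dropping the lock, since the callback may re-enter the driver. A sketch of that unlock-before-callback rule (names and status value are stand-ins):

#include <pthread.h>
#include <stdio.h>

struct req { int status; void (*done)(struct req *); };

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_done(struct req *r)
{
	/* May re-acquire driver locks; must run with buf_lock dropped. */
	printf("cancelled, status %d\n", r->status);
}

static void cancel_io(struct req *r)
{
	pthread_mutex_lock(&buf_lock);
	r->status = -1; /* stand-in for NVME_SC_INTERNAL */
	pthread_mutex_unlock(&buf_lock);

	r->done(r); /* callback runs outside the lock */
}

int main(void)
{
	struct req r = { .done = req_done };

	cancel_io(&r);
	return 0;
}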