forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/scsi/qedf/qedf_io.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -25,7 +22,6 @@
         container_of(work, struct qedf_ioreq, timeout_work.work);
     struct qedf_ctx *qedf;
     struct qedf_rport *fcport;
-    u8 op = 0;
 
     if (io_req == NULL) {
         QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
@@ -43,8 +39,9 @@
     switch (io_req->cmd_type) {
     case QEDF_ABTS:
         if (qedf == NULL) {
-            QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
-                io_req->xid);
+            QEDF_INFO(NULL, QEDF_LOG_IO,
+                "qedf is NULL for ABTS xid=0x%x.\n",
+                io_req->xid);
             return;
         }
 
@@ -61,6 +58,9 @@
          */
         kref_put(&io_req->refcount, qedf_release_cmd);
 
+        /* Clear in abort bit now that we're done with the command */
+        clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
         /*
          * Now that the original I/O and the ABTS are complete see
          * if we need to reconnect to the target.
@@ -68,6 +68,15 @@
         qedf_restart_rport(fcport);
         break;
     case QEDF_ELS:
+        if (!qedf) {
+            QEDF_INFO(NULL, QEDF_LOG_IO,
+                "qedf is NULL for ELS xid=0x%x.\n",
+                io_req->xid);
+            return;
+        }
+        /* ELS request no longer outstanding since it timed out */
+        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
         kref_get(&io_req->refcount);
         /*
          * Don't attempt to clean an ELS timeout as any subseqeunt
@@ -76,14 +85,13 @@
          */
         QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
             io_req->xid);
+        qedf_initiate_cleanup(io_req, true);
         io_req->event = QEDF_IOREQ_EV_ELS_TMO;
         /* Call callback function to complete command */
         if (io_req->cb_func && io_req->cb_arg) {
-            op = io_req->cb_arg->op;
             io_req->cb_func(io_req->cb_arg);
             io_req->cb_arg = NULL;
         }
-        qedf_initiate_cleanup(io_req, true);
         kref_put(&io_req->refcount, qedf_release_cmd);
         break;
     case QEDF_SEQ_CLEANUP:
@@ -94,6 +102,8 @@
         qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
         break;
     default:
+        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+            "Hit default case, xid=0x%x.\n", io_req->xid);
         break;
     }
 }
@@ -103,7 +113,7 @@
     struct io_bdt *bdt_info;
     struct qedf_ctx *qedf = cmgr->qedf;
     size_t bd_tbl_sz;
-    u16 min_xid = QEDF_MIN_XID;
+    u16 min_xid = 0;
     u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
     int num_ios;
     int i;
@@ -112,8 +122,10 @@
     num_ios = max_xid - min_xid + 1;
 
     /* Free fcoe_bdt_ctx structures */
-    if (!cmgr->io_bdt_pool)
+    if (!cmgr->io_bdt_pool) {
+        QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
         goto free_cmd_pool;
+    }
 
     bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
     for (i = 0; i < num_ios; i++) {
@@ -157,6 +169,7 @@
     struct qedf_ioreq *io_req =
         container_of(work, struct qedf_ioreq, rrq_work.work);
 
+    atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
     qedf_send_rrq(io_req);
 
 }
@@ -169,7 +182,7 @@
     u16 xid;
     int i;
     int num_ios;
-    u16 min_xid = QEDF_MIN_XID;
+    u16 min_xid = 0;
     u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
 
     /* Make sure num_queues is already set before calling this function */
@@ -201,7 +214,7 @@
     /*
      * Initialize I/O request fields.
      */
-    xid = QEDF_MIN_XID;
+    xid = 0;
 
     for (i = 0; i < num_ios; i++) {
         io_req = &cmgr->cmds[i];
@@ -215,8 +228,11 @@
         io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
             QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
             GFP_KERNEL);
-        if (!io_req->sense_buffer)
+        if (!io_req->sense_buffer) {
+            QEDF_ERR(&qedf->dbg_ctx,
+                "Failed to alloc sense buffer.\n");
             goto mem_err;
+        }
 
         /* Allocate task parameters to pass to f/w init funcions */
         io_req->task_params = kzalloc(sizeof(*io_req->task_params),
@@ -329,7 +345,7 @@
         cmd_mgr->idx = 0;
 
         /* Check to make sure command was previously freed */
-        if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+        if (!io_req->alloc)
             break;
     }
 
@@ -338,7 +354,14 @@
         goto out_failed;
     }
 
-    set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+    if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
+        QEDF_ERR(&qedf->dbg_ctx,
+            "io_req found to be dirty ox_id = 0x%x.\n",
+            io_req->xid);
+
+    /* Clear any flags now that we've reallocated the xid */
+    io_req->flags = 0;
+    io_req->alloc = 1;
     spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 
     atomic_inc(&fcport->num_active_ios);
@@ -349,8 +372,13 @@
     io_req->cmd_mgr = cmd_mgr;
     io_req->fcport = fcport;
 
+    /* Clear any stale sc_cmd back pointer */
+    io_req->sc_cmd = NULL;
+    io_req->lun = -1;
+
     /* Hold the io_req against deletion */
-    kref_init(&io_req->refcount);
+    kref_init(&io_req->refcount); /* ID: 001 */
+    atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
 
     /* Bind io_bdt for this io_req */
     /* Have a static link between io_req and io_bdt_pool */
@@ -412,6 +440,14 @@
         container_of(ref, struct qedf_ioreq, refcount);
     struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
     struct qedf_rport *fcport = io_req->fcport;
+    unsigned long flags;
+
+    if (io_req->cmd_type == QEDF_SCSI_CMD) {
+        QEDF_WARN(&fcport->qedf->dbg_ctx,
+            "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
+            io_req, io_req->xid);
+        WARN_ON(io_req->sc_cmd);
+    }
 
     if (io_req->cmd_type == QEDF_ELS ||
         io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -419,36 +455,22 @@
 
     atomic_inc(&cmd_mgr->free_list_cnt);
     atomic_dec(&fcport->num_active_ios);
-    if (atomic_read(&fcport->num_active_ios) < 0)
+    atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
+    if (atomic_read(&fcport->num_active_ios) < 0) {
         QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
+        WARN_ON(1);
+    }
 
     /* Increment task retry identifier now that the request is released */
     io_req->task_retry_identifier++;
+    io_req->fcport = NULL;
 
-    clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
-}
-
-static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
-    int bd_index)
-{
-    struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
-    int frag_size, sg_frags;
-
-    sg_frags = 0;
-    while (sg_len) {
-        if (sg_len > QEDF_BD_SPLIT_SZ)
-            frag_size = QEDF_BD_SPLIT_SZ;
-        else
-            frag_size = sg_len;
-        bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
-        bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-        bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
-
-        addr += (u64)frag_size;
-        sg_frags++;
-        sg_len -= frag_size;
-    }
-    return sg_frags;
+    clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
+    io_req->cpu = 0;
+    spin_lock_irqsave(&cmd_mgr->lock, flags);
+    io_req->fcport = NULL;
+    io_req->alloc = 0;
+    spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 }
 
 static int qedf_map_sg(struct qedf_ioreq *io_req)
....@@ -462,74 +484,43 @@
462484 int byte_count = 0;
463485 int sg_count = 0;
464486 int bd_count = 0;
465
- int sg_frags;
466
- unsigned int sg_len;
467
- u64 addr, end_addr;
468
- int i;
487
+ u32 sg_len;
488
+ u64 addr;
489
+ int i = 0;
469490
470491 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
471492 scsi_sg_count(sc), sc->sc_data_direction);
472
-
473493 sg = scsi_sglist(sc);
474494
475
- /*
476
- * New condition to send single SGE as cached-SGL with length less
477
- * than 64k.
478
- */
479
- if ((sg_count == 1) && (sg_dma_len(sg) <=
480
- QEDF_MAX_SGLEN_FOR_CACHESGL)) {
481
- sg_len = sg_dma_len(sg);
482
- addr = (u64)sg_dma_address(sg);
495
+ io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
483496
484
- bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
485
- bd[bd_count].sge_addr.hi = (addr >> 32);
486
- bd[bd_count].sge_len = (u16)sg_len;
487
-
488
- return ++bd_count;
489
- }
497
+ if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
498
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
490499
491500 scsi_for_each_sg(sc, sg, sg_count, i) {
492
- sg_len = sg_dma_len(sg);
501
+ sg_len = (u32)sg_dma_len(sg);
493502 addr = (u64)sg_dma_address(sg);
494
- end_addr = (u64)(addr + sg_len);
495503
496504 /*
497
- * First s/g element in the list so check if the end_addr
498
- * is paged aligned. Also check to make sure the length is
499
- * at least page size.
505
+ * Intermediate s/g element so check if start address
506
+ * is page aligned. Only required for writes and only if the
507
+ * number of scatter/gather elements is 8 or more.
500508 */
501
- if ((i == 0) && (sg_count > 1) &&
502
- ((end_addr % QEDF_PAGE_SIZE) ||
503
- sg_len < QEDF_PAGE_SIZE))
504
- io_req->use_slowpath = true;
505
- /*
506
- * Last s/g element so check if the start address is paged
507
- * aligned.
508
- */
509
- else if ((i == (sg_count - 1)) && (sg_count > 1) &&
510
- (addr % QEDF_PAGE_SIZE))
511
- io_req->use_slowpath = true;
512
- /*
513
- * Intermediate s/g element so check if start and end address
514
- * is page aligned.
515
- */
516
- else if ((i != 0) && (i != (sg_count - 1)) &&
517
- ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
518
- io_req->use_slowpath = true;
509
+ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
510
+ (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
511
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
519512
520
- if (sg_len > QEDF_MAX_BD_LEN) {
521
- sg_frags = qedf_split_bd(io_req, addr, sg_len,
522
- bd_count);
523
- } else {
524
- sg_frags = 1;
525
- bd[bd_count].sge_addr.lo = U64_LO(addr);
526
- bd[bd_count].sge_addr.hi = U64_HI(addr);
527
- bd[bd_count].sge_len = (uint16_t)sg_len;
528
- }
513
+ bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
514
+ bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
515
+ bd[bd_count].sge_len = cpu_to_le32(sg_len);
529516
530
- bd_count += sg_frags;
517
+ bd_count++;
531518 byte_count += sg_len;
532519 }
520
+
521
+ /* To catch a case where FAST and SLOW nothing is set, set FAST */
522
+ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
523
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
533524
534525 if (byte_count != scsi_bufflen(sc))
535526 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
....@@ -655,8 +646,10 @@
655646 io_req->sgl_task_params->num_sges = bd_count;
656647 io_req->sgl_task_params->total_buffer_size =
657648 scsi_bufflen(io_req->sc_cmd);
658
- io_req->sgl_task_params->small_mid_sge =
659
- io_req->use_slowpath;
649
+ if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
650
+ io_req->sgl_task_params->small_mid_sge = 1;
651
+ else
652
+ io_req->sgl_task_params->small_mid_sge = 0;
660653 }
661654
662655 /* Fill in physical address of sense buffer */
....@@ -679,16 +672,10 @@
679672 io_req->task_retry_identifier, fcp_cmnd);
680673
681674 /* Increment SGL type counters */
682
- if (bd_count == 1) {
683
- qedf->single_sge_ios++;
684
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
685
- } else if (io_req->use_slowpath) {
675
+ if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
686676 qedf->slow_sge_ios++;
687
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
688
- } else {
677
+ else
689678 qedf->fast_sge_ios++;
690
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
691
- }
692679 }
693680
694681 void qedf_init_mp_task(struct qedf_ioreq *io_req,
....@@ -770,9 +757,6 @@
770757 &task_fc_hdr,
771758 &tx_sgl_task_params,
772759 &rx_sgl_task_params, 0);
773
-
774
- /* Midpath requests always consume 1 SGE */
775
- qedf->single_sge_ios++;
776760 }
777761
778762 /* Presumed that fcport->rport_lock is held */
....@@ -804,10 +788,18 @@
804788 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
805789
806790 dbell.sq_prod = fcport->fw_sq_prod_idx;
807
- writel(*(u32 *)&dbell, fcport->p_doorbell);
808
- /* Make sure SQ index is updated so f/w prcesses requests in order */
791
+ /* wmb makes sure that the BDs data is updated before updating the
792
+ * producer, otherwise FW may read old data from the BDs.
793
+ */
809794 wmb();
810
- mmiowb();
795
+ barrier();
796
+ writel(*(u32 *)&dbell, fcport->p_doorbell);
797
+ /*
798
+ * Fence required to flush the write combined buffer, since another
799
+ * CPU may write to the same doorbell address and data may be lost
800
+ * due to relaxed order nature of write combined bar.
801
+ */
802
+ wmb();
811803 }
812804
813805 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
....@@ -865,24 +857,21 @@
865857 struct qedf_ctx *qedf = lport_priv(lport);
866858 struct e4_fcoe_task_context *task_ctx;
867859 u16 xid;
868
- enum fcoe_task_type req_type = 0;
869860 struct fcoe_wqe *sqe;
870861 u16 sqe_idx;
871862
872863 /* Initialize rest of io_req fileds */
873864 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
874865 sc_cmd->SCp.ptr = (char *)io_req;
875
- io_req->use_slowpath = false; /* Assume fast SGL by default */
866
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
876867
877868 /* Record which cpu this request is associated with */
878869 io_req->cpu = smp_processor_id();
879870
880871 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
881
- req_type = FCOE_TASK_TYPE_READ_INITIATOR;
882872 io_req->io_req_flags = QEDF_READ;
883873 qedf->input_requests++;
884874 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
885
- req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
886875 io_req->io_req_flags = QEDF_WRITE;
887876 qedf->output_requests++;
888877 } else {
....@@ -895,15 +884,23 @@
895884 /* Build buffer descriptor list for firmware from sg list */
896885 if (qedf_build_bd_list_from_sg(io_req)) {
897886 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
887
+ /* Release cmd will release io_req, but sc_cmd is assigned */
888
+ io_req->sc_cmd = NULL;
898889 kref_put(&io_req->refcount, qedf_release_cmd);
899890 return -EAGAIN;
900891 }
901892
902
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
893
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
894
+ test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
903895 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
896
+ /* Release cmd will release io_req, but sc_cmd is assigned */
897
+ io_req->sc_cmd = NULL;
904898 kref_put(&io_req->refcount, qedf_release_cmd);
905899 return -EINVAL;
906900 }
901
+
902
+ /* Record LUN number for later use if we neeed them */
903
+ io_req->lun = (int)sc_cmd->device->lun;
907904
908905 /* Obtain free SQE */
909906 sqe_idx = qedf_get_sqe_idx(fcport);
....@@ -915,6 +912,8 @@
915912 if (!task_ctx) {
916913 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
917914 xid);
915
+ /* Release cmd will release io_req, but sc_cmd is assigned */
916
+ io_req->sc_cmd = NULL;
918917 kref_put(&io_req->refcount, qedf_release_cmd);
919918 return -EINVAL;
920919 }
....@@ -923,6 +922,9 @@
923922
924923 /* Ring doorbell */
925924 qedf_ring_doorbell(fcport);
925
+
926
+ /* Set that command is with the firmware now */
927
+ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
926928
927929 if (qedf_io_tracing && io_req->sc_cmd)
928930 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
....@@ -942,10 +944,23 @@
942944 int rc = 0;
943945 int rval;
944946 unsigned long flags = 0;
947
+ int num_sgs = 0;
945948
949
+ num_sgs = scsi_sg_count(sc_cmd);
950
+ if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
951
+ QEDF_ERR(&qedf->dbg_ctx,
952
+ "Number of SG elements %d exceeds what hardware limitation of %d.\n",
953
+ num_sgs, QEDF_MAX_BDS_PER_CMD);
954
+ sc_cmd->result = DID_ERROR;
955
+ sc_cmd->scsi_done(sc_cmd);
956
+ return 0;
957
+ }
946958
947959 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
948960 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
961
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
962
+ "Returning DNC as unloading or stop io, flags 0x%lx.\n",
963
+ qedf->flags);
949964 sc_cmd->result = DID_NO_CONNECT << 16;
950965 sc_cmd->scsi_done(sc_cmd);
951966 return 0;
....@@ -962,6 +977,9 @@
962977
963978 rval = fc_remote_port_chkready(rport);
964979 if (rval) {
980
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
981
+ "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
982
+ rval, rport->port_id);
965983 sc_cmd->result = rval;
966984 sc_cmd->scsi_done(sc_cmd);
967985 return 0;
....@@ -969,12 +987,14 @@
969987
970988 /* Retry command if we are doing a qed drain operation */
971989 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
990
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
972991 rc = SCSI_MLQUEUE_HOST_BUSY;
973992 goto exit_qcmd;
974993 }
975994
976995 if (lport->state != LPORT_ST_READY ||
977996 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
997
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
978998 rc = SCSI_MLQUEUE_HOST_BUSY;
979999 goto exit_qcmd;
9801000 }
....@@ -982,7 +1002,8 @@
9821002 /* rport and tgt are allocated together, so tgt should be non-NULL */
9831003 fcport = (struct qedf_rport *)&rp[1];
9841004
985
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1005
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1006
+ test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
9861007 /*
9871008 * Session is not offloaded yet. Let SCSI-ml retry
9881009 * the command.
....@@ -990,19 +1011,28 @@
9901011 rc = SCSI_MLQUEUE_TARGET_BUSY;
9911012 goto exit_qcmd;
9921013 }
1014
+
1015
+ atomic_inc(&fcport->ios_to_queue);
1016
+
9931017 if (fcport->retry_delay_timestamp) {
1018
+ /* Take fcport->rport_lock for resetting the delay_timestamp */
1019
+ spin_lock_irqsave(&fcport->rport_lock, flags);
9941020 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
9951021 fcport->retry_delay_timestamp = 0;
9961022 } else {
1023
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
9971024 /* If retry_delay timer is active, flow off the ML */
9981025 rc = SCSI_MLQUEUE_TARGET_BUSY;
1026
+ atomic_dec(&fcport->ios_to_queue);
9991027 goto exit_qcmd;
10001028 }
1029
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
10011030 }
10021031
10031032 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
10041033 if (!io_req) {
10051034 rc = SCSI_MLQUEUE_HOST_BUSY;
1035
+ atomic_dec(&fcport->ios_to_queue);
10061036 goto exit_qcmd;
10071037 }
10081038
....@@ -1017,6 +1047,7 @@
10171047 rc = SCSI_MLQUEUE_HOST_BUSY;
10181048 }
10191049 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1050
+ atomic_dec(&fcport->ios_to_queue);
10201051
10211052 exit_qcmd:
10221053 return rc;
....@@ -1093,22 +1124,29 @@
10931124 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
10941125 struct qedf_ioreq *io_req)
10951126 {
1096
- u16 xid, rval;
1097
- struct e4_fcoe_task_context *task_ctx;
10981127 struct scsi_cmnd *sc_cmd;
10991128 struct fcoe_cqe_rsp_info *fcp_rsp;
11001129 struct qedf_rport *fcport;
11011130 int refcount;
11021131 u16 scope, qualifier = 0;
11031132 u8 fw_residual_flag = 0;
1133
+ unsigned long flags = 0;
1134
+ u16 chk_scope = 0;
11041135
11051136 if (!io_req)
11061137 return;
11071138 if (!cqe)
11081139 return;
11091140
1110
- xid = io_req->xid;
1111
- task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1141
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1142
+ test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1143
+ test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1144
+ QEDF_ERR(&qedf->dbg_ctx,
1145
+ "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1146
+ io_req->xid);
1147
+ return;
1148
+ }
1149
+
11121150 sc_cmd = io_req->sc_cmd;
11131151 fcp_rsp = &cqe->cqe_info.rsp_info;
11141152
....@@ -1123,15 +1161,15 @@
11231161 return;
11241162 }
11251163
1126
- if (!sc_cmd->request) {
1127
- QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1128
- "sc_cmd=%p.\n", sc_cmd);
1164
+ if (!sc_cmd->device) {
1165
+ QEDF_ERR(&qedf->dbg_ctx,
1166
+ "Device for sc_cmd %p is NULL.\n", sc_cmd);
11291167 return;
11301168 }
11311169
1132
- if (!sc_cmd->request->special) {
1133
- QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
1134
- "request not valid, sc_cmd=%p.\n", sc_cmd);
1170
+ if (!sc_cmd->request) {
1171
+ QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1172
+ "sc_cmd=%p.\n", sc_cmd);
11351173 return;
11361174 }
11371175
....@@ -1142,6 +1180,19 @@
11421180 }
11431181
11441182 fcport = io_req->fcport;
1183
+
1184
+ /*
1185
+ * When flush is active, let the cmds be completed from the cleanup
1186
+ * context
1187
+ */
1188
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1189
+ (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1190
+ sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1191
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1192
+ "Dropping good completion xid=0x%x as fcport is flushing",
1193
+ io_req->xid);
1194
+ return;
1195
+ }
11451196
11461197 qedf_parse_fcp_rsp(io_req, fcp_rsp);
11471198
....@@ -1160,24 +1211,17 @@
11601211 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
11611212 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
11621213 if (fw_residual_flag) {
1163
- QEDF_ERR(&(qedf->dbg_ctx),
1164
- "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1165
- "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1166
- fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1167
- cqe->cqe_info.rsp_info.fw_residual);
1214
+ QEDF_ERR(&qedf->dbg_ctx,
1215
+ "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1216
+ io_req->xid, fcp_rsp->rsp_flags.flags,
1217
+ io_req->fcp_resid,
1218
+ cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1219
+ sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
11681220
11691221 if (io_req->cdb_status == 0)
11701222 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
11711223 else
11721224 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1173
-
1174
- /* Abort the command since we did not get all the data */
1175
- init_completion(&io_req->abts_done);
1176
- rval = qedf_initiate_abts(io_req, true);
1177
- if (rval) {
1178
- QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1179
- sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1180
- }
11811225
11821226 /*
11831227 * Set resid to the whole buffer length so we won't try to resue
....@@ -1219,16 +1263,8 @@
12191263 /* Lower 14 bits */
12201264 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
12211265
1222
- if (qedf_retry_delay &&
1223
- scope > 0 && qualifier > 0 &&
1224
- qualifier <= 0x3FEF) {
1225
- /* Check we don't go over the max */
1226
- if (qualifier > QEDF_RETRY_DELAY_MAX)
1227
- qualifier =
1228
- QEDF_RETRY_DELAY_MAX;
1229
- fcport->retry_delay_timestamp =
1230
- jiffies + (qualifier * HZ / 10);
1231
- }
1266
+ if (qedf_retry_delay)
1267
+ chk_scope = 1;
12321268 /* Record stats */
12331269 if (io_req->cdb_status ==
12341270 SAM_STAT_TASK_SET_FULL)
....@@ -1239,6 +1275,36 @@
12391275 }
12401276 if (io_req->fcp_resid)
12411277 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1278
+
1279
+ if (chk_scope == 1) {
1280
+ if ((scope == 1 || scope == 2) &&
1281
+ (qualifier > 0 && qualifier <= 0x3FEF)) {
1282
+ /* Check we don't go over the max */
1283
+ if (qualifier > QEDF_RETRY_DELAY_MAX) {
1284
+ qualifier = QEDF_RETRY_DELAY_MAX;
1285
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1286
+ "qualifier = %d\n",
1287
+ (fcp_rsp->retry_delay_timer &
1288
+ 0x3FFF));
1289
+ }
1290
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1291
+ "Scope = %d and qualifier = %d",
1292
+ scope, qualifier);
1293
+ /* Take fcport->rport_lock to
1294
+ * update the retry_delay_timestamp
1295
+ */
1296
+ spin_lock_irqsave(&fcport->rport_lock, flags);
1297
+ fcport->retry_delay_timestamp =
1298
+ jiffies + (qualifier * HZ / 10);
1299
+ spin_unlock_irqrestore(&fcport->rport_lock,
1300
+ flags);
1301
+
1302
+ } else {
1303
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1304
+ "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1305
+ scope, qualifier);
1306
+ }
1307
+ }
12421308 break;
12431309 default:
12441310 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
....@@ -1250,6 +1316,12 @@
12501316 if (qedf_io_tracing)
12511317 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
12521318
1319
+ /*
1320
+ * We wait till the end of the function to clear the
1321
+ * outstanding bit in case we need to send an abort
1322
+ */
1323
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1324
+
12531325 io_req->sc_cmd = NULL;
12541326 sc_cmd->SCp.ptr = NULL;
12551327 sc_cmd->scsi_done(sc_cmd);
....@@ -1260,14 +1332,27 @@
12601332 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12611333 int result)
12621334 {
1263
- u16 xid;
12641335 struct scsi_cmnd *sc_cmd;
12651336 int refcount;
12661337
1267
- if (!io_req)
1338
+ if (!io_req) {
1339
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
12681340 return;
1341
+ }
12691342
1270
- xid = io_req->xid;
1343
+ if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1344
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1345
+ "io_req:%p scsi_done handling already done\n",
1346
+ io_req);
1347
+ return;
1348
+ }
1349
+
1350
+ /*
1351
+ * We will be done with this command after this call so clear the
1352
+ * outstanding bit.
1353
+ */
1354
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1355
+
12711356 sc_cmd = io_req->sc_cmd;
12721357
12731358 if (!sc_cmd) {
....@@ -1275,10 +1360,48 @@
12751360 return;
12761361 }
12771362
1363
+ if (!virt_addr_valid(sc_cmd)) {
1364
+ QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1365
+ goto bad_scsi_ptr;
1366
+ }
1367
+
12781368 if (!sc_cmd->SCp.ptr) {
12791369 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
12801370 "another context.\n");
12811371 return;
1372
+ }
1373
+
1374
+ if (!sc_cmd->device) {
1375
+ QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1376
+ sc_cmd);
1377
+ goto bad_scsi_ptr;
1378
+ }
1379
+
1380
+ if (!virt_addr_valid(sc_cmd->device)) {
1381
+ QEDF_ERR(&qedf->dbg_ctx,
1382
+ "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1383
+ goto bad_scsi_ptr;
1384
+ }
1385
+
1386
+ if (!sc_cmd->sense_buffer) {
1387
+ QEDF_ERR(&qedf->dbg_ctx,
1388
+ "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1389
+ sc_cmd);
1390
+ goto bad_scsi_ptr;
1391
+ }
1392
+
1393
+ if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1394
+ QEDF_ERR(&qedf->dbg_ctx,
1395
+ "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1396
+ sc_cmd);
1397
+ goto bad_scsi_ptr;
1398
+ }
1399
+
1400
+ if (!sc_cmd->scsi_done) {
1401
+ QEDF_ERR(&qedf->dbg_ctx,
1402
+ "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1403
+ sc_cmd);
1404
+ goto bad_scsi_ptr;
12821405 }
12831406
12841407 qedf_unmap_sg_list(qedf, io_req);
....@@ -1307,6 +1430,15 @@
13071430 sc_cmd->SCp.ptr = NULL;
13081431 sc_cmd->scsi_done(sc_cmd);
13091432 kref_put(&io_req->refcount, qedf_release_cmd);
1433
+ return;
1434
+
1435
+bad_scsi_ptr:
1436
+ /*
1437
+ * Clear the io_req->sc_cmd backpointer so we don't try to process
1438
+ * this again
1439
+ */
1440
+ io_req->sc_cmd = NULL;
1441
+ kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
13101442 }
13111443
13121444 /*
....@@ -1321,8 +1453,12 @@
13211453 u64 err_warn_bit_map;
13221454 u8 err_warn = 0xff;
13231455
1324
- if (!cqe)
1456
+ if (!cqe) {
1457
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1458
+ "cqe is NULL for io_req %p xid=0x%x\n",
1459
+ io_req, io_req->xid);
13251460 return;
1461
+ }
13261462
13271463 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
13281464 "xid=0x%x\n", io_req->xid);
....@@ -1384,8 +1520,21 @@
13841520 {
13851521 int rval;
13861522
1387
- if (!cqe)
1523
+ if (io_req == NULL) {
1524
+ QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
13881525 return;
1526
+ }
1527
+
1528
+ if (io_req->fcport == NULL) {
1529
+ QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1530
+ return;
1531
+ }
1532
+
1533
+ if (!cqe) {
1534
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1535
+ "cqe is NULL for io_req %p\n", io_req);
1536
+ return;
1537
+ }
13891538
13901539 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
13911540 "xid=0x%x\n", io_req->xid);
....@@ -1398,6 +1547,16 @@
13981547 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
13991548 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
14001549 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1550
+
1551
+ /* When flush is active, let the cmds be flushed out from the cleanup context */
1552
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1553
+ (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1554
+ io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1555
+ QEDF_ERR(&qedf->dbg_ctx,
1556
+ "Dropping EQE for xid=0x%x as fcport is flushing",
1557
+ io_req->xid);
1558
+ return;
1559
+ }
14011560
14021561 if (qedf->stop_io_on_error) {
14031562 qedf_stop_all_io(qedf);
....@@ -1423,6 +1582,8 @@
14231582 */
14241583 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
14251584
1585
+ clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1586
+
14261587 /* Cancel the timer */
14271588 cancel_delayed_work_sync(&els_req->timeout_work);
14281589
....@@ -1445,9 +1606,15 @@
14451606 struct qedf_ctx *qedf;
14461607 struct qedf_cmd_mgr *cmd_mgr;
14471608 int i, rc;
1609
+ unsigned long flags;
1610
+ int flush_cnt = 0;
1611
+ int wait_cnt = 100;
1612
+ int refcount = 0;
14481613
1449
- if (!fcport)
1614
+ if (!fcport) {
1615
+ QEDF_ERR(NULL, "fcport is NULL\n");
14501616 return;
1617
+ }
14511618
14521619 /* Check that fcport is still offloaded */
14531620 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
....@@ -1456,18 +1623,102 @@
14561623 }
14571624
14581625 qedf = fcport->qedf;
1626
+
1627
+ if (!qedf) {
1628
+ QEDF_ERR(NULL, "qedf is NULL.\n");
1629
+ return;
1630
+ }
1631
+
1632
+ /* Only wait for all commands to be queued in the Upload context */
1633
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1634
+ (lun == -1)) {
1635
+ while (atomic_read(&fcport->ios_to_queue)) {
1636
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1637
+ "Waiting for %d I/Os to be queued\n",
1638
+ atomic_read(&fcport->ios_to_queue));
1639
+ if (wait_cnt == 0) {
1640
+ QEDF_ERR(NULL,
1641
+ "%d IOs request could not be queued\n",
1642
+ atomic_read(&fcport->ios_to_queue));
1643
+ }
1644
+ msleep(20);
1645
+ wait_cnt--;
1646
+ }
1647
+ }
1648
+
14591649 cmd_mgr = qedf->cmd_mgr;
14601650
1461
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1651
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1652
+ "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1653
+ atomic_read(&fcport->num_active_ios), fcport,
1654
+ fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1655
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1656
+
1657
+ mutex_lock(&qedf->flush_mutex);
1658
+ if (lun == -1) {
1659
+ set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1660
+ } else {
1661
+ set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1662
+ fcport->lun_reset_lun = lun;
1663
+ }
14621664
14631665 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
14641666 io_req = &cmd_mgr->cmds[i];
14651667
14661668 if (!io_req)
14671669 continue;
1670
+ if (!io_req->fcport)
1671
+ continue;
1672
+
1673
+ spin_lock_irqsave(&cmd_mgr->lock, flags);
1674
+
1675
+ if (io_req->alloc) {
1676
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677
+ if (io_req->cmd_type == QEDF_SCSI_CMD)
1678
+ QEDF_ERR(&qedf->dbg_ctx,
1679
+ "Allocated but not queued, xid=0x%x\n",
1680
+ io_req->xid);
1681
+ }
1682
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1683
+ } else {
1684
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1685
+ continue;
1686
+ }
1687
+
14681688 if (io_req->fcport != fcport)
14691689 continue;
1470
- if (io_req->cmd_type == QEDF_ELS) {
1690
+
1691
+ /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1692
+ * but RRQ is still pending.
1693
+ * Workaround: Within qedf_send_rrq, we check if the fcport is
1694
+ * NULL, and we drop the ref on the io_req to clean it up.
1695
+ */
1696
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1697
+ refcount = kref_read(&io_req->refcount);
1698
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1699
+ "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1700
+ io_req->xid, io_req->cmd_type, refcount);
1701
+ /* If RRQ work has been queue, try to cancel it and
1702
+ * free the io_req
1703
+ */
1704
+ if (atomic_read(&io_req->state) ==
1705
+ QEDFC_CMD_ST_RRQ_WAIT) {
1706
+ if (cancel_delayed_work_sync
1707
+ (&io_req->rrq_work)) {
1708
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1709
+ "Putting reference for pending RRQ work xid=0x%x.\n",
1710
+ io_req->xid);
1711
+ /* ID: 003 */
1712
+ kref_put(&io_req->refcount,
1713
+ qedf_release_cmd);
1714
+ }
1715
+ }
1716
+ continue;
1717
+ }
1718
+
1719
+ /* Only consider flushing ELS during target reset */
1720
+ if (io_req->cmd_type == QEDF_ELS &&
1721
+ lun == -1) {
14711722 rc = kref_get_unless_zero(&io_req->refcount);
14721723 if (!rc) {
14731724 QEDF_ERR(&(qedf->dbg_ctx),
....@@ -1475,7 +1726,10 @@
14751726 io_req, io_req->xid);
14761727 continue;
14771728 }
1729
+ qedf_initiate_cleanup(io_req, false);
1730
+ flush_cnt++;
14781731 qedf_flush_els_req(qedf, io_req);
1732
+
14791733 /*
14801734 * Release the kref and go back to the top of the
14811735 * loop.
....@@ -1484,6 +1738,7 @@
14841738 }
14851739
14861740 if (io_req->cmd_type == QEDF_ABTS) {
1741
+ /* ID: 004 */
14871742 rc = kref_get_unless_zero(&io_req->refcount);
14881743 if (!rc) {
14891744 QEDF_ERR(&(qedf->dbg_ctx),
....@@ -1491,28 +1746,50 @@
14911746 io_req, io_req->xid);
14921747 continue;
14931748 }
1749
+ if (lun != -1 && io_req->lun != lun)
1750
+ goto free_cmd;
1751
+
14941752 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
14951753 "Flushing abort xid=0x%x.\n", io_req->xid);
14961754
1497
- clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1498
-
1499
- if (io_req->sc_cmd) {
1500
- if (io_req->return_scsi_cmd_on_abts)
1501
- qedf_scsi_done(qedf, io_req, DID_ERROR);
1755
+ if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1756
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1757
+ "Putting ref for cancelled RRQ work xid=0x%x.\n",
1758
+ io_req->xid);
1759
+ kref_put(&io_req->refcount, qedf_release_cmd);
15021760 }
15031761
1504
- /* Notify eh_abort handler that ABTS is complete */
1505
- complete(&io_req->abts_done);
1506
- kref_put(&io_req->refcount, qedf_release_cmd);
1507
-
1762
+ if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1763
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1764
+ "Putting ref for cancelled tmo work xid=0x%x.\n",
1765
+ io_req->xid);
1766
+ qedf_initiate_cleanup(io_req, true);
1767
+ /* Notify eh_abort handler that ABTS is
1768
+ * complete
1769
+ */
1770
+ complete(&io_req->abts_done);
1771
+ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1772
+ /* ID: 002 */
1773
+ kref_put(&io_req->refcount, qedf_release_cmd);
1774
+ }
1775
+ flush_cnt++;
15081776 goto free_cmd;
15091777 }
15101778
15111779 if (!io_req->sc_cmd)
15121780 continue;
1513
- if (lun > 0) {
1514
- if (io_req->sc_cmd->device->lun !=
1515
- (u64)lun)
1781
+ if (!io_req->sc_cmd->device) {
1782
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1783
+ "Device backpointer NULL for sc_cmd=%p.\n",
1784
+ io_req->sc_cmd);
1785
+ /* Put reference for non-existent scsi_cmnd */
1786
+ io_req->sc_cmd = NULL;
1787
+ qedf_initiate_cleanup(io_req, false);
1788
+ kref_put(&io_req->refcount, qedf_release_cmd);
1789
+ continue;
1790
+ }
1791
+ if (lun > -1) {
1792
+ if (io_req->lun != lun)
15161793 continue;
15171794 }
15181795
....@@ -1526,15 +1803,65 @@
15261803 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
15271804 continue;
15281805 }
1806
+
15291807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
15301808 "Cleanup xid=0x%x.\n", io_req->xid);
1809
+ flush_cnt++;
15311810
15321811 /* Cleanup task and return I/O mid-layer */
15331812 qedf_initiate_cleanup(io_req, true);
15341813
15351814 free_cmd:
1536
- kref_put(&io_req->refcount, qedf_release_cmd);
1815
+ kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
15371816 }
1817
+
1818
+ wait_cnt = 60;
1819
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1820
+ "Flushed 0x%x I/Os, active=0x%x.\n",
1821
+ flush_cnt, atomic_read(&fcport->num_active_ios));
1822
+ /* Only wait for all commands to complete in the Upload context */
1823
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1824
+ (lun == -1)) {
1825
+ while (atomic_read(&fcport->num_active_ios)) {
1826
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1827
+ "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1828
+ flush_cnt,
1829
+ atomic_read(&fcport->num_active_ios),
1830
+ wait_cnt);
1831
+ if (wait_cnt == 0) {
1832
+ QEDF_ERR(&qedf->dbg_ctx,
1833
+ "Flushed %d I/Os, active=%d.\n",
1834
+ flush_cnt,
1835
+ atomic_read(&fcport->num_active_ios));
1836
+ for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1837
+ io_req = &cmd_mgr->cmds[i];
1838
+ if (io_req->fcport &&
1839
+ io_req->fcport == fcport) {
1840
+ refcount =
1841
+ kref_read(&io_req->refcount);
1842
+ set_bit(QEDF_CMD_DIRTY,
1843
+ &io_req->flags);
1844
+ QEDF_ERR(&qedf->dbg_ctx,
1845
+ "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1846
+ io_req, io_req->xid,
1847
+ io_req->flags,
1848
+ io_req->sc_cmd,
1849
+ refcount,
1850
+ io_req->cmd_type);
1851
+ }
1852
+ }
1853
+ WARN_ON(1);
1854
+ break;
1855
+ }
1856
+ msleep(500);
1857
+ wait_cnt--;
1858
+ }
1859
+ }
1860
+
1861
+ clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1862
+ clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1863
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1864
+ mutex_unlock(&qedf->flush_mutex);
15381865 }
15391866
15401867 /*
....@@ -1548,58 +1875,70 @@
15481875 struct fc_rport_priv *rdata;
15491876 struct qedf_ctx *qedf;
15501877 u16 xid;
1551
- u32 r_a_tov = 0;
15521878 int rc = 0;
15531879 unsigned long flags;
15541880 struct fcoe_wqe *sqe;
15551881 u16 sqe_idx;
1882
+ int refcount = 0;
15561883
15571884 /* Sanity check qedf_rport before dereferencing any pointers */
15581885 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
15591886 QEDF_ERR(NULL, "tgt not offloaded\n");
15601887 rc = 1;
1561
- goto abts_err;
1888
+ goto out;
15621889 }
15631890
1564
- rdata = fcport->rdata;
1565
- r_a_tov = rdata->r_a_tov;
15661891 qedf = fcport->qedf;
1892
+ rdata = fcport->rdata;
1893
+
1894
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1895
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1896
+ rc = 1;
1897
+ goto out;
1898
+ }
1899
+
15671900 lport = qedf->lport;
15681901
15691902 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
15701903 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
15711904 rc = 1;
1572
- goto abts_err;
1905
+ goto drop_rdata_kref;
15731906 }
15741907
15751908 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
15761909 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
15771910 rc = 1;
1578
- goto abts_err;
1911
+ goto drop_rdata_kref;
15791912 }
15801913
15811914 /* Ensure room on SQ */
15821915 if (!atomic_read(&fcport->free_sqes)) {
15831916 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
15841917 rc = 1;
1585
- goto abts_err;
1918
+ goto drop_rdata_kref;
15861919 }
15871920
15881921 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
15891922 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
15901923 rc = 1;
1591
- goto out;
1924
+ goto drop_rdata_kref;
15921925 }
15931926
1927
+ spin_lock_irqsave(&fcport->rport_lock, flags);
15941928 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
15951929 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
15961930 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1597
- QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1598
- "cleanup or abort processing or already "
1599
- "completed.\n", io_req->xid);
1931
+ QEDF_ERR(&qedf->dbg_ctx,
1932
+ "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1933
+ io_req->xid, io_req->sc_cmd);
16001934 rc = 1;
1601
- goto out;
1935
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
1936
+ goto drop_rdata_kref;
16021937 }
1938
+
1939
+ /* Set the command type to abort */
1940
+ io_req->cmd_type = QEDF_ABTS;
1941
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
16031942
16041943 kref_get(&io_req->refcount);
16051944
....@@ -1607,18 +1946,15 @@
16071946 qedf->control_requests++;
16081947 qedf->packet_aborts++;
16091948
1610
- /* Set the return CPU to be the same as the request one */
1611
- io_req->cpu = smp_processor_id();
1612
-
1613
- /* Set the command type to abort */
1614
- io_req->cmd_type = QEDF_ABTS;
16151949 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
16161950
16171951 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1618
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1619
- "0x%x\n", xid);
1952
+ refcount = kref_read(&io_req->refcount);
1953
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1954
+ "ABTS io_req xid = 0x%x refcount=%d\n",
1955
+ xid, refcount);
16201956
1621
- qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1957
+ qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
16221958
16231959 spin_lock_irqsave(&fcport->rport_lock, flags);
16241960
....@@ -1632,13 +1968,8 @@
16321968
16331969 spin_unlock_irqrestore(&fcport->rport_lock, flags);
16341970
1635
- return rc;
1636
-abts_err:
1637
- /*
1638
- * If the ABTS task fails to queue then we need to cleanup the
1639
- * task at the firmware.
1640
- */
1641
- qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1971
+drop_rdata_kref:
1972
+ kref_put(&rdata->kref, fc_rport_destroy);
16421973 out:
16431974 return rc;
16441975 }
....@@ -1647,28 +1978,61 @@
16471978 struct qedf_ioreq *io_req)
16481979 {
16491980 uint32_t r_ctl;
1650
- uint16_t xid;
1981
+ int rc;
1982
+ struct qedf_rport *fcport = io_req->fcport;
16511983
16521984 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
16531985 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
16541986
1655
- cancel_delayed_work(&io_req->timeout_work);
1656
-
1657
- xid = io_req->xid;
16581987 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1988
+
1989
+ /* This was added at a point when we were scheduling abts_compl &
1990
+ * cleanup_compl on different CPUs and there was a possibility of
1991
+ * the io_req to be freed from the other context before we got here.
1992
+ */
1993
+ if (!fcport) {
1994
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1995
+ "Dropping ABTS completion xid=0x%x as fcport is NULL",
1996
+ io_req->xid);
1997
+ return;
1998
+ }
1999
+
2000
+ /*
2001
+ * When flush is active, let the cmds be completed from the cleanup
2002
+ * context
2003
+ */
2004
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
2005
+ test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
2006
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2007
+ "Dropping ABTS completion xid=0x%x as fcport is flushing",
2008
+ io_req->xid);
2009
+ return;
2010
+ }
2011
+
2012
+ if (!cancel_delayed_work(&io_req->timeout_work)) {
2013
+ QEDF_ERR(&qedf->dbg_ctx,
2014
+ "Wasn't able to cancel abts timeout work.\n");
2015
+ }
16592016
16602017 switch (r_ctl) {
16612018 case FC_RCTL_BA_ACC:
16622019 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
16632020 "ABTS response - ACC Send RRQ after R_A_TOV\n");
16642021 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2022
+ rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2023
+ if (!rc) {
2024
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2025
+ "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2026
+ io_req->xid);
2027
+ return;
2028
+ }
16652029 /*
16662030 * Dont release this cmd yet. It will be relesed
16672031 * after we get RRQ response
16682032 */
1669
- kref_get(&io_req->refcount);
16702033 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
16712034 msecs_to_jiffies(qedf->lport->r_a_tov));
2035
+ atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
16722036 break;
16732037 /* For error cases let the cleanup return the command */
16742038 case FC_RCTL_BA_RJT:
....@@ -1684,6 +2048,10 @@
16842048 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
16852049
16862050 if (io_req->sc_cmd) {
2051
+ if (!io_req->return_scsi_cmd_on_abts)
2052
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2053
+ "Not call scsi_done for xid=0x%x.\n",
2054
+ io_req->xid);
16872055 if (io_req->return_scsi_cmd_on_abts)
16882056 qedf_scsi_done(qedf, io_req, DID_ERROR);
16892057 }
....@@ -1803,13 +2171,12 @@
18032171 {
18042172 struct qedf_rport *fcport;
18052173 struct qedf_ctx *qedf;
1806
- uint16_t xid;
1807
- struct e4_fcoe_task_context *task;
18082174 int tmo = 0;
18092175 int rc = SUCCESS;
18102176 unsigned long flags;
18112177 struct fcoe_wqe *sqe;
18122178 u16 sqe_idx;
2179
+ int refcount = 0;
18132180
18142181 fcport = io_req->fcport;
18152182 if (!fcport) {
....@@ -1820,7 +2187,6 @@
18202187 /* Sanity check qedf_rport before dereferencing any pointers */
18212188 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
18222189 QEDF_ERR(NULL, "tgt not offloaded\n");
1823
- rc = 1;
18242190 return SUCCESS;
18252191 }
18262192
....@@ -1830,37 +2196,50 @@
18302196 return SUCCESS;
18312197 }
18322198
2199
+ if (io_req->cmd_type == QEDF_ELS) {
2200
+ goto process_els;
2201
+ }
2202
+
18332203 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1834
- test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2204
+ test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
18352205 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
18362206 "cleanup processing or already completed.\n",
18372207 io_req->xid);
18382208 return SUCCESS;
18392209 }
2210
+ set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
18402211
2212
+process_els:
18412213 /* Ensure room on SQ */
18422214 if (!atomic_read(&fcport->free_sqes)) {
18432215 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2216
+ /* Need to make sure we clear the flag since it was set */
2217
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
18442218 return FAILED;
18452219 }
18462220
2221
+ if (io_req->cmd_type == QEDF_CLEANUP) {
2222
+ QEDF_ERR(&qedf->dbg_ctx,
2223
+ "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2224
+ io_req->xid, io_req->cmd_type);
2225
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2226
+ return SUCCESS;
2227
+ }
18472228
1848
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1849
- io_req->xid);
2229
+ refcount = kref_read(&io_req->refcount);
2230
+
2231
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2232
+ "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2233
+ io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2234
+ refcount, fcport, fcport->rdata->ids.port_id);
18502235
18512236 /* Cleanup cmds re-use the same TID as the original I/O */
1852
- xid = io_req->xid;
2237
+ spin_lock_irqsave(&fcport->rport_lock, flags);
18532238 io_req->cmd_type = QEDF_CLEANUP;
2239
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
18542240 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
18552241
1856
- /* Set the return CPU to be the same as the request one */
1857
- io_req->cpu = smp_processor_id();
1858
-
1859
- set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1860
-
1861
- task = qedf_get_task_mem(&qedf->tasks, xid);
1862
-
1863
- init_completion(&io_req->tm_done);
2242
+ init_completion(&io_req->cleanup_done);
18642243
18652244 spin_lock_irqsave(&fcport->rport_lock, flags);
18662245
....@@ -1874,8 +2253,8 @@
18742253
18752254 spin_unlock_irqrestore(&fcport->rport_lock, flags);
18762255
1877
- tmo = wait_for_completion_timeout(&io_req->tm_done,
1878
- QEDF_CLEANUP_TIMEOUT * HZ);
2256
+ tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2257
+ QEDF_CLEANUP_TIMEOUT * HZ);
18792258
18802259 if (!tmo) {
18812260 rc = FAILED;
....@@ -1888,7 +2267,22 @@
18882267 qedf_drain_request(qedf);
18892268 }
18902269
2270
+ /* If it TASK MGMT handle it, reference will be decreased
2271
+ * in qedf_execute_tmf
2272
+ */
2273
+ if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2274
+ io_req->tm_flags == FCP_TMF_TGT_RESET) {
2275
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2276
+ io_req->sc_cmd = NULL;
2277
+ kref_put(&io_req->refcount, qedf_release_cmd);
2278
+ complete(&io_req->tm_done);
2279
+ }
2280
+
18912281 if (io_req->sc_cmd) {
2282
+ if (!io_req->return_scsi_cmd_on_abts)
2283
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2284
+ "Not call scsi_done for xid=0x%x.\n",
2285
+ io_req->xid);
18922286 if (io_req->return_scsi_cmd_on_abts)
18932287 qedf_scsi_done(qedf, io_req, DID_ERROR);
18942288 }
....@@ -1910,7 +2304,7 @@
19102304 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
19112305
19122306 /* Complete so we can finish cleaning up the I/O */
1913
- complete(&io_req->tm_done);
2307
+ complete(&io_req->cleanup_done);
19142308 }
19152309
19162310 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
....@@ -1923,29 +2317,28 @@
19232317 int rc = 0;
19242318 uint16_t xid;
19252319 int tmo = 0;
2320
+ int lun = 0;
19262321 unsigned long flags;
19272322 struct fcoe_wqe *sqe;
19282323 u16 sqe_idx;
19292324
19302325 if (!sc_cmd) {
1931
- QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
2326
+ QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
19322327 return FAILED;
19332328 }
19342329
2330
+ lun = (int)sc_cmd->device->lun;
19352331 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
19362332 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
19372333 rc = FAILED;
1938
- return FAILED;
2334
+ goto no_flush;
19392335 }
1940
-
1941
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
1942
- "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
19432336
19442337 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
19452338 if (!io_req) {
19462339 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
19472340 rc = -EAGAIN;
1948
- goto reset_tmf_err;
2341
+ goto no_flush;
19492342 }
19502343
19512344 if (tm_flags == FCP_TMF_LUN_RESET)
....@@ -1958,7 +2351,7 @@
19582351 io_req->fcport = fcport;
19592352 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
19602353
1961
- /* Set the return CPU to be the same as the request one */
2354
+ /* Record which cpu this request is associated with */
19622355 io_req->cpu = smp_processor_id();
19632356
19642357 /* Set TM flags */
....@@ -1967,7 +2360,7 @@
19672360 io_req->tm_flags = tm_flags;
19682361
19692362 /* Default is to return a SCSI command when an error occurs */
1970
- io_req->return_scsi_cmd_on_abts = true;
2363
+ io_req->return_scsi_cmd_on_abts = false;
19712364
19722365 /* Obtain exchange id */
19732366 xid = io_req->xid;
....@@ -1991,12 +2384,16 @@
19912384
19922385 spin_unlock_irqrestore(&fcport->rport_lock, flags);
19932386
2387
+ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
19942388 tmo = wait_for_completion_timeout(&io_req->tm_done,
19952389 QEDF_TM_TIMEOUT * HZ);
19962390
19972391 if (!tmo) {
19982392 rc = FAILED;
19992393 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2394
+ /* Clear outstanding bit since command timed out */
2395
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2396
+ io_req->sc_cmd = NULL;
20002397 } else {
20012398 /* Check TMF response code */
20022399 if (io_req->fcp_rsp_code == 0)
....@@ -2004,14 +2401,25 @@
20042401 else
20052402 rc = FAILED;
20062403 }
2404
+ /*
2405
+ * Double check that fcport has not gone into an uploading state before
2406
+ * executing the command flush for the LUN/target.
2407
+ */
2408
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2409
+ QEDF_ERR(&qedf->dbg_ctx,
2410
+ "fcport is uploading, not executing flush.\n");
2411
+ goto no_flush;
2412
+ }
2413
+ /* We do not need this io_req any more */
2414
+ kref_put(&io_req->refcount, qedf_release_cmd);
2415
+
20072416
20082417 if (tm_flags == FCP_TMF_LUN_RESET)
2009
- qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2418
+ qedf_flush_active_ios(fcport, lun);
20102419 else
20112420 qedf_flush_active_ios(fcport, -1);
20122421
2013
- kref_put(&io_req->refcount, qedf_release_cmd);
2014
-
2422
+no_flush:
20152423 if (rc != SUCCESS) {
20162424 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
20172425 rc = FAILED;
....@@ -2019,7 +2427,6 @@
20192427 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
20202428 rc = SUCCESS;
20212429 }
2022
-reset_tmf_err:
20232430 return rc;
20242431 }
20252432
....@@ -2029,26 +2436,65 @@
20292436 struct fc_rport_libfc_priv *rp = rport->dd_data;
20302437 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
20312438 struct qedf_ctx *qedf;
2032
- struct fc_lport *lport;
2439
+ struct fc_lport *lport = shost_priv(sc_cmd->device->host);
20332440 int rc = SUCCESS;
20342441 int rval;
2442
+ struct qedf_ioreq *io_req = NULL;
2443
+ int ref_cnt = 0;
2444
+ struct fc_rport_priv *rdata = fcport->rdata;
2445
+
2446
+ QEDF_ERR(NULL,
2447
+ "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2448
+ tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2449
+ rport->scsi_target_id, (int)sc_cmd->device->lun);
2450
+
2451
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2452
+ QEDF_ERR(NULL, "stale rport\n");
2453
+ return FAILED;
2454
+ }
2455
+
2456
+ QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
2457
+ (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2458
+ "LUN RESET");
2459
+
2460
+ if (sc_cmd->SCp.ptr) {
2461
+ io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2462
+ ref_cnt = kref_read(&io_req->refcount);
2463
+ QEDF_ERR(NULL,
2464
+ "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2465
+ io_req, io_req->xid, ref_cnt);
2466
+ }
20352467
20362468 rval = fc_remote_port_chkready(rport);
2037
-
20382469 if (rval) {
20392470 QEDF_ERR(NULL, "device_reset rport not ready\n");
20402471 rc = FAILED;
20412472 goto tmf_err;
20422473 }
20432474
2044
- if (fcport == NULL) {
2475
+ rc = fc_block_scsi_eh(sc_cmd);
2476
+ if (rc)
2477
+ goto tmf_err;
2478
+
2479
+ if (!fcport) {
20452480 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
20462481 rc = FAILED;
20472482 goto tmf_err;
20482483 }
20492484
20502485 qedf = fcport->qedf;
2051
- lport = qedf->lport;
2486
+
2487
+ if (!qedf) {
2488
+ QEDF_ERR(NULL, "qedf is NULL.\n");
2489
+ rc = FAILED;
2490
+ goto tmf_err;
2491
+ }
2492
+
2493
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2494
+ QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2495
+ rc = SUCCESS;
2496
+ goto tmf_err;
2497
+ }
20522498
20532499 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
20542500 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
....@@ -2062,9 +2508,22 @@
20622508 goto tmf_err;
20632509 }
20642510
2511
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2512
+ if (!fcport->rdata)
2513
+ QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2514
+ fcport);
2515
+ else
2516
+ QEDF_ERR(&qedf->dbg_ctx,
2517
+ "fcport %p port_id=%06x is uploading.\n",
2518
+ fcport, fcport->rdata->ids.port_id);
2519
+ rc = FAILED;
2520
+ goto tmf_err;
2521
+ }
2522
+
20652523 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
20662524
20672525 tmf_err:
2526
+ kref_put(&rdata->kref, fc_rport_destroy);
20682527 return rc;
20692528 }
20702529
....@@ -2072,6 +2531,8 @@
20722531 struct qedf_ioreq *io_req)
20732532 {
20742533 struct fcoe_cqe_rsp_info *fcp_rsp;
2534
+
2535
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
20752536
20762537 fcp_rsp = &cqe->cqe_info.rsp_info;
20772538 qedf_parse_fcp_rsp(io_req, fcp_rsp);
....@@ -2084,7 +2545,6 @@
20842545 struct fcoe_cqe *cqe)
20852546 {
20862547 unsigned long flags;
2087
- uint16_t tmp;
20882548 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
20892549 u32 payload_len, crc;
20902550 struct fc_frame_header *fh;
....@@ -2136,6 +2596,11 @@
21362596 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
21372597 memcpy(fh, (void *)bdq_addr, pktlen);
21382598
2599
+ QEDF_WARN(&qedf->dbg_ctx,
2600
+ "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2601
+ ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2602
+ fh->fh_type, fc_frame_payload_op(fp));
2603
+
21392604 /* Initialize the frame so libfc sees it as a valid frame */
21402605 crc = fcoe_fc_crc(fp);
21412606 fc_frame_init(fp);
....@@ -2177,9 +2642,9 @@
21772642 qedf->bdq_prod_idx = 0;
21782643
21792644 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2180
- tmp = readw(qedf->bdq_primary_prod);
2645
+ readw(qedf->bdq_primary_prod);
21812646 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2182
- tmp = readw(qedf->bdq_secondary_prod);
2647
+ readw(qedf->bdq_secondary_prod);
21832648
21842649 spin_unlock_irqrestore(&qedf->hba_lock, flags);
21852650 }