+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
...
         container_of(work, struct qedf_ioreq, timeout_work.work);
     struct qedf_ctx *qedf;
     struct qedf_rport *fcport;
-    u8 op = 0;
 
     if (io_req == NULL) {
         QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
...
     switch (io_req->cmd_type) {
     case QEDF_ABTS:
         if (qedf == NULL) {
-            QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
-                io_req->xid);
+            QEDF_INFO(NULL, QEDF_LOG_IO,
+                  "qedf is NULL for ABTS xid=0x%x.\n",
+                  io_req->xid);
             return;
         }
 
...
         */
        kref_put(&io_req->refcount, qedf_release_cmd);
 
+       /* Clear in abort bit now that we're done with the command */
+       clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
        /*
         * Now that the original I/O and the ABTS are complete see
         * if we need to reconnect to the target.
...
        qedf_restart_rport(fcport);
        break;
    case QEDF_ELS:
+       if (!qedf) {
+           QEDF_INFO(NULL, QEDF_LOG_IO,
+                 "qedf is NULL for ELS xid=0x%x.\n",
+                 io_req->xid);
+           return;
+       }
+       /* ELS request no longer outstanding since it timed out */
+       clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
        kref_get(&io_req->refcount);
        /*
         * Don't attempt to clean an ELS timeout as any subsequent
...
         */
        QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
            io_req->xid);
+       qedf_initiate_cleanup(io_req, true);
        io_req->event = QEDF_IOREQ_EV_ELS_TMO;
        /* Call callback function to complete command */
        if (io_req->cb_func && io_req->cb_arg) {
-           op = io_req->cb_arg->op;
            io_req->cb_func(io_req->cb_arg);
            io_req->cb_arg = NULL;
        }
-       qedf_initiate_cleanup(io_req, true);
        kref_put(&io_req->refcount, qedf_release_cmd);
        break;
    case QEDF_SEQ_CLEANUP:
...
        qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
        break;
    default:
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "Hit default case, xid=0x%x.\n", io_req->xid);
        break;
    }
 }
...
    struct io_bdt *bdt_info;
    struct qedf_ctx *qedf = cmgr->qedf;
    size_t bd_tbl_sz;
-   u16 min_xid = QEDF_MIN_XID;
+   u16 min_xid = 0;
    u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
    int num_ios;
    int i;
...
    num_ios = max_xid - min_xid + 1;
 
    /* Free fcoe_bdt_ctx structures */
-   if (!cmgr->io_bdt_pool)
+   if (!cmgr->io_bdt_pool) {
+       QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
        goto free_cmd_pool;
+   }
 
    bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
    for (i = 0; i < num_ios; i++) {
...
    struct qedf_ioreq *io_req =
        container_of(work, struct qedf_ioreq, rrq_work.work);
 
+   atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
    qedf_send_rrq(io_req);
 
 }
...
    u16 xid;
    int i;
    int num_ios;
-   u16 min_xid = QEDF_MIN_XID;
+   u16 min_xid = 0;
    u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
 
    /* Make sure num_queues is already set before calling this function */
...
    /*
     * Initialize I/O request fields.
     */
-   xid = QEDF_MIN_XID;
+   xid = 0;
 
    for (i = 0; i < num_ios; i++) {
        io_req = &cmgr->cmds[i];
...
        io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
            QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
            GFP_KERNEL);
-       if (!io_req->sense_buffer)
+       if (!io_req->sense_buffer) {
+           QEDF_ERR(&qedf->dbg_ctx,
+                "Failed to alloc sense buffer.\n");
            goto mem_err;
+       }
 
        /* Allocate task parameters to pass to f/w init functions */
        io_req->task_params = kzalloc(sizeof(*io_req->task_params),
...
            cmd_mgr->idx = 0;
 
        /* Check to make sure command was previously freed */
-       if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+       if (!io_req->alloc)
            break;
    }
 
...
        goto out_failed;
    }
 
-   set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+   if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
+       QEDF_ERR(&qedf->dbg_ctx,
+            "io_req found to be dirty ox_id = 0x%x.\n",
+            io_req->xid);
+
+   /* Clear any flags now that we've reallocated the xid */
+   io_req->flags = 0;
+   io_req->alloc = 1;
    spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 
    atomic_inc(&fcport->num_active_ios);
...
    io_req->cmd_mgr = cmd_mgr;
    io_req->fcport = fcport;
 
+   /* Clear any stale sc_cmd back pointer */
+   io_req->sc_cmd = NULL;
+   io_req->lun = -1;
+
    /* Hold the io_req against deletion */
-   kref_init(&io_req->refcount);
+   kref_init(&io_req->refcount);   /* ID: 001 */
+   atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
 
    /* Bind io_bdt for this io_req */
    /* Have a static link between io_req and io_bdt_pool */
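The allocator above now decides slot reuse from the per-command `alloc` flag taken under cmd_mgr->lock, while QEDF_CMD_OUTSTANDING is reserved for "command is with the firmware" and QEDF_CMD_DIRTY marks slots that were reclaimed without a clean release; the matching clear happens under the same lock in qedf_release_cmd() below. A minimal sketch of that slot-allocation pattern, under the assumption of a fixed command array guarded by one spinlock (`slot_mgr` and `slot_alloc` are illustrative names, not driver symbols):

/* Kernel-context sketch; mirrors the alloc-flag-under-lock scheme above. */
struct slot_mgr {
    spinlock_t lock;
    u16 idx;
    u16 num_slots;
    struct {
        unsigned long flags;
        u8 alloc;
    } *cmds;
};

static int slot_alloc(struct slot_mgr *m)
{
    unsigned long flags;
    int i, slot = -1;

    spin_lock_irqsave(&m->lock, flags);
    for (i = 0; i < m->num_slots; i++) {
        u16 cur = m->idx++;

        if (m->idx == m->num_slots)
            m->idx = 0;
        if (!m->cmds[cur].alloc) {      /* previously freed slot */
            m->cmds[cur].flags = 0;     /* drop any stale state */
            m->cmds[cur].alloc = 1;
            slot = cur;
            break;
        }
    }
    spin_unlock_irqrestore(&m->lock, flags);
    return slot;
}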
...
        container_of(ref, struct qedf_ioreq, refcount);
    struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
    struct qedf_rport *fcport = io_req->fcport;
+   unsigned long flags;
+
+   if (io_req->cmd_type == QEDF_SCSI_CMD) {
+       QEDF_WARN(&fcport->qedf->dbg_ctx,
+             "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
+             io_req, io_req->xid);
+       WARN_ON(io_req->sc_cmd);
+   }
 
    if (io_req->cmd_type == QEDF_ELS ||
        io_req->cmd_type == QEDF_TASK_MGMT_CMD)
...
 
    atomic_inc(&cmd_mgr->free_list_cnt);
    atomic_dec(&fcport->num_active_ios);
-   if (atomic_read(&fcport->num_active_ios) < 0)
+   atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
+   if (atomic_read(&fcport->num_active_ios) < 0) {
        QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
+       WARN_ON(1);
+   }
 
    /* Increment task retry identifier now that the request is released */
    io_req->task_retry_identifier++;
+   io_req->fcport = NULL;
 
-   clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
-}
-
-static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
-   int bd_index)
-{
-   struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
-   int frag_size, sg_frags;
-
-   sg_frags = 0;
-   while (sg_len) {
-       if (sg_len > QEDF_BD_SPLIT_SZ)
-           frag_size = QEDF_BD_SPLIT_SZ;
-       else
-           frag_size = sg_len;
-       bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
-       bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-       bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
-
-       addr += (u64)frag_size;
-       sg_frags++;
-       sg_len -= frag_size;
-   }
-   return sg_frags;
+   clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
+   io_req->cpu = 0;
+   spin_lock_irqsave(&cmd_mgr->lock, flags);
+   io_req->fcport = NULL;
+   io_req->alloc = 0;
+   spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 }
 
 static int qedf_map_sg(struct qedf_ioreq *io_req)
...
    int byte_count = 0;
    int sg_count = 0;
    int bd_count = 0;
-   int sg_frags;
-   unsigned int sg_len;
-   u64 addr, end_addr;
-   int i;
+   u32 sg_len;
+   u64 addr;
+   int i = 0;
 
    sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
        scsi_sg_count(sc), sc->sc_data_direction);
-
    sg = scsi_sglist(sc);
 
-   /*
-    * New condition to send single SGE as cached-SGL with length less
-    * than 64k.
-    */
-   if ((sg_count == 1) && (sg_dma_len(sg) <=
-       QEDF_MAX_SGLEN_FOR_CACHESGL)) {
-       sg_len = sg_dma_len(sg);
-       addr = (u64)sg_dma_address(sg);
+   io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
 
-       bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
-       bd[bd_count].sge_addr.hi = (addr >> 32);
-       bd[bd_count].sge_len = (u16)sg_len;
-
-       return ++bd_count;
-   }
+   if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
+       io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 
    scsi_for_each_sg(sc, sg, sg_count, i) {
-       sg_len = sg_dma_len(sg);
+       sg_len = (u32)sg_dma_len(sg);
        addr = (u64)sg_dma_address(sg);
-       end_addr = (u64)(addr + sg_len);
 
        /*
-        * First s/g element in the list so check if the end_addr
-        * is paged aligned. Also check to make sure the length is
-        * at least page size.
+        * Intermediate s/g element so check if start address
+        * is page aligned. Only required for writes and only if the
+        * number of scatter/gather elements is 8 or more.
         */
-       if ((i == 0) && (sg_count > 1) &&
-           ((end_addr % QEDF_PAGE_SIZE) ||
-           sg_len < QEDF_PAGE_SIZE))
-           io_req->use_slowpath = true;
-       /*
-        * Last s/g element so check if the start address is paged
-        * aligned.
-        */
-       else if ((i == (sg_count - 1)) && (sg_count > 1) &&
-           (addr % QEDF_PAGE_SIZE))
-           io_req->use_slowpath = true;
-       /*
-        * Intermediate s/g element so check if start and end address
-        * is page aligned.
-        */
-       else if ((i != 0) && (i != (sg_count - 1)) &&
-           ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
-           io_req->use_slowpath = true;
+       if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
+           (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
+           io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 
-       if (sg_len > QEDF_MAX_BD_LEN) {
-           sg_frags = qedf_split_bd(io_req, addr, sg_len,
-               bd_count);
-       } else {
-           sg_frags = 1;
-           bd[bd_count].sge_addr.lo = U64_LO(addr);
-           bd[bd_count].sge_addr.hi = U64_HI(addr);
-           bd[bd_count].sge_len = (uint16_t)sg_len;
-       }
+       bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
+       bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
+       bd[bd_count].sge_len = cpu_to_le32(sg_len);
 
-       bd_count += sg_frags;
+       bd_count++;
        byte_count += sg_len;
    }
+
+   /* If neither FAST nor SLOW was determined above, default to FAST */
+   if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
+       io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 
    if (byte_count != scsi_bufflen(sc))
        QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
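Taken together, the qedf_map_sg() changes above replace the old per-element slow-path checks with a single classification rule: eight or fewer elements, or any read, is fast; a sub-page intermediate element on a larger write forces slow; and anything still undecided defaults to fast. A standalone restatement of just that rule, as a hypothetical helper (enum values mirror the driver's QEDF_IOREQ_*_SGE names):

/* Sketch of the SGE classification implemented above. */
enum sge_type { UNKNOWN_SGE, FAST_SGE, SLOW_SGE };

static enum sge_type classify_sgl(const u32 *sg_len, int sg_count,
                  bool is_read, u32 page_size)
{
    enum sge_type type = UNKNOWN_SGE;
    int i;

    /* Small lists and reads always take the fast path */
    if (sg_count <= 8 || is_read)
        type = FAST_SGE;

    /* A short *intermediate* element on a large write forces slow */
    for (i = 0; i < sg_count; i++)
        if (type == UNKNOWN_SGE && i && i != sg_count - 1 &&
            sg_len[i] < page_size)
            type = SLOW_SGE;

    /* Anything still undecided defaults to fast */
    return type == UNKNOWN_SGE ? FAST_SGE : type;
}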
...
        io_req->sgl_task_params->num_sges = bd_count;
        io_req->sgl_task_params->total_buffer_size =
            scsi_bufflen(io_req->sc_cmd);
-       io_req->sgl_task_params->small_mid_sge =
-           io_req->use_slowpath;
+       if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
+           io_req->sgl_task_params->small_mid_sge = 1;
+       else
+           io_req->sgl_task_params->small_mid_sge = 0;
    }
 
    /* Fill in physical address of sense buffer */
...
        io_req->task_retry_identifier, fcp_cmnd);
 
    /* Increment SGL type counters */
-   if (bd_count == 1) {
-       qedf->single_sge_ios++;
-       io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-   } else if (io_req->use_slowpath) {
+   if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
        qedf->slow_sge_ios++;
-       io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
-   } else {
+   else
        qedf->fast_sge_ios++;
-       io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-   }
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
...
                     &task_fc_hdr,
                     &tx_sgl_task_params,
                     &rx_sgl_task_params, 0);
-
-   /* Midpath requests always consume 1 SGE */
-   qedf->single_sge_ios++;
 }
 
 /* Presumed that fcport->rport_lock is held */
...
        FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
 
    dbell.sq_prod = fcport->fw_sq_prod_idx;
-   writel(*(u32 *)&dbell, fcport->p_doorbell);
-   /* Make sure SQ index is updated so f/w prcesses requests in order */
+   /* wmb makes sure that the BDs data is updated before updating the
+    * producer, otherwise FW may read old data from the BDs.
+    */
    wmb();
-   mmiowb();
+   barrier();
+   writel(*(u32 *)&dbell, fcport->p_doorbell);
+   /*
+    * Fence required to flush the write combined buffer, since another
+    * CPU may write to the same doorbell address and data may be lost
+    * due to relaxed order nature of write combined bar.
+    */
+   wmb();
 }
 
 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
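The doorbell rewrite above is a memory-ordering fix: the barrier now precedes the MMIO write so the firmware cannot fetch stale BDs, and a second barrier afterwards drains the write-combining buffer. The same shape in isolation, as a hedged sketch (the helper and its arguments are illustrative, not driver symbols):

/* General producer/doorbell ordering pattern used above. */
static void ring_doorbell(void __iomem *db, u32 prod_val)
{
    /*
     * Order all prior stores to the SQ/BD memory before the doorbell
     * write, or the device may fetch stale descriptors.
     */
    wmb();
    writel(prod_val, db);
    /*
     * Flush the write-combining buffer so a doorbell write from
     * another CPU to the same address cannot be merged or lost.
     */
    wmb();
}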
...
    struct qedf_ctx *qedf = lport_priv(lport);
    struct e4_fcoe_task_context *task_ctx;
    u16 xid;
-   enum fcoe_task_type req_type = 0;
    struct fcoe_wqe *sqe;
    u16 sqe_idx;
 
    /* Initialize rest of io_req fields */
    io_req->data_xfer_len = scsi_bufflen(sc_cmd);
    sc_cmd->SCp.ptr = (char *)io_req;
-   io_req->use_slowpath = false; /* Assume fast SGL by default */
+   io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
 
    /* Record which cpu this request is associated with */
    io_req->cpu = smp_processor_id();
 
    if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-       req_type = FCOE_TASK_TYPE_READ_INITIATOR;
        io_req->io_req_flags = QEDF_READ;
        qedf->input_requests++;
    } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
-       req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
        io_req->io_req_flags = QEDF_WRITE;
        qedf->output_requests++;
    } else {
...
    /* Build buffer descriptor list for firmware from sg list */
    if (qedf_build_bd_list_from_sg(io_req)) {
        QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
+       /* Release cmd will release io_req, but sc_cmd is assigned */
+       io_req->sc_cmd = NULL;
        kref_put(&io_req->refcount, qedf_release_cmd);
        return -EAGAIN;
    }
 
-   if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+   if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+       test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
        QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+       /* Release cmd will release io_req, but sc_cmd is assigned */
+       io_req->sc_cmd = NULL;
        kref_put(&io_req->refcount, qedf_release_cmd);
        return -EINVAL;
    }
+
+   /* Record LUN number for later use if we need it */
+   io_req->lun = (int)sc_cmd->device->lun;
 
    /* Obtain free SQE */
    sqe_idx = qedf_get_sqe_idx(fcport);
...
    if (!task_ctx) {
        QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
            xid);
+       /* Release cmd will release io_req, but sc_cmd is assigned */
+       io_req->sc_cmd = NULL;
        kref_put(&io_req->refcount, qedf_release_cmd);
        return -EINVAL;
    }
...
 
    /* Ring doorbell */
    qedf_ring_doorbell(fcport);
+
+   /* Set that command is with the firmware now */
+   set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
 
    if (qedf_io_tracing && io_req->sc_cmd)
        qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
...
    int rc = 0;
    int rval;
    unsigned long flags = 0;
+   int num_sgs = 0;
 
+   num_sgs = scsi_sg_count(sc_cmd);
+   if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "Number of SG elements %d exceeds the hardware limitation of %d.\n",
+            num_sgs, QEDF_MAX_BDS_PER_CMD);
+       sc_cmd->result = DID_ERROR;
+       sc_cmd->scsi_done(sc_cmd);
+       return 0;
+   }
 
    if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
        test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "Returning DNC as unloading or stop io, flags 0x%lx.\n",
+             qedf->flags);
        sc_cmd->result = DID_NO_CONNECT << 16;
        sc_cmd->scsi_done(sc_cmd);
        return 0;
...
 
    rval = fc_remote_port_chkready(rport);
    if (rval) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
+             rval, rport->port_id);
        sc_cmd->result = rval;
        sc_cmd->scsi_done(sc_cmd);
        return 0;
...
 
    /* Retry command if we are doing a qed drain operation */
    if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
        rc = SCSI_MLQUEUE_HOST_BUSY;
        goto exit_qcmd;
    }
 
    if (lport->state != LPORT_ST_READY ||
        atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
        rc = SCSI_MLQUEUE_HOST_BUSY;
        goto exit_qcmd;
    }
...
    /* rport and tgt are allocated together, so tgt should be non-NULL */
    fcport = (struct qedf_rport *)&rp[1];
 
-   if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+   if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+       test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
        /*
         * Session is not offloaded yet. Let SCSI-ml retry
         * the command.
...
        rc = SCSI_MLQUEUE_TARGET_BUSY;
        goto exit_qcmd;
    }
+
+   atomic_inc(&fcport->ios_to_queue);
+
    if (fcport->retry_delay_timestamp) {
+       /* Take fcport->rport_lock for resetting the delay_timestamp */
+       spin_lock_irqsave(&fcport->rport_lock, flags);
        if (time_after(jiffies, fcport->retry_delay_timestamp)) {
            fcport->retry_delay_timestamp = 0;
        } else {
+           spin_unlock_irqrestore(&fcport->rport_lock, flags);
            /* If retry_delay timer is active, flow off the ML */
            rc = SCSI_MLQUEUE_TARGET_BUSY;
+           atomic_dec(&fcport->ios_to_queue);
            goto exit_qcmd;
        }
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
    }
 
    io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
    if (!io_req) {
        rc = SCSI_MLQUEUE_HOST_BUSY;
+       atomic_dec(&fcport->ios_to_queue);
        goto exit_qcmd;
    }
 
...
        rc = SCSI_MLQUEUE_HOST_BUSY;
    }
    spin_unlock_irqrestore(&fcport->rport_lock, flags);
+   atomic_dec(&fcport->ios_to_queue);
 
 exit_qcmd:
    return rc;
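Note that the retry-delay test above is now bracketed by fcport->rport_lock, because the completion path later in this patch writes the same timestamp. A sketch of that locked jiffies-based throttle, with a stand-in `port_state` type (illustrative, not a driver structure):

/* I/O path tests/clears the timestamp; the completion path arms it. */
struct port_state {
    spinlock_t lock;
    unsigned long retry_delay_timestamp;    /* in jiffies, 0 = inactive */
};

static bool port_throttled(struct port_state *port)
{
    unsigned long flags;
    bool busy = false;

    spin_lock_irqsave(&port->lock, flags);
    if (port->retry_delay_timestamp) {
        if (time_after(jiffies, port->retry_delay_timestamp))
            port->retry_delay_timestamp = 0;    /* delay expired */
        else
            busy = true;    /* flow off the midlayer */
    }
    spin_unlock_irqrestore(&port->lock, flags);
    return busy;
}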
...
 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    struct qedf_ioreq *io_req)
 {
-   u16 xid, rval;
-   struct e4_fcoe_task_context *task_ctx;
    struct scsi_cmnd *sc_cmd;
    struct fcoe_cqe_rsp_info *fcp_rsp;
    struct qedf_rport *fcport;
    int refcount;
    u16 scope, qualifier = 0;
    u8 fw_residual_flag = 0;
+   unsigned long flags = 0;
+   u16 chk_scope = 0;
 
    if (!io_req)
        return;
    if (!cqe)
        return;
 
-   xid = io_req->xid;
-   task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
+   if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+       test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+       test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
+            io_req->xid);
+       return;
+   }
+
    sc_cmd = io_req->sc_cmd;
    fcp_rsp = &cqe->cqe_info.rsp_info;
 
...
        return;
    }
 
-   if (!sc_cmd->request) {
-       QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
-           "sc_cmd=%p.\n", sc_cmd);
+   if (!sc_cmd->device) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "Device for sc_cmd %p is NULL.\n", sc_cmd);
        return;
    }
 
-   if (!sc_cmd->request->special) {
-       QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
-           "request not valid, sc_cmd=%p.\n", sc_cmd);
+   if (!sc_cmd->request) {
+       QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
+           "sc_cmd=%p.\n", sc_cmd);
        return;
    }
 
...
    }
 
    fcport = io_req->fcport;
+
+   /*
+    * When flush is active, let the cmds be completed from the cleanup
+    * context
+    */
+   if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+       (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
+        sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "Dropping good completion xid=0x%x as fcport is flushing",
+             io_req->xid);
+       return;
+   }
 
    qedf_parse_fcp_rsp(io_req, fcp_rsp);
1147 | 1198 | |
---|
.. | .. |
---|
1160 | 1211 | fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, |
---|
1161 | 1212 | FCOE_CQE_RSP_INFO_FW_UNDERRUN); |
---|
1162 | 1213 | if (fw_residual_flag) { |
---|
1163 | | - QEDF_ERR(&(qedf->dbg_ctx), |
---|
1164 | | - "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x " |
---|
1165 | | - "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid, |
---|
1166 | | - fcp_rsp->rsp_flags.flags, io_req->fcp_resid, |
---|
1167 | | - cqe->cqe_info.rsp_info.fw_residual); |
---|
| 1214 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 1215 | + "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n", |
---|
| 1216 | + io_req->xid, fcp_rsp->rsp_flags.flags, |
---|
| 1217 | + io_req->fcp_resid, |
---|
| 1218 | + cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2], |
---|
| 1219 | + sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]); |
---|
1168 | 1220 | |
---|
1169 | 1221 | if (io_req->cdb_status == 0) |
---|
1170 | 1222 | sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; |
---|
1171 | 1223 | else |
---|
1172 | 1224 | sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; |
---|
1173 | | - |
---|
1174 | | - /* Abort the command since we did not get all the data */ |
---|
1175 | | - init_completion(&io_req->abts_done); |
---|
1176 | | - rval = qedf_initiate_abts(io_req, true); |
---|
1177 | | - if (rval) { |
---|
1178 | | - QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); |
---|
1179 | | - sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; |
---|
1180 | | - } |
---|
1181 | 1225 | |
---|
1182 | 1226 | /* |
---|
1183 | 1227 | * Set resid to the whole buffer length so we won't try to resue |
---|
...
                /* Lower 14 bits */
                qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
 
-               if (qedf_retry_delay &&
-                   scope > 0 && qualifier > 0 &&
-                   qualifier <= 0x3FEF) {
-                   /* Check we don't go over the max */
-                   if (qualifier > QEDF_RETRY_DELAY_MAX)
-                       qualifier =
-                           QEDF_RETRY_DELAY_MAX;
-                   fcport->retry_delay_timestamp =
-                       jiffies + (qualifier * HZ / 10);
-               }
+               if (qedf_retry_delay)
+                   chk_scope = 1;
                /* Record stats */
                if (io_req->cdb_status ==
                    SAM_STAT_TASK_SET_FULL)
...
        }
        if (io_req->fcp_resid)
            scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+       if (chk_scope == 1) {
+           if ((scope == 1 || scope == 2) &&
+               (qualifier > 0 && qualifier <= 0x3FEF)) {
+               /* Check we don't go over the max */
+               if (qualifier > QEDF_RETRY_DELAY_MAX) {
+                   qualifier = QEDF_RETRY_DELAY_MAX;
+                   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "qualifier = %d\n",
+                         (fcp_rsp->retry_delay_timer &
+                          0x3FFF));
+               }
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                     "Scope = %d and qualifier = %d",
+                     scope, qualifier);
+               /*
+                * Take fcport->rport_lock to update the
+                * retry_delay_timestamp
+                */
+               spin_lock_irqsave(&fcport->rport_lock, flags);
+               fcport->retry_delay_timestamp =
+                   jiffies + (qualifier * HZ / 10);
+               spin_unlock_irqrestore(&fcport->rport_lock,
+                              flags);
+           } else {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                     "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+                     scope, qualifier);
+           }
+       }
        break;
    default:
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
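The deferred scope/qualifier handling above only arms the delay for scope values 1 or 2 with a qualifier in (0, 0x3FEF]. In FCP, the 16-bit retry delay timer field carries the scope in bits 15:14 and a qualifier in units of 100 ms in bits 13:0; a hedged sketch of the decode (the shift-based scope extraction and the helper itself are illustrative; QEDF_RETRY_DELAY_MAX is the driver's cap):

/* Returns a jiffies deadline, or 0 for combinations not handled. */
static unsigned long retry_delay_deadline(u16 retry_delay_timer)
{
    u16 scope = (retry_delay_timer >> 14) & 0x3;    /* bits 15:14 */
    u16 qualifier = retry_delay_timer & 0x3FFF;     /* bits 13:0 */

    if ((scope != 1 && scope != 2) || !qualifier || qualifier > 0x3FEF)
        return 0;

    if (qualifier > QEDF_RETRY_DELAY_MAX)           /* clamp to the max */
        qualifier = QEDF_RETRY_DELAY_MAX;

    /* qualifier counts tenths of a second */
    return jiffies + qualifier * HZ / 10;
}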
...
    if (qedf_io_tracing)
        qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
 
+   /*
+    * We wait till the end of the function to clear the
+    * outstanding bit in case we need to send an abort
+    */
+   clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
    io_req->sc_cmd = NULL;
    sc_cmd->SCp.ptr = NULL;
    sc_cmd->scsi_done(sc_cmd);
...
 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
    int result)
 {
-   u16 xid;
    struct scsi_cmnd *sc_cmd;
    int refcount;
 
-   if (!io_req)
+   if (!io_req) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
        return;
+   }
 
-   xid = io_req->xid;
+   if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "io_req:%p scsi_done handling already done\n",
+             io_req);
+       return;
+   }
+
+   /*
+    * We will be done with this command after this call so clear the
+    * outstanding bit.
+    */
+   clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
    sc_cmd = io_req->sc_cmd;
 
    if (!sc_cmd) {
...
        return;
    }
 
+   if (!virt_addr_valid(sc_cmd)) {
+       QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
+       goto bad_scsi_ptr;
+   }
+
    if (!sc_cmd->SCp.ptr) {
        QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
            "another context.\n");
        return;
+   }
+
+   if (!sc_cmd->device) {
+       QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
+            sc_cmd);
+       goto bad_scsi_ptr;
+   }
+
+   if (!virt_addr_valid(sc_cmd->device)) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
+       goto bad_scsi_ptr;
+   }
+
+   if (!sc_cmd->sense_buffer) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
+            sc_cmd);
+       goto bad_scsi_ptr;
+   }
+
+   if (!virt_addr_valid(sc_cmd->sense_buffer)) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
+            sc_cmd);
+       goto bad_scsi_ptr;
+   }
+
+   if (!sc_cmd->scsi_done) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
+            sc_cmd);
+       goto bad_scsi_ptr;
    }
 
    qedf_unmap_sg_list(qedf, io_req);
...
    sc_cmd->SCp.ptr = NULL;
    sc_cmd->scsi_done(sc_cmd);
    kref_put(&io_req->refcount, qedf_release_cmd);
+   return;
+
+bad_scsi_ptr:
+   /*
+    * Clear the io_req->sc_cmd backpointer so we don't try to process
+    * this again
+    */
+   io_req->sc_cmd = NULL;
+   kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
 }
 
 /*
...
    u64 err_warn_bit_map;
    u8 err_warn = 0xff;
 
-   if (!cqe)
+   if (!cqe) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "cqe is NULL for io_req %p xid=0x%x\n",
+             io_req, io_req->xid);
        return;
+   }
 
    QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
        "xid=0x%x\n", io_req->xid);
...
 {
    int rval;
 
-   if (!cqe)
+   if (io_req == NULL) {
+       QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
        return;
+   }
+
+   if (io_req->fcport == NULL) {
+       QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
+       return;
+   }
+
+   if (!cqe) {
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+             "cqe is NULL for io_req %p\n", io_req);
+       return;
+   }
 
    QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
        "xid=0x%x\n", io_req->xid);
...
        le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
        le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
        le32_to_cpu(cqe->cqe_info.err_info.rx_id));
+
+   /* When flush is active, let the cmds be flushed out from the cleanup context */
+   if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
+       (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
+        io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
+       QEDF_ERR(&qedf->dbg_ctx,
+            "Dropping EQE for xid=0x%x as fcport is flushing",
+            io_req->xid);
+       return;
+   }
 
    if (qedf->stop_io_on_error) {
        qedf_stop_all_io(qedf);
...
     */
    els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
 
+   clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
    /* Cancel the timer */
    cancel_delayed_work_sync(&els_req->timeout_work);
 
...
    struct qedf_ctx *qedf;
    struct qedf_cmd_mgr *cmd_mgr;
    int i, rc;
+   unsigned long flags;
+   int flush_cnt = 0;
+   int wait_cnt = 100;
+   int refcount = 0;
 
-   if (!fcport)
+   if (!fcport) {
+       QEDF_ERR(NULL, "fcport is NULL\n");
        return;
+   }
 
    /* Check that fcport is still offloaded */
    if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
...
    }
 
    qedf = fcport->qedf;
+
+   if (!qedf) {
+       QEDF_ERR(NULL, "qedf is NULL.\n");
+       return;
+   }
+
+   /* Only wait for all commands to be queued in the Upload context */
+   if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+       (lun == -1)) {
+       while (atomic_read(&fcport->ios_to_queue)) {
+           QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Waiting for %d I/Os to be queued\n",
+                 atomic_read(&fcport->ios_to_queue));
+           if (wait_cnt == 0) {
+               QEDF_ERR(NULL,
+                    "%d I/O requests could not be queued\n",
+                    atomic_read(&fcport->ios_to_queue));
+           }
+           msleep(20);
+           wait_cnt--;
+       }
+   }
+
    cmd_mgr = qedf->cmd_mgr;
 
-   QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
+   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+         "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
+         atomic_read(&fcport->num_active_ios), fcport,
+         fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
+   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
+
+   mutex_lock(&qedf->flush_mutex);
+   if (lun == -1) {
+       set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+   } else {
+       set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+       fcport->lun_reset_lun = lun;
+   }
 
    for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
        io_req = &cmd_mgr->cmds[i];
 
        if (!io_req)
            continue;
+       if (!io_req->fcport)
+           continue;
+
+       spin_lock_irqsave(&cmd_mgr->lock, flags);
+
+       if (io_req->alloc) {
+           if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+               if (io_req->cmd_type == QEDF_SCSI_CMD)
+                   QEDF_ERR(&qedf->dbg_ctx,
+                        "Allocated but not queued, xid=0x%x\n",
+                        io_req->xid);
+           }
+           spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+       } else {
+           spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+           continue;
+       }
+
        if (io_req->fcport != fcport)
            continue;
-       if (io_req->cmd_type == QEDF_ELS) {
+
+       /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
+        * but RRQ is still pending.
+        * Workaround: Within qedf_send_rrq, we check if the fcport is
+        * NULL, and we drop the ref on the io_req to clean it up.
+        */
+       if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+           refcount = kref_read(&io_req->refcount);
+           QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
+                 io_req->xid, io_req->cmd_type, refcount);
+           /* If RRQ work has been queued, try to cancel it and
+            * free the io_req
+            */
+           if (atomic_read(&io_req->state) ==
+               QEDFC_CMD_ST_RRQ_WAIT) {
+               if (cancel_delayed_work_sync
+                   (&io_req->rrq_work)) {
+                   QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "Putting reference for pending RRQ work xid=0x%x.\n",
+                         io_req->xid);
+                   /* ID: 003 */
+                   kref_put(&io_req->refcount,
+                        qedf_release_cmd);
+               }
+           }
+           continue;
+       }
+
+       /* Only consider flushing ELS during target reset */
+       if (io_req->cmd_type == QEDF_ELS &&
+           lun == -1) {
            rc = kref_get_unless_zero(&io_req->refcount);
            if (!rc) {
                QEDF_ERR(&(qedf->dbg_ctx),
...
                    io_req, io_req->xid);
                continue;
            }
+           qedf_initiate_cleanup(io_req, false);
+           flush_cnt++;
            qedf_flush_els_req(qedf, io_req);
+
            /*
             * Release the kref and go back to the top of the
             * loop.
...
        }
 
        if (io_req->cmd_type == QEDF_ABTS) {
+           /* ID: 004 */
            rc = kref_get_unless_zero(&io_req->refcount);
            if (!rc) {
                QEDF_ERR(&(qedf->dbg_ctx),
...
                    io_req, io_req->xid);
                continue;
            }
+           if (lun != -1 && io_req->lun != lun)
+               goto free_cmd;
+
            QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                "Flushing abort xid=0x%x.\n", io_req->xid);
 
-           clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-
-           if (io_req->sc_cmd) {
-               if (io_req->return_scsi_cmd_on_abts)
-                   qedf_scsi_done(qedf, io_req, DID_ERROR);
+           if (cancel_delayed_work_sync(&io_req->rrq_work)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                     "Putting ref for cancelled RRQ work xid=0x%x.\n",
+                     io_req->xid);
+               kref_put(&io_req->refcount, qedf_release_cmd);
            }
 
-           /* Notify eh_abort handler that ABTS is complete */
-           complete(&io_req->abts_done);
-           kref_put(&io_req->refcount, qedf_release_cmd);
-
+           if (cancel_delayed_work_sync(&io_req->timeout_work)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                     "Putting ref for cancelled tmo work xid=0x%x.\n",
+                     io_req->xid);
+               qedf_initiate_cleanup(io_req, true);
+               /* Notify eh_abort handler that ABTS is
+                * complete
+                */
+               complete(&io_req->abts_done);
+               clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+               /* ID: 002 */
+               kref_put(&io_req->refcount, qedf_release_cmd);
+           }
+           flush_cnt++;
            goto free_cmd;
        }
 
        if (!io_req->sc_cmd)
            continue;
-       if (lun > 0) {
-           if (io_req->sc_cmd->device->lun !=
-               (u64)lun)
+       if (!io_req->sc_cmd->device) {
+           QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Device backpointer NULL for sc_cmd=%p.\n",
+                 io_req->sc_cmd);
+           /* Put reference for non-existent scsi_cmnd */
+           io_req->sc_cmd = NULL;
+           qedf_initiate_cleanup(io_req, false);
+           kref_put(&io_req->refcount, qedf_release_cmd);
+           continue;
+       }
+       if (lun > -1) {
+           if (io_req->lun != lun)
                continue;
        }
 
1526 | 1803 | "io_req=0x%p xid=0x%x\n", io_req, io_req->xid); |
---|
1527 | 1804 | continue; |
---|
1528 | 1805 | } |
---|
| 1806 | + |
---|
1529 | 1807 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, |
---|
1530 | 1808 | "Cleanup xid=0x%x.\n", io_req->xid); |
---|
| 1809 | + flush_cnt++; |
---|
1531 | 1810 | |
---|
1532 | 1811 | /* Cleanup task and return I/O mid-layer */ |
---|
1533 | 1812 | qedf_initiate_cleanup(io_req, true); |
---|
1534 | 1813 | |
---|
1535 | 1814 | free_cmd: |
---|
1536 | | - kref_put(&io_req->refcount, qedf_release_cmd); |
---|
| 1815 | + kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */ |
---|
1537 | 1816 | } |
---|
| 1817 | + |
---|
| 1818 | + wait_cnt = 60; |
---|
| 1819 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 1820 | + "Flushed 0x%x I/Os, active=0x%x.\n", |
---|
| 1821 | + flush_cnt, atomic_read(&fcport->num_active_ios)); |
---|
| 1822 | + /* Only wait for all commands to complete in the Upload context */ |
---|
| 1823 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && |
---|
| 1824 | + (lun == -1)) { |
---|
| 1825 | + while (atomic_read(&fcport->num_active_ios)) { |
---|
| 1826 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 1827 | + "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n", |
---|
| 1828 | + flush_cnt, |
---|
| 1829 | + atomic_read(&fcport->num_active_ios), |
---|
| 1830 | + wait_cnt); |
---|
| 1831 | + if (wait_cnt == 0) { |
---|
| 1832 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 1833 | + "Flushed %d I/Os, active=%d.\n", |
---|
| 1834 | + flush_cnt, |
---|
| 1835 | + atomic_read(&fcport->num_active_ios)); |
---|
| 1836 | + for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { |
---|
| 1837 | + io_req = &cmd_mgr->cmds[i]; |
---|
| 1838 | + if (io_req->fcport && |
---|
| 1839 | + io_req->fcport == fcport) { |
---|
| 1840 | + refcount = |
---|
| 1841 | + kref_read(&io_req->refcount); |
---|
| 1842 | + set_bit(QEDF_CMD_DIRTY, |
---|
| 1843 | + &io_req->flags); |
---|
| 1844 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 1845 | + "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n", |
---|
| 1846 | + io_req, io_req->xid, |
---|
| 1847 | + io_req->flags, |
---|
| 1848 | + io_req->sc_cmd, |
---|
| 1849 | + refcount, |
---|
| 1850 | + io_req->cmd_type); |
---|
| 1851 | + } |
---|
| 1852 | + } |
---|
| 1853 | + WARN_ON(1); |
---|
| 1854 | + break; |
---|
| 1855 | + } |
---|
| 1856 | + msleep(500); |
---|
| 1857 | + wait_cnt--; |
---|
| 1858 | + } |
---|
| 1859 | + } |
---|
| 1860 | + |
---|
| 1861 | + clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); |
---|
| 1862 | + clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); |
---|
| 1863 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n"); |
---|
| 1864 | + mutex_unlock(&qedf->flush_mutex); |
---|
1538 | 1865 | } |
---|
1539 | 1866 | |
---|
1540 | 1867 | /* |
---|
.. | .. |
---|
1548 | 1875 | struct fc_rport_priv *rdata; |
---|
1549 | 1876 | struct qedf_ctx *qedf; |
---|
1550 | 1877 | u16 xid; |
---|
1551 | | - u32 r_a_tov = 0; |
---|
1552 | 1878 | int rc = 0; |
---|
1553 | 1879 | unsigned long flags; |
---|
1554 | 1880 | struct fcoe_wqe *sqe; |
---|
1555 | 1881 | u16 sqe_idx; |
---|
| 1882 | + int refcount = 0; |
---|
1556 | 1883 | |
---|
1557 | 1884 | /* Sanity check qedf_rport before dereferencing any pointers */ |
---|
1558 | 1885 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
1559 | 1886 | QEDF_ERR(NULL, "tgt not offloaded\n"); |
---|
1560 | 1887 | rc = 1; |
---|
1561 | | - goto abts_err; |
---|
| 1888 | + goto out; |
---|
1562 | 1889 | } |
---|
1563 | 1890 | |
---|
1564 | | - rdata = fcport->rdata; |
---|
1565 | | - r_a_tov = rdata->r_a_tov; |
---|
1566 | 1891 | qedf = fcport->qedf; |
---|
| 1892 | + rdata = fcport->rdata; |
---|
| 1893 | + |
---|
| 1894 | + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { |
---|
| 1895 | + QEDF_ERR(&qedf->dbg_ctx, "stale rport\n"); |
---|
| 1896 | + rc = 1; |
---|
| 1897 | + goto out; |
---|
| 1898 | + } |
---|
| 1899 | + |
---|
1567 | 1900 | lport = qedf->lport; |
---|
1568 | 1901 | |
---|
1569 | 1902 | if (lport->state != LPORT_ST_READY || !(lport->link_up)) { |
---|
1570 | 1903 | QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); |
---|
1571 | 1904 | rc = 1; |
---|
1572 | | - goto abts_err; |
---|
| 1905 | + goto drop_rdata_kref; |
---|
1573 | 1906 | } |
---|
1574 | 1907 | |
---|
1575 | 1908 | if (atomic_read(&qedf->link_down_tmo_valid) > 0) { |
---|
1576 | 1909 | QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); |
---|
1577 | 1910 | rc = 1; |
---|
1578 | | - goto abts_err; |
---|
| 1911 | + goto drop_rdata_kref; |
---|
1579 | 1912 | } |
---|
1580 | 1913 | |
---|
1581 | 1914 | /* Ensure room on SQ */ |
---|
1582 | 1915 | if (!atomic_read(&fcport->free_sqes)) { |
---|
1583 | 1916 | QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); |
---|
1584 | 1917 | rc = 1; |
---|
1585 | | - goto abts_err; |
---|
| 1918 | + goto drop_rdata_kref; |
---|
1586 | 1919 | } |
---|
1587 | 1920 | |
---|
1588 | 1921 | if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
1589 | 1922 | QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n"); |
---|
1590 | 1923 | rc = 1; |
---|
1591 | | - goto out; |
---|
| 1924 | + goto drop_rdata_kref; |
---|
1592 | 1925 | } |
---|
1593 | 1926 | |
---|
| 1927 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
1594 | 1928 | if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || |
---|
1595 | 1929 | test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || |
---|
1596 | 1930 | test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { |
---|
1597 | | - QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " |
---|
1598 | | - "cleanup or abort processing or already " |
---|
1599 | | - "completed.\n", io_req->xid); |
---|
| 1931 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 1932 | + "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n", |
---|
| 1933 | + io_req->xid, io_req->sc_cmd); |
---|
1600 | 1934 | rc = 1; |
---|
1601 | | - goto out; |
---|
| 1935 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
| 1936 | + goto drop_rdata_kref; |
---|
1602 | 1937 | } |
---|
| 1938 | + |
---|
| 1939 | + /* Set the command type to abort */ |
---|
| 1940 | + io_req->cmd_type = QEDF_ABTS; |
---|
| 1941 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1603 | 1942 | |
---|
1604 | 1943 | kref_get(&io_req->refcount); |
---|
1605 | 1944 | |
---|
.. | .. |
---|
1607 | 1946 | qedf->control_requests++; |
---|
1608 | 1947 | qedf->packet_aborts++; |
---|
1609 | 1948 | |
---|
1610 | | - /* Set the return CPU to be the same as the request one */ |
---|
1611 | | - io_req->cpu = smp_processor_id(); |
---|
1612 | | - |
---|
1613 | | - /* Set the command type to abort */ |
---|
1614 | | - io_req->cmd_type = QEDF_ABTS; |
---|
1615 | 1949 | io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; |
---|
1616 | 1950 | |
---|
1617 | 1951 | set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); |
---|
1618 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = " |
---|
1619 | | - "0x%x\n", xid); |
---|
| 1952 | + refcount = kref_read(&io_req->refcount); |
---|
| 1953 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, |
---|
| 1954 | + "ABTS io_req xid = 0x%x refcount=%d\n", |
---|
| 1955 | + xid, refcount); |
---|
1620 | 1956 | |
---|
1621 | | - qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ); |
---|
| 1957 | + qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT); |
---|
1622 | 1958 | |
---|
1623 | 1959 | spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
1624 | 1960 | |
---|
.. | .. |
---|
1632 | 1968 | |
---|
1633 | 1969 | spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1634 | 1970 | |
---|
1635 | | - return rc; |
---|
1636 | | -abts_err: |
---|
1637 | | - /* |
---|
1638 | | - * If the ABTS task fails to queue then we need to cleanup the |
---|
1639 | | - * task at the firmware. |
---|
1640 | | - */ |
---|
1641 | | - qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts); |
---|
| 1971 | +drop_rdata_kref: |
---|
| 1972 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
1642 | 1973 | out: |
---|
1643 | 1974 | return rc; |
---|
1644 | 1975 | } |
---|
.. | .. |
---|
1647 | 1978 | struct qedf_ioreq *io_req) |
---|
1648 | 1979 | { |
---|
1649 | 1980 | uint32_t r_ctl; |
---|
1650 | | - uint16_t xid; |
---|
| 1981 | + int rc; |
---|
| 1982 | + struct qedf_rport *fcport = io_req->fcport; |
---|
1651 | 1983 | |
---|
1652 | 1984 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " |
---|
1653 | 1985 | "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); |
---|
1654 | 1986 | |
---|
1655 | | - cancel_delayed_work(&io_req->timeout_work); |
---|
1656 | | - |
---|
1657 | | - xid = io_req->xid; |
---|
1658 | 1987 | r_ctl = cqe->cqe_info.abts_info.r_ctl; |
---|
| 1988 | + |
---|
| 1989 | + /* This was added at a point when we were scheduling abts_compl & |
---|
| 1990 | + * cleanup_compl on different CPUs and there was a possibility of |
---|
| 1991 | + * the io_req to be freed from the other context before we got here. |
---|
| 1992 | + */ |
---|
| 1993 | + if (!fcport) { |
---|
| 1994 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 1995 | + "Dropping ABTS completion xid=0x%x as fcport is NULL", |
---|
| 1996 | + io_req->xid); |
---|
| 1997 | + return; |
---|
| 1998 | + } |
---|
| 1999 | + |
---|
| 2000 | + /* |
---|
| 2001 | + * When flush is active, let the cmds be completed from the cleanup |
---|
| 2002 | + * context |
---|
| 2003 | + */ |
---|
| 2004 | + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || |
---|
| 2005 | + test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { |
---|
| 2006 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 2007 | + "Dropping ABTS completion xid=0x%x as fcport is flushing", |
---|
| 2008 | + io_req->xid); |
---|
| 2009 | + return; |
---|
| 2010 | + } |
---|
| 2011 | + |
---|
| 2012 | + if (!cancel_delayed_work(&io_req->timeout_work)) { |
---|
| 2013 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2014 | + "Wasn't able to cancel abts timeout work.\n"); |
---|
| 2015 | + } |
---|
1659 | 2016 | |
---|
1660 | 2017 | switch (r_ctl) { |
---|
1661 | 2018 | case FC_RCTL_BA_ACC: |
---|
1662 | 2019 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, |
---|
1663 | 2020 | "ABTS response - ACC Send RRQ after R_A_TOV\n"); |
---|
1664 | 2021 | io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; |
---|
| 2022 | + rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */ |
---|
| 2023 | + if (!rc) { |
---|
| 2024 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, |
---|
| 2025 | + "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n", |
---|
| 2026 | + io_req->xid); |
---|
| 2027 | + return; |
---|
| 2028 | + } |
---|
1665 | 2029 | /* |
---|
1666 | 2030 | * Don't release this cmd yet. It will be released |
---|
1667 | 2031 | * after we get the RRQ response. |
---|
1668 | 2032 | */ |
---|
1669 | | - kref_get(&io_req->refcount); |
---|
1670 | 2033 | queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, |
---|
1671 | 2034 | msecs_to_jiffies(qedf->lport->r_a_tov)); |
---|
| 2035 | + atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT); |
---|
1672 | 2036 | break; |
---|
1673 | 2037 | /* For error cases let the cleanup return the command */ |
---|
1674 | 2038 | case FC_RCTL_BA_RJT: |
---|
.. | .. |
---|
1684 | 2048 | clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); |
---|
1685 | 2049 | |
---|
1686 | 2050 | if (io_req->sc_cmd) { |
---|
| 2051 | + if (!io_req->return_scsi_cmd_on_abts) |
---|
| 2052 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, |
---|
| 2053 | + "Not call scsi_done for xid=0x%x.\n", |
---|
| 2054 | + io_req->xid); |
---|
1687 | 2055 | if (io_req->return_scsi_cmd_on_abts) |
---|
1688 | 2056 | qedf_scsi_done(qedf, io_req, DID_ERROR); |
---|
1689 | 2057 | } |
---|
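Both arms of the timer handling above are instances of the delayed-work pattern: qedf_cmd_timer_set() arms the abort timer, the completion path tries cancel_delayed_work() and logs when it loses that race, and the BA_ACC case queues rrq_work after R_A_TOV. A stand-alone sketch of the pattern, with illustrative demo_* names and an arbitrary delay:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	/* Queue the handler to run after roughly two seconds. */
	schedule_delayed_work(&demo_work, msecs_to_jiffies(2000));
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * cancel_delayed_work() returns false when the work was not
	 * pending (it already started or was never queued) -- the race
	 * the handler above logs when it cannot cancel the abort timer.
	 * The _sync variant also waits for a running handler to finish.
	 */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");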
.. | .. |
---|
1803 | 2171 | { |
---|
1804 | 2172 | struct qedf_rport *fcport; |
---|
1805 | 2173 | struct qedf_ctx *qedf; |
---|
1806 | | - uint16_t xid; |
---|
1807 | | - struct e4_fcoe_task_context *task; |
---|
1808 | 2174 | int tmo = 0; |
---|
1809 | 2175 | int rc = SUCCESS; |
---|
1810 | 2176 | unsigned long flags; |
---|
1811 | 2177 | struct fcoe_wqe *sqe; |
---|
1812 | 2178 | u16 sqe_idx; |
---|
| 2179 | + int refcount = 0; |
---|
1813 | 2180 | |
---|
1814 | 2181 | fcport = io_req->fcport; |
---|
1815 | 2182 | if (!fcport) { |
---|
.. | .. |
---|
1820 | 2187 | /* Sanity check qedf_rport before dereferencing any pointers */ |
---|
1821 | 2188 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
1822 | 2189 | QEDF_ERR(NULL, "tgt not offloaded\n"); |
---|
1823 | | - rc = 1; |
---|
1824 | 2190 | return SUCCESS; |
---|
1825 | 2191 | } |
---|
1826 | 2192 | |
---|
.. | .. |
---|
1830 | 2196 | return SUCCESS; |
---|
1831 | 2197 | } |
---|
1832 | 2198 | |
---|
| 2199 | + if (io_req->cmd_type == QEDF_ELS) { |
---|
| 2200 | + goto process_els; |
---|
| 2201 | + } |
---|
| 2202 | + |
---|
1833 | 2203 | if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || |
---|
1834 | | - test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { |
---|
| 2204 | + test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { |
---|
1835 | 2205 | QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " |
---|
1836 | 2206 | "cleanup processing or already completed.\n", |
---|
1837 | 2207 | io_req->xid); |
---|
1838 | 2208 | return SUCCESS; |
---|
1839 | 2209 | } |
---|
| 2210 | + set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); |
---|
1840 | 2211 | |
---|
| 2212 | +process_els: |
---|
1841 | 2213 | /* Ensure room on SQ */ |
---|
1842 | 2214 | if (!atomic_read(&fcport->free_sqes)) { |
---|
1843 | 2215 | QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); |
---|
| 2216 | + /* Need to make sure we clear the flag since it was set */ |
---|
| 2217 | + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); |
---|
1844 | 2218 | return FAILED; |
---|
1845 | 2219 | } |
---|
1846 | 2220 | |
---|
| 2221 | + if (io_req->cmd_type == QEDF_CLEANUP) { |
---|
| 2222 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2223 | + "io_req=0x%x is already a cleanup command cmd_type=%d.\n", |
---|
| 2224 | + io_req->xid, io_req->cmd_type); |
---|
| 2225 | + clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); |
---|
| 2226 | + return SUCCESS; |
---|
| 2227 | + } |
---|
1847 | 2228 | |
---|
1848 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n", |
---|
1849 | | - io_req->xid); |
---|
| 2229 | + refcount = kref_read(&io_req->refcount); |
---|
| 2230 | + |
---|
| 2231 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 2232 | + "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n", |
---|
| 2233 | + io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags, |
---|
| 2234 | + refcount, fcport, fcport->rdata->ids.port_id); |
---|
1850 | 2235 | |
---|
1851 | 2236 | /* Cleanup cmds re-use the same TID as the original I/O */ |
---|
1852 | | - xid = io_req->xid; |
---|
| 2237 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
1853 | 2238 | io_req->cmd_type = QEDF_CLEANUP; |
---|
| 2239 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1854 | 2240 | io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; |
---|
1855 | 2241 | |
---|
1856 | | - /* Set the return CPU to be the same as the request one */ |
---|
1857 | | - io_req->cpu = smp_processor_id(); |
---|
1858 | | - |
---|
1859 | | - set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); |
---|
1860 | | - |
---|
1861 | | - task = qedf_get_task_mem(&qedf->tasks, xid); |
---|
1862 | | - |
---|
1863 | | - init_completion(&io_req->tm_done); |
---|
| 2242 | + init_completion(&io_req->cleanup_done); |
---|
1864 | 2243 | |
---|
1865 | 2244 | spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
1866 | 2245 | |
---|
.. | .. |
---|
1874 | 2253 | |
---|
1875 | 2254 | spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1876 | 2255 | |
---|
1877 | | - tmo = wait_for_completion_timeout(&io_req->tm_done, |
---|
1878 | | - QEDF_CLEANUP_TIMEOUT * HZ); |
---|
| 2256 | + tmo = wait_for_completion_timeout(&io_req->cleanup_done, |
---|
| 2257 | + QEDF_CLEANUP_TIMEOUT * HZ); |
---|
1879 | 2258 | |
---|
1880 | 2259 | if (!tmo) { |
---|
1881 | 2260 | rc = FAILED; |
---|
.. | .. |
---|
1888 | 2267 | qedf_drain_request(qedf); |
---|
1889 | 2268 | } |
---|
1890 | 2269 | |
---|
| 2270 | + /* If it is a TASK MGMT command, handle it; the reference will be |
---|
| 2271 | + * decreased in qedf_execute_tmf(). |
---|
| 2272 | + */ |
---|
| 2273 | + if (io_req->tm_flags == FCP_TMF_LUN_RESET || |
---|
| 2274 | + io_req->tm_flags == FCP_TMF_TGT_RESET) { |
---|
| 2275 | + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); |
---|
| 2276 | + io_req->sc_cmd = NULL; |
---|
| 2277 | + kref_put(&io_req->refcount, qedf_release_cmd); |
---|
| 2278 | + complete(&io_req->tm_done); |
---|
| 2279 | + } |
---|
| 2280 | + |
---|
1891 | 2281 | if (io_req->sc_cmd) { |
---|
| 2282 | + if (!io_req->return_scsi_cmd_on_abts) |
---|
| 2283 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, |
---|
| 2284 | + "Not call scsi_done for xid=0x%x.\n", |
---|
| 2285 | + io_req->xid); |
---|
1892 | 2286 | if (io_req->return_scsi_cmd_on_abts) |
---|
1893 | 2287 | qedf_scsi_done(qedf, io_req, DID_ERROR); |
---|
1894 | 2288 | } |
---|
.. | .. |
---|
1910 | 2304 | clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); |
---|
1911 | 2305 | |
---|
1912 | 2306 | /* Complete so we can finish cleaning up the I/O */ |
---|
1913 | | - complete(&io_req->tm_done); |
---|
| 2307 | + complete(&io_req->cleanup_done); |
---|
1914 | 2308 | } |
---|
1915 | 2309 | |
---|
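qedf_initiate_cleanup() above claims exclusive ownership of a command with test_and_set_bit() before issuing the firmware cleanup, so two racing contexts cannot both clean up the same xid. The claim idiom in miniature, with illustrative names:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_CMD_IN_CLEANUP	0	/* bit index, not a mask */

static int demo_try_claim_cleanup(unsigned long *flags)
{
	/*
	 * test_and_set_bit() atomically sets the bit and returns its old
	 * value, so a non-zero return means another context already owns
	 * the cleanup and this caller must back off.
	 */
	if (test_and_set_bit(DEMO_CMD_IN_CLEANUP, flags))
		return -EBUSY;

	/* ... issue the firmware cleanup ... */

	clear_bit(DEMO_CMD_IN_CLEANUP, flags);	/* release ownership */
	return 0;
}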
1916 | 2310 | static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, |
---|
.. | .. |
---|
1923 | 2317 | int rc = 0; |
---|
1924 | 2318 | uint16_t xid; |
---|
1925 | 2319 | int tmo = 0; |
---|
| 2320 | + int lun = 0; |
---|
1926 | 2321 | unsigned long flags; |
---|
1927 | 2322 | struct fcoe_wqe *sqe; |
---|
1928 | 2323 | u16 sqe_idx; |
---|
1929 | 2324 | |
---|
1930 | 2325 | if (!sc_cmd) { |
---|
1931 | | - QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); |
---|
| 2326 | + QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n"); |
---|
1932 | 2327 | return FAILED; |
---|
1933 | 2328 | } |
---|
1934 | 2329 | |
---|
| 2330 | + lun = (int)sc_cmd->device->lun; |
---|
1935 | 2331 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
1936 | 2332 | QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); |
---|
1937 | 2333 | rc = FAILED; |
---|
1938 | | - return FAILED; |
---|
| 2334 | + goto no_flush; |
---|
1939 | 2335 | } |
---|
1940 | | - |
---|
1941 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x " |
---|
1942 | | - "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags); |
---|
1943 | 2336 | |
---|
1944 | 2337 | io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); |
---|
1945 | 2338 | if (!io_req) { |
---|
1946 | 2339 | QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF\n"); |
---|
1947 | 2340 | rc = -EAGAIN; |
---|
1948 | | - goto reset_tmf_err; |
---|
| 2341 | + goto no_flush; |
---|
1949 | 2342 | } |
---|
1950 | 2343 | |
---|
1951 | 2344 | if (tm_flags == FCP_TMF_LUN_RESET) |
---|
.. | .. |
---|
1958 | 2351 | io_req->fcport = fcport; |
---|
1959 | 2352 | io_req->cmd_type = QEDF_TASK_MGMT_CMD; |
---|
1960 | 2353 | |
---|
1961 | | - /* Set the return CPU to be the same as the request one */ |
---|
| 2354 | + /* Record which cpu this request is associated with */ |
---|
1962 | 2355 | io_req->cpu = smp_processor_id(); |
---|
1963 | 2356 | |
---|
1964 | 2357 | /* Set TM flags */ |
---|
.. | .. |
---|
1967 | 2360 | io_req->tm_flags = tm_flags; |
---|
1968 | 2361 | |
---|
1969 | | - /* Default is to return a SCSI command when an error occurs */ |
---|
1970 | | - io_req->return_scsi_cmd_on_abts = true; |
---|
| 2362 | + /* Don't complete the SCSI command from the abort path for a TMF */ |
---|
| 2363 | + io_req->return_scsi_cmd_on_abts = false; |
---|
1971 | 2364 | |
---|
1972 | 2365 | /* Obtain exchange id */ |
---|
1973 | 2366 | xid = io_req->xid; |
---|
.. | .. |
---|
1991 | 2384 | |
---|
1992 | 2385 | spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1993 | 2386 | |
---|
| 2387 | + set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); |
---|
1994 | 2388 | tmo = wait_for_completion_timeout(&io_req->tm_done, |
---|
1995 | 2389 | QEDF_TM_TIMEOUT * HZ); |
---|
1996 | 2390 | |
---|
1997 | 2391 | if (!tmo) { |
---|
1998 | 2392 | rc = FAILED; |
---|
1999 | 2393 | QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); |
---|
| 2394 | + /* Clear outstanding bit since command timed out */ |
---|
| 2395 | + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); |
---|
| 2396 | + io_req->sc_cmd = NULL; |
---|
2000 | 2397 | } else { |
---|
2001 | 2398 | /* Check TMF response code */ |
---|
2002 | 2399 | if (io_req->fcp_rsp_code == 0) |
---|
.. | .. |
---|
2004 | 2401 | else |
---|
2005 | 2402 | rc = FAILED; |
---|
2006 | 2403 | } |
---|
| 2404 | + /* |
---|
| 2405 | + * Double check that fcport has not gone into an uploading state before |
---|
| 2406 | + * executing the command flush for the LUN/target. |
---|
| 2407 | + */ |
---|
| 2408 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
| 2409 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2410 | + "fcport is uploading, not executing flush.\n"); |
---|
| 2411 | + goto no_flush; |
---|
| 2412 | + } |
---|
| 2413 | + /* We do not need this io_req any more */ |
---|
| 2414 | + kref_put(&io_req->refcount, qedf_release_cmd); |
---|
| 2415 | + |
---|
2007 | 2416 | |
---|
2008 | 2417 | if (tm_flags == FCP_TMF_LUN_RESET) |
---|
2009 | | - qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun); |
---|
| 2418 | + qedf_flush_active_ios(fcport, lun); |
---|
2010 | 2419 | else |
---|
2011 | 2420 | qedf_flush_active_ios(fcport, -1); |
---|
2012 | 2421 | |
---|
2013 | | - kref_put(&io_req->refcount, qedf_release_cmd); |
---|
2014 | | - |
---|
| 2422 | +no_flush: |
---|
2015 | 2423 | if (rc != SUCCESS) { |
---|
2016 | 2424 | QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); |
---|
2017 | 2425 | rc = FAILED; |
---|
.. | .. |
---|
2019 | 2427 | QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); |
---|
2020 | 2428 | rc = SUCCESS; |
---|
2021 | 2429 | } |
---|
2022 | | -reset_tmf_err: |
---|
2023 | 2430 | return rc; |
---|
2024 | 2431 | } |
---|
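qedf_execute_tmf() above parks on tm_done with wait_for_completion_timeout() while the CQE handler signals it from completion context; the cleanup path uses the same handshake on cleanup_done. A minimal sketch, assuming an illustrative demo_req type and a 10-second bound:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_req {
	struct completion done;
};

/* Issuing side: post the request, then wait a bounded time for the CQE. */
static int demo_issue_and_wait(struct demo_req *req)
{
	unsigned long tmo;

	init_completion(&req->done);
	/* ... ring the doorbell so the firmware processes the request ... */

	/* Returns 0 on timeout, otherwise the jiffies left on the clock. */
	tmo = wait_for_completion_timeout(&req->done, 10 * HZ);
	return tmo ? 0 : -ETIMEDOUT;
}

/* Completion side, typically called from the CQE handler. */
static void demo_complete(struct demo_req *req)
{
	complete(&req->done);
}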
2025 | 2432 | |
---|
.. | .. |
---|
2029 | 2436 | struct fc_rport_libfc_priv *rp = rport->dd_data; |
---|
2030 | 2437 | struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; |
---|
2031 | 2438 | struct qedf_ctx *qedf; |
---|
2032 | | - struct fc_lport *lport; |
---|
| 2439 | + struct fc_lport *lport = shost_priv(sc_cmd->device->host); |
---|
2033 | 2440 | int rc = SUCCESS; |
---|
2034 | 2441 | int rval; |
---|
| 2442 | + struct qedf_ioreq *io_req = NULL; |
---|
| 2443 | + int ref_cnt = 0; |
---|
| 2444 | + struct fc_rport_priv *rdata = fcport->rdata; |
---|
| 2445 | + |
---|
| 2446 | + QEDF_ERR(NULL, |
---|
| 2447 | + "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n", |
---|
| 2448 | + tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff, |
---|
| 2449 | + rport->scsi_target_id, (int)sc_cmd->device->lun); |
---|
| 2450 | + |
---|
| 2451 | + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { |
---|
| 2452 | + QEDF_ERR(NULL, "stale rport\n"); |
---|
| 2453 | + return FAILED; |
---|
| 2454 | + } |
---|
| 2455 | + |
---|
| 2456 | + QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id, |
---|
| 2457 | + (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" : |
---|
| 2458 | + "LUN RESET"); |
---|
| 2459 | + |
---|
| 2460 | + if (sc_cmd->SCp.ptr) { |
---|
| 2461 | + io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; |
---|
| 2462 | + ref_cnt = kref_read(&io_req->refcount); |
---|
| 2463 | + QEDF_ERR(NULL, |
---|
| 2464 | + "orig io_req = %p xid = 0x%x ref_cnt = %d.\n", |
---|
| 2465 | + io_req, io_req->xid, ref_cnt); |
---|
| 2466 | + } |
---|
2035 | 2467 | |
---|
2036 | 2468 | rval = fc_remote_port_chkready(rport); |
---|
2037 | | - |
---|
2038 | 2469 | if (rval) { |
---|
2039 | 2470 | QEDF_ERR(NULL, "device_reset rport not ready\n"); |
---|
2040 | 2471 | rc = FAILED; |
---|
2041 | 2472 | goto tmf_err; |
---|
2042 | 2473 | } |
---|
2043 | 2474 | |
---|
2044 | | - if (fcport == NULL) { |
---|
| 2475 | + rc = fc_block_scsi_eh(sc_cmd); |
---|
| 2476 | + if (rc) |
---|
| 2477 | + goto tmf_err; |
---|
| 2478 | + |
---|
| 2479 | + if (!fcport) { |
---|
2045 | 2480 | QEDF_ERR(NULL, "device_reset: rport is NULL\n"); |
---|
2046 | 2481 | rc = FAILED; |
---|
2047 | 2482 | goto tmf_err; |
---|
2048 | 2483 | } |
---|
2049 | 2484 | |
---|
2050 | 2485 | qedf = fcport->qedf; |
---|
2051 | | - lport = qedf->lport; |
---|
| 2486 | + |
---|
| 2487 | + if (!qedf) { |
---|
| 2488 | + QEDF_ERR(NULL, "qedf is NULL.\n"); |
---|
| 2489 | + rc = FAILED; |
---|
| 2490 | + goto tmf_err; |
---|
| 2491 | + } |
---|
| 2492 | + |
---|
| 2493 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
| 2494 | + QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n"); |
---|
| 2495 | + rc = SUCCESS; |
---|
| 2496 | + goto tmf_err; |
---|
| 2497 | + } |
---|
2052 | 2498 | |
---|
2053 | 2499 | if (test_bit(QEDF_UNLOADING, &qedf->flags) || |
---|
2054 | 2500 | test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { |
---|
.. | .. |
---|
2062 | 2508 | goto tmf_err; |
---|
2063 | 2509 | } |
---|
2064 | 2510 | |
---|
| 2511 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
| 2512 | + if (!fcport->rdata) |
---|
| 2513 | + QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n", |
---|
| 2514 | + fcport); |
---|
| 2515 | + else |
---|
| 2516 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2517 | + "fcport %p port_id=%06x is uploading.\n", |
---|
| 2518 | + fcport, fcport->rdata->ids.port_id); |
---|
| 2519 | + rc = FAILED; |
---|
| 2520 | + goto tmf_err; |
---|
| 2521 | + } |
---|
| 2522 | + |
---|
2065 | 2523 | rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); |
---|
2066 | 2524 | |
---|
2067 | 2525 | tmf_err: |
---|
| 2526 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
2068 | 2527 | return rc; |
---|
2069 | 2528 | } |
---|
2070 | 2529 | |
---|
.. | .. |
---|
2072 | 2531 | struct qedf_ioreq *io_req) |
---|
2073 | 2532 | { |
---|
2074 | 2533 | struct fcoe_cqe_rsp_info *fcp_rsp; |
---|
| 2534 | + |
---|
| 2535 | + clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); |
---|
2075 | 2536 | |
---|
2076 | 2537 | fcp_rsp = &cqe->cqe_info.rsp_info; |
---|
2077 | 2538 | qedf_parse_fcp_rsp(io_req, fcp_rsp); |
---|
.. | .. |
---|
2084 | 2545 | struct fcoe_cqe *cqe) |
---|
2085 | 2546 | { |
---|
2086 | 2547 | unsigned long flags; |
---|
2087 | | - uint16_t tmp; |
---|
2088 | 2548 | uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; |
---|
2089 | 2549 | u32 payload_len, crc; |
---|
2090 | 2550 | struct fc_frame_header *fh; |
---|
.. | .. |
---|
2136 | 2596 | fh = (struct fc_frame_header *)fc_frame_header_get(fp); |
---|
2137 | 2597 | memcpy(fh, (void *)bdq_addr, pktlen); |
---|
2138 | 2598 | |
---|
| 2599 | + QEDF_WARN(&qedf->dbg_ctx, |
---|
| 2600 | + "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n", |
---|
| 2601 | + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, |
---|
| 2602 | + fh->fh_type, fc_frame_payload_op(fp)); |
---|
| 2603 | + |
---|
2139 | 2604 | /* Initialize the frame so libfc sees it as a valid frame */ |
---|
2140 | 2605 | crc = fcoe_fc_crc(fp); |
---|
2141 | 2606 | fc_frame_init(fp); |
---|
.. | .. |
---|
2177 | 2642 | qedf->bdq_prod_idx = 0; |
---|
2178 | 2643 | |
---|
2179 | 2644 | writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); |
---|
2180 | | - tmp = readw(qedf->bdq_primary_prod); |
---|
| 2645 | + readw(qedf->bdq_primary_prod); |
---|
2181 | 2646 | writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); |
---|
2182 | | - tmp = readw(qedf->bdq_secondary_prod); |
---|
| 2647 | + readw(qedf->bdq_secondary_prod); |
---|
2183 | 2648 | |
---|
2184 | 2649 | spin_unlock_irqrestore(&qedf->hba_lock, flags); |
---|
2185 | 2650 | } |
---|
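The BDQ producer update just above follows each writew() with a readw() of the same register; because PCI reads are non-posted, the read back forces the doorbell write out to the adapter before the hba_lock is released. The flush idiom in isolation, with illustrative names:

#include <linux/io.h>

/* Write a producer index and flush the posted write to the device. */
static void demo_ring_doorbell(void __iomem *prod_reg, u16 prod_idx)
{
	writew(prod_idx, prod_reg);
	/*
	 * The readw() cannot complete until the writew() above has
	 * actually reached the device; the value read back is discarded.
	 */
	readw(prod_reg);
}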