From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] scsi: bnx2fc: separate ABTS and cleanup completions and rework FCP response handling

- replace the shared tm_done completion and wait_for_comp flag with
  dedicated abts_done/cleanup_done completions and matching
  wait_for_abts_comp/wait_for_cleanup_comp flags
- track outstanding cleanup requests with BNX2FC_FLAG_ISSUE_CLEANUP_REQ
  and wake the other waiter when either the ABTS or the cleanup
  completion arrives first
- pass the RQ buffer (rq_data) into bnx2fc_parse_fcp_rsp() and its
  callers instead of fetching and returning RQEs there
- return an error from the SG mapping path when the BD count exceeds
  BNX2FC_FW_MAX_BDS_PER_CMD
- mask the scope/qualifier bits of retry_delay_timer when the target
  returns BUSY or TASK_SET_FULL before computing retry_delay_timestamp
- drop unused lport/host locals, add __must_hold() to
  bnx2fc_abts_cleanup() and fix "couldnt" typos
---
 kernel/drivers/scsi/bnx2fc/bnx2fc_io.c | 196 +++++++++++++++++++++++++++++++------------------
 1 files changed, 124 insertions(+), 72 deletions(-)

diff --git a/kernel/drivers/scsi/bnx2fc/bnx2fc_io.c b/kernel/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5d89cc3..ed300a2 100644
--- a/kernel/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/kernel/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -24,7 +24,7 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
 
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 				 struct fcoe_fcp_rsp_payload *fcp_rsp,
-				 u8 num_rq);
+				 u8 num_rq, unsigned char *rq_data);
 
 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
 			  unsigned int timer_msec)
@@ -70,7 +70,7 @@
 				       &io_req->req_flags)) {
 			/* Handle eh_abort timeout */
 			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
-			complete(&io_req->tm_done);
+			complete(&io_req->abts_done);
 		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
 				    &io_req->req_flags)) {
 			/* Handle internally generated ABTS timeout */
@@ -775,31 +775,32 @@
 	io_req->on_tmf_queue = 1;
 	list_add_tail(&io_req->link, &tgt->active_tm_queue);
 
-	init_completion(&io_req->tm_done);
-	io_req->wait_for_comp = 1;
+	init_completion(&io_req->abts_done);
+	io_req->wait_for_abts_comp = 1;
 
 	/* Ring doorbell */
 	bnx2fc_ring_doorbell(tgt);
 	spin_unlock_bh(&tgt->tgt_lock);
 
-	rc = wait_for_completion_timeout(&io_req->tm_done,
+	rc = wait_for_completion_timeout(&io_req->abts_done,
 					 interface->tm_timeout * HZ);
 	spin_lock_bh(&tgt->tgt_lock);
 
-	io_req->wait_for_comp = 0;
+	io_req->wait_for_abts_comp = 0;
 	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
 		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
 		if (io_req->on_tmf_queue) {
 			list_del_init(&io_req->link);
 			io_req->on_tmf_queue = 0;
 		}
-		io_req->wait_for_comp = 1;
+		io_req->wait_for_cleanup_comp = 1;
+		init_completion(&io_req->cleanup_done);
 		bnx2fc_initiate_cleanup(io_req);
 		spin_unlock_bh(&tgt->tgt_lock);
-		rc = wait_for_completion_timeout(&io_req->tm_done,
+		rc = wait_for_completion_timeout(&io_req->cleanup_done,
 						 BNX2FC_FW_TIMEOUT);
 		spin_lock_bh(&tgt->tgt_lock);
-		io_req->wait_for_comp = 0;
+		io_req->wait_for_cleanup_comp = 0;
 		if (!rc)
 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
 	}
@@ -863,7 +864,7 @@
 
 	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
 	if (!abts_io_req) {
-		printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
 		rc = FAILED;
 		goto abts_err;
 	}
@@ -929,7 +930,6 @@
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -947,7 +947,6 @@
 
 	port = orig_io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
 	if (!cb_arg) {
@@ -958,7 +957,7 @@
 	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
 	if (!seq_clnp_req) {
-		printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
 		rc = -ENOMEM;
 		kfree(cb_arg);
 		goto cleanup_err;
@@ -998,7 +997,6 @@
 
 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -1014,11 +1012,10 @@
 
 	port = io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
 	if (!cleanup_io_req) {
-		printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
 		rc = -1;
 		goto cleanup_err;
 	}
@@ -1046,6 +1043,9 @@
 
 	/* Obtain free SQ entry */
 	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Set flag that cleanup request is pending with the firmware */
+	set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
 
 	/* Ring doorbell */
 	bnx2fc_ring_doorbell(tgt);
@@ -1081,12 +1081,13 @@
 }
 
 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+	__must_hold(&tgt->tgt_lock)
 {
 	struct bnx2fc_rport *tgt = io_req->tgt;
-	int rc = SUCCESS;
 	unsigned int time_left;
 
-	io_req->wait_for_comp = 1;
+	init_completion(&io_req->cleanup_done);
+	io_req->wait_for_cleanup_comp = 1;
 	bnx2fc_initiate_cleanup(io_req);
 
 	spin_unlock_bh(&tgt->tgt_lock);
@@ -1095,22 +1096,22 @@
 	 * Can't wait forever on cleanup response lest we let the SCSI error
 	 * handler wait forever
 	 */
-	time_left = wait_for_completion_timeout(&io_req->tm_done,
+	time_left = wait_for_completion_timeout(&io_req->cleanup_done,
 						BNX2FC_FW_TIMEOUT);
-	io_req->wait_for_comp = 0;
-	if (!time_left)
+	if (!time_left) {
 		BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
 			      __func__);
 
-	/*
-	 * Release reference held by SCSI command the cleanup completion
-	 * hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl() and
-	 * thus the SCSI command is not returnedi by bnx2fc_scsi_done().
-	 */
-	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		/*
+		 * Put the extra reference to the SCSI command since it would
+		 * not have been returned in this case.
+		 */
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
 
 	spin_lock_bh(&tgt->tgt_lock);
-	return rc;
+	io_req->wait_for_cleanup_comp = 0;
+	return SUCCESS;
 }
 
 /**
@@ -1198,7 +1199,8 @@
 	/* Move IO req to retire queue */
 	list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
-	init_completion(&io_req->tm_done);
+	init_completion(&io_req->abts_done);
+	init_completion(&io_req->cleanup_done);
 
 	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
@@ -1227,26 +1229,28 @@
 		kref_put(&io_req->refcount,
 			 bnx2fc_cmd_release); /* drop timer hold */
 	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-	io_req->wait_for_comp = 1;
+	io_req->wait_for_abts_comp = 1;
 	rc = bnx2fc_initiate_abts(io_req);
 	if (rc == FAILED) {
+		io_req->wait_for_cleanup_comp = 1;
 		bnx2fc_initiate_cleanup(io_req);
 		spin_unlock_bh(&tgt->tgt_lock);
-		wait_for_completion(&io_req->tm_done);
+		wait_for_completion(&io_req->cleanup_done);
 		spin_lock_bh(&tgt->tgt_lock);
-		io_req->wait_for_comp = 0;
+		io_req->wait_for_cleanup_comp = 0;
 		goto done;
 	}
 	spin_unlock_bh(&tgt->tgt_lock);
 
 	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
-	time_left = wait_for_completion_timeout(&io_req->tm_done,
-					(2 * rp->r_a_tov + 1) * HZ);
+	time_left = wait_for_completion_timeout(&io_req->abts_done,
+					msecs_to_jiffies(2 * rp->r_a_tov + 1));
 	if (time_left)
-		BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
+		BNX2FC_IO_DBG(io_req,
+			      "Timed out in eh_abort waiting for abts_done");
 
 	spin_lock_bh(&tgt->tgt_lock);
-	io_req->wait_for_comp = 0;
+	io_req->wait_for_abts_comp = 0;
 	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
 		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
 		rc = SUCCESS;
@@ -1321,10 +1325,29 @@
 	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
 		      "refcnt = %d, cmd_type = %d\n",
 		      kref_read(&io_req->refcount), io_req->cmd_type);
+	/*
+	 * Test whether there is a cleanup request pending. If not just
+	 * exit.
+	 */
+	if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
+				&io_req->req_flags))
+		return;
+	/*
+	 * If we receive a cleanup completion for this request then the
+	 * firmware will not give us an abort completion for this request
+	 * so clear any ABTS pending flags.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
+	    !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
+		set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
+		if (io_req->wait_for_abts_comp)
+			complete(&io_req->abts_done);
+	}
+
 	bnx2fc_scsi_done(io_req, DID_ERROR);
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
-	if (io_req->wait_for_comp)
-		complete(&io_req->tm_done);
+	if (io_req->wait_for_cleanup_comp)
+		complete(&io_req->cleanup_done);
 }
 
 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
@@ -1346,6 +1369,16 @@
 		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
 				" this io\n");
 		return;
+	}
+
+	/*
+	 * If we receive an ABTS completion here then we will not receive
+	 * a cleanup completion so clear any cleanup pending flags.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
+		clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
+		if (io_req->wait_for_cleanup_comp)
+			complete(&io_req->cleanup_done);
 	}
 
 	/* Do not issue RRQ as this IO is already cleanedup */
@@ -1392,10 +1425,10 @@
 			bnx2fc_cmd_timer_set(io_req, r_a_tov);
 
 io_compl:
-	if (io_req->wait_for_comp) {
+	if (io_req->wait_for_abts_comp) {
 		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
 				       &io_req->req_flags))
-			complete(&io_req->tm_done);
+			complete(&io_req->abts_done);
 	} else {
 		/*
 		 * We end up here when ABTS is issued as
@@ -1487,7 +1520,8 @@
 }
 
 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
-			     struct fcoe_task_ctx_entry *task, u8 num_rq)
+			     struct fcoe_task_ctx_entry *task, u8 num_rq,
+			     unsigned char *rq_data)
 {
 	struct bnx2fc_mp_req *tm_req;
 	struct fc_frame_header *fc_hdr;
@@ -1526,7 +1560,7 @@
 	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
 		bnx2fc_parse_fcp_rsp(io_req,
 				     (struct fcoe_fcp_rsp_payload *)
-				     rsp_buf, num_rq);
+				     rsp_buf, num_rq, rq_data);
 		if (io_req->fcp_rsp_code == 0) {
 			/* TM successful */
 			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@@ -1579,9 +1613,9 @@
 	sc_cmd->scsi_done(sc_cmd);
 
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
-	if (io_req->wait_for_comp) {
+	if (io_req->wait_for_abts_comp) {
 		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
-		complete(&io_req->tm_done);
+		complete(&io_req->abts_done);
 	}
 }
 
@@ -1625,6 +1659,7 @@
 	u64 addr;
 	int i;
 
+	WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
 	/*
 	 * Use dma_map_sg directly to ensure we're using the correct
 	 * dev struct off of pcidev.
@@ -1672,6 +1707,16 @@
 	}
 	io_req->bd_tbl->bd_valid = bd_count;
 
+	/*
+	 * Return the command to ML if BD count exceeds the max number
+	 * that can be handled by FW.
+	 */
+	if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
+		pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
+		       bd_count, io_req->xid);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -1713,15 +1758,11 @@
 
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 				 struct fcoe_fcp_rsp_payload *fcp_rsp,
-				 u8 num_rq)
+				 u8 num_rq, unsigned char *rq_data)
 {
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
-	struct bnx2fc_rport *tgt = io_req->tgt;
 	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
 	u32 rq_buff_len = 0;
-	int i;
-	unsigned char *rq_data;
-	unsigned char *dummy;
 	int fcp_sns_len = 0;
 	int fcp_rsp_len = 0;
 
@@ -1767,14 +1808,6 @@
 			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
 		}
 
-		rq_data = bnx2fc_get_next_rqe(tgt, 1);
-
-		if (num_rq > 1) {
-			/* We do not need extra sense data */
-			for (i = 1; i < num_rq; i++)
-				dummy = bnx2fc_get_next_rqe(tgt, 1);
-		}
-
 		/* fetch fcp_rsp_code */
 		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
 			/* Only for task management function */
@@ -1795,9 +1828,6 @@
 		if (fcp_sns_len)
 			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
 
-		/* return RQ entries */
-		for (i = 0; i < num_rq; i++)
-			bnx2fc_return_rqe(tgt, 1);
 	}
 }
 
@@ -1876,13 +1906,12 @@
 
 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 				   struct fcoe_task_ctx_entry *task,
-				   u8 num_rq)
+				   u8 num_rq, unsigned char *rq_data)
 {
 	struct fcoe_fcp_rsp_payload *fcp_rsp;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct scsi_cmnd *sc_cmd;
-	struct Scsi_Host *host;
-
+	u16 scope = 0, qualifier = 0;
 
 	/* scsi_cmd_cmpl is called with tgt lock held */
 
@@ -1890,6 +1919,12 @@
 		/* we will not receive ABTS response for this IO */
 		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
 			   "this scsi cmd\n");
+		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+				       &io_req->req_flags)) {
+			BNX2FC_IO_DBG(io_req,
+				      "Actual completion after cleanup request cleaning up\n");
+			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+		}
 		return;
 	}
 
@@ -1909,9 +1944,8 @@
 		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
 
 	/* parse fcp_rsp and obtain sense data from RQ if available */
-	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
-	host = sc_cmd->device->host;
 	if (!sc_cmd->SCp.ptr) {
 		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
 		return;
 	}
@@ -1928,10 +1962,10 @@
 		 * between command abort and (late) completion.
 		 */
 		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
-		if (io_req->wait_for_comp)
+		if (io_req->wait_for_abts_comp)
 			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
 					       &io_req->req_flags))
-				complete(&io_req->tm_done);
+				complete(&io_req->abts_done);
 	}
 
 	bnx2fc_unmap_sg_list(io_req);
@@ -1951,12 +1985,30 @@
 
 		if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
 		    io_req->cdb_status == SAM_STAT_BUSY) {
-			/* Set the jiffies + retry_delay_timer * 100ms
-			   for the rport/tgt */
-			tgt->retry_delay_timestamp = jiffies +
-				fcp_rsp->retry_delay_timer * HZ / 10;
+			/* Newer array firmware with BUSY or
+			 * TASK_SET_FULL may return a status that needs
+			 * the scope bits masked.
+			 * Or a huge delay timestamp up to 27 minutes
+			 * can result.
+			 */
+			if (fcp_rsp->retry_delay_timer) {
+				/* Upper 2 bits */
+				scope = fcp_rsp->retry_delay_timer
+					& 0xC000;
+				/* Lower 14 bits */
+				qualifier = fcp_rsp->retry_delay_timer
+					& 0x3FFF;
+			}
+			if (scope > 0 && qualifier > 0 &&
+				qualifier <= 0x3FEF) {
+				/* Set the jiffies +
+				 * retry_delay_timer * 100ms
+				 * for the rport/tgt
+				 */
+				tgt->retry_delay_timestamp = jiffies +
+					(qualifier * HZ / 10);
+			}
 		}
-	}
 
 	if (io_req->fcp_resid)
 		scsi_set_resid(sc_cmd, io_req->fcp_resid);
-- 
Gitblit v1.6.2