.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * QLogic FCoE Offload Driver |
---|
3 | 4 | * Copyright (c) 2016-2018 Cavium Inc. |
---|
4 | | - * |
---|
5 | | - * This software is available under the terms of the GNU General Public License |
---|
6 | | - * (GPL) Version 2, available from the file COPYING in the main directory of |
---|
7 | | - * this source tree. |
---|
8 | 5 | */ |
---|
9 | 6 | #include "qedf.h" |
---|
10 | 7 | |
---|
.. | .. |
---|
127 | 124 | task = qedf_get_task_mem(&qedf->tasks, xid); |
---|
128 | 125 | qedf_init_mp_task(els_req, task, sqe); |
---|
129 | 126 | |
---|
130 | | - /* Put timer on original I/O request */ |
---|
| 127 | + /* Put timer on els request */ |
---|
131 | 128 | if (timer_msec) |
---|
132 | 129 | qedf_cmd_timer_set(qedf, els_req, timer_msec); |
---|
133 | 130 | |
---|
.. | .. |
---|
135 | 132 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " |
---|
136 | 133 | "req\n"); |
---|
137 | 134 | qedf_ring_doorbell(fcport); |
---|
| 135 | + set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); |
---|
| 136 | + |
---|
138 | 137 | spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
139 | 138 | els_err: |
---|
140 | 139 | return rc; |
---|
.. | .. |
---|
143 | 142 | void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, |
---|
144 | 143 | struct qedf_ioreq *els_req) |
---|
145 | 144 | { |
---|
146 | | - struct fcoe_task_context *task_ctx; |
---|
147 | | - struct scsi_cmnd *sc_cmd; |
---|
148 | | - uint16_t xid; |
---|
149 | 145 | struct fcoe_cqe_midpath_info *mp_info; |
---|
| 146 | + struct qedf_rport *fcport; |
---|
150 | 147 | |
---|
151 | 148 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x" |
---|
152 | 149 | " cmd_type = %d.\n", els_req->xid, els_req->cmd_type); |
---|
153 | 150 | |
---|
| 151 | + if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) |
---|
| 152 | + || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) |
---|
| 153 | + || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) { |
---|
| 154 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 155 | + "ELS completion xid=0x%x after flush event=0x%x", |
---|
| 156 | + els_req->xid, els_req->event); |
---|
| 157 | + return; |
---|
| 158 | + } |
---|
| 159 | + |
---|
| 160 | + fcport = els_req->fcport; |
---|
| 161 | + |
---|
| 162 | + /* When flush is active, |
---|
| 163 | + * let the cmds be completed from the cleanup context |
---|
| 164 | + */ |
---|
| 165 | + if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || |
---|
| 166 | + test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { |
---|
| 167 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, |
---|
| 168 | + "Dropping ELS completion xid=0x%x as fcport is flushing", |
---|
| 169 | + els_req->xid); |
---|
| 170 | + return; |
---|
| 171 | + } |
---|
| 172 | + |
---|
| 173 | + clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); |
---|
| 174 | + |
---|
154 | 175 | /* Kill the ELS timer */ |
---|
155 | 176 | cancel_delayed_work(&els_req->timeout_work); |
---|
156 | | - |
---|
157 | | - xid = els_req->xid; |
---|
158 | | - task_ctx = qedf_get_task_mem(&qedf->tasks, xid); |
---|
159 | | - sc_cmd = els_req->sc_cmd; |
---|
160 | 177 | |
---|
161 | 178 | /* Get ELS response length from CQE */ |
---|
162 | 179 | mp_info = &cqe->cqe_info.midpath_info; |
---|
.. | .. |
---|
185 | 202 | |
---|
186 | 203 | orig_io_req = cb_arg->aborted_io_req; |
---|
187 | 204 | |
---|
188 | | - if (!orig_io_req) |
---|
| 205 | + if (!orig_io_req) { |
---|
| 206 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 207 | + "Original io_req is NULL, rrq_req = %p.\n", rrq_req); |
---|
189 | 208 | goto out_free; |
---|
190 | | - |
---|
191 | | - if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && |
---|
192 | | - rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) |
---|
193 | | - cancel_delayed_work_sync(&orig_io_req->timeout_work); |
---|
| 209 | + } |
---|
194 | 210 | |
---|
195 | 211 | refcount = kref_read(&orig_io_req->refcount); |
---|
196 | 212 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p," |
---|
197 | 213 | " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n", |
---|
198 | 214 | orig_io_req, orig_io_req->xid, rrq_req->xid, refcount); |
---|
199 | 215 | |
---|
200 | | - /* This should return the aborted io_req to the command pool */ |
---|
201 | | - if (orig_io_req) |
---|
| 216 | + /* |
---|
| 217 | + * This should return the aborted io_req to the command pool. Note that |
---|
| 218 | + * we need to check the refcount in case the original request was |
---|
| 219 | + * flushed but we get a completion on this xid. |
---|
| 220 | + */ |
---|
| 221 | + if (orig_io_req && refcount > 0) |
---|
202 | 222 | kref_put(&orig_io_req->refcount, qedf_release_cmd); |
---|
203 | 223 | |
---|
204 | 224 | out_free: |
---|
.. | .. |
---|
225 | 245 | uint32_t sid; |
---|
226 | 246 | uint32_t r_a_tov; |
---|
227 | 247 | int rc; |
---|
| 248 | + int refcount; |
---|
228 | 249 | |
---|
229 | 250 | if (!aborted_io_req) { |
---|
230 | 251 | QEDF_ERR(NULL, "abort_io_req is NULL.\n"); |
---|
.. | .. |
---|
232 | 253 | } |
---|
233 | 254 | |
---|
234 | 255 | fcport = aborted_io_req->fcport; |
---|
| 256 | + |
---|
| 257 | + if (!fcport) { |
---|
| 258 | + refcount = kref_read(&aborted_io_req->refcount); |
---|
| 259 | + QEDF_ERR(NULL, |
---|
| 260 | + "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n", |
---|
| 261 | + aborted_io_req->xid, refcount); |
---|
| 262 | + kref_put(&aborted_io_req->refcount, qedf_release_cmd); |
---|
| 263 | + return -EINVAL; |
---|
| 264 | + } |
---|
235 | 265 | |
---|
236 | 266 | /* Check that fcport is still offloaded */ |
---|
237 | 267 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
.. | .. |
---|
245 | 275 | } |
---|
246 | 276 | |
---|
247 | 277 | qedf = fcport->qedf; |
---|
| 278 | + |
---|
| 279 | + /* |
---|
| 280 | + * Sanity check that we can send a RRQ to make sure that refcount isn't |
---|
| 281 | + * 0 |
---|
| 282 | + */ |
---|
| 283 | + refcount = kref_read(&aborted_io_req->refcount); |
---|
| 284 | + if (refcount != 1) { |
---|
| 285 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, |
---|
| 286 | + "refcount for xid=%x io_req=%p refcount=%d is not 1.\n", |
---|
| 287 | + aborted_io_req->xid, aborted_io_req, refcount); |
---|
| 288 | + return -EINVAL; |
---|
| 289 | + } |
---|
| 290 | + |
---|
248 | 291 | lport = qedf->lport; |
---|
249 | 292 | sid = fcport->sid; |
---|
250 | 293 | r_a_tov = lport->r_a_tov; |
---|
.. | .. |
---|
327 | 370 | struct fc_lport *lport; |
---|
328 | 371 | struct fc_rport_priv *rdata; |
---|
329 | 372 | u32 port_id; |
---|
| 373 | + unsigned long flags; |
---|
330 | 374 | |
---|
331 | | - if (!fcport) |
---|
| 375 | + if (!fcport) { |
---|
| 376 | + QEDF_ERR(NULL, "fcport is NULL.\n"); |
---|
332 | 377 | return; |
---|
| 378 | + } |
---|
333 | 379 | |
---|
| 380 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
334 | 381 | if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) || |
---|
335 | 382 | !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || |
---|
336 | 383 | test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
337 | 384 | QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n", |
---|
338 | 385 | fcport); |
---|
| 386 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
339 | 387 | return; |
---|
340 | 388 | } |
---|
341 | 389 | |
---|
342 | 390 | /* Set that we are now in reset */ |
---|
343 | 391 | set_bit(QEDF_RPORT_IN_RESET, &fcport->flags); |
---|
| 392 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
344 | 393 | |
---|
345 | 394 | rdata = fcport->rdata; |
---|
346 | | - if (rdata) { |
---|
| 395 | + if (rdata && !kref_get_unless_zero(&rdata->kref)) { |
---|
| 396 | + fcport->rdata = NULL; |
---|
| 397 | + rdata = NULL; |
---|
| 398 | + } |
---|
| 399 | + |
---|
| 400 | + if (rdata && rdata->rp_state == RPORT_ST_READY) { |
---|
347 | 401 | lport = fcport->qedf->lport; |
---|
348 | 402 | port_id = rdata->ids.port_id; |
---|
349 | 403 | QEDF_ERR(&(fcport->qedf->dbg_ctx), |
---|
350 | 404 | "LOGO port_id=%x.\n", port_id); |
---|
351 | 405 | fc_rport_logoff(rdata); |
---|
| 406 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
| 407 | + mutex_lock(&lport->disc.disc_mutex); |
---|
352 | 408 | /* Recreate the rport and log back in */ |
---|
353 | 409 | rdata = fc_rport_create(lport, port_id); |
---|
| 410 | + mutex_unlock(&lport->disc.disc_mutex); |
---|
354 | 411 | if (rdata) |
---|
355 | 412 | fc_rport_login(rdata); |
---|
| 413 | + fcport->rdata = rdata; |
---|
356 | 414 | } |
---|
357 | 415 | clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags); |
---|
358 | 416 | } |
---|
.. | .. |
---|
380 | 438 | * If we are flushing the command just free the cb_arg as none of the |
---|
381 | 439 | * response data will be valid. |
---|
382 | 440 | */ |
---|
383 | | - if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) |
---|
| 441 | + if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) { |
---|
| 442 | + QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n", |
---|
| 443 | + els_req->xid); |
---|
384 | 444 | goto free_arg; |
---|
| 445 | + } |
---|
385 | 446 | |
---|
386 | 447 | fcport = els_req->fcport; |
---|
387 | 448 | mp_req = &(els_req->mp_req); |
---|
.. | .. |
---|
494 | 555 | |
---|
495 | 556 | orig_io_req = cb_arg->aborted_io_req; |
---|
496 | 557 | |
---|
497 | | - if (!orig_io_req) |
---|
| 558 | + if (!orig_io_req) { |
---|
| 559 | + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); |
---|
498 | 560 | goto out_free; |
---|
| 561 | + } |
---|
499 | 562 | |
---|
500 | 563 | clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); |
---|
501 | 564 | |
---|
.. | .. |
---|
509 | 572 | orig_io_req, orig_io_req->xid, srr_req->xid, refcount); |
---|
510 | 573 | |
---|
511 | 574 | /* If a SRR times out, simply free resources */ |
---|
512 | | - if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) |
---|
| 575 | + if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) { |
---|
| 576 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 577 | + "ELS timeout rec_xid=0x%x.\n", srr_req->xid); |
---|
513 | 578 | goto out_put; |
---|
| 579 | + } |
---|
514 | 580 | |
---|
515 | 581 | /* Normalize response data into struct fc_frame */ |
---|
516 | 582 | mp_req = &(srr_req->mp_req); |
---|
.. | .. |
---|
561 | 627 | struct qedf_rport *fcport; |
---|
562 | 628 | struct fc_lport *lport; |
---|
563 | 629 | struct qedf_els_cb_arg *cb_arg = NULL; |
---|
564 | | - u32 sid, r_a_tov; |
---|
| 630 | + u32 r_a_tov; |
---|
565 | 631 | int rc; |
---|
566 | 632 | |
---|
567 | 633 | if (!orig_io_req) { |
---|
.. | .. |
---|
587 | 653 | |
---|
588 | 654 | qedf = fcport->qedf; |
---|
589 | 655 | lport = qedf->lport; |
---|
590 | | - sid = fcport->sid; |
---|
591 | 656 | r_a_tov = lport->r_a_tov; |
---|
592 | 657 | |
---|
593 | 658 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, " |
---|
.. | .. |
---|
684 | 749 | cb_arg = io_req->cb_arg; |
---|
685 | 750 | |
---|
686 | 751 | /* If we timed out just free resources */ |
---|
687 | | - if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) |
---|
| 752 | + if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) { |
---|
| 753 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 754 | + "cqe is NULL or timeout event (0x%x)", io_req->event); |
---|
688 | 755 | goto free; |
---|
| 756 | + } |
---|
689 | 757 | |
---|
690 | 758 | /* Kill the timer we put on the request */ |
---|
691 | 759 | cancel_delayed_work_sync(&io_req->timeout_work); |
---|
.. | .. |
---|
788 | 856 | |
---|
789 | 857 | orig_io_req = cb_arg->aborted_io_req; |
---|
790 | 858 | |
---|
791 | | - if (!orig_io_req) |
---|
| 859 | + if (!orig_io_req) { |
---|
| 860 | + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); |
---|
792 | 861 | goto out_free; |
---|
| 862 | + } |
---|
793 | 863 | |
---|
794 | 864 | if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && |
---|
795 | 865 | rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) |
---|
.. | .. |
---|
801 | 871 | orig_io_req, orig_io_req->xid, rec_req->xid, refcount); |
---|
802 | 872 | |
---|
803 | 873 | /* If a REC times out, free resources */ |
---|
804 | | - if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) |
---|
| 874 | + if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) { |
---|
| 875 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 876 | + "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n", |
---|
| 877 | + orig_io_req, orig_io_req->xid); |
---|
805 | 878 | goto out_put; |
---|
| 879 | + } |
---|
806 | 880 | |
---|
807 | 881 | /* Normalize response data into struct fc_frame */ |
---|
808 | 882 | mp_req = &(rec_req->mp_req); |
---|
.. | .. |
---|
828 | 902 | opcode = fc_frame_payload_op(fp); |
---|
829 | 903 | if (opcode == ELS_LS_RJT) { |
---|
830 | 904 | rjt = fc_frame_payload_get(fp, sizeof(*rjt)); |
---|
| 905 | + if (!rjt) { |
---|
| 906 | + QEDF_ERR(&qedf->dbg_ctx, "payload get failed"); |
---|
| 907 | + goto out_free_frame; |
---|
| 908 | + } |
---|
| 909 | + |
---|
831 | 910 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, |
---|
832 | 911 | "Received LS_RJT for REC: er_reason=0x%x, " |
---|
833 | 912 | "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); |
---|